Merge pull request #1627 from coryb/gateway-exec-proto

update gateway to add ability to run and exec into containers
v0.8
Akihiro Suda 2020-10-04 20:11:45 +09:00 committed by GitHub
commit 78f3e86dc1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 6302 additions and 1149 deletions

View File

@ -45,11 +45,14 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
}
cb := func(ref string, s *session.Session) error {
g, err := grpcclient.New(ctx, feOpts, s.ID(), product, c.gatewayClientForBuild(ref), gworkers)
gwClient := c.gatewayClientForBuild(ref)
g, err := grpcclient.New(ctx, feOpts, s.ID(), product, gwClient, gworkers)
if err != nil {
return err
}
gwClient.caps = g.BuildOpts().Caps
if err := g.Run(ctx, buildFunc); err != nil {
return errors.Wrap(err, "failed to run Build function")
}
@ -59,14 +62,18 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
return c.solve(ctx, nil, cb, opt, statusChan)
}
func (c *Client) gatewayClientForBuild(buildid string) gatewayapi.LLBBridgeClient {
func (c *Client) gatewayClientForBuild(buildid string) *gatewayClientForBuild {
g := gatewayapi.NewLLBBridgeClient(c.conn)
return &gatewayClientForBuild{g, buildid}
return &gatewayClientForBuild{
gateway: g,
buildID: buildid,
}
}
type gatewayClientForBuild struct {
gateway gatewayapi.LLBBridgeClient
buildID string
caps apicaps.CapSet
}
func (g *gatewayClientForBuild) ResolveImageConfig(ctx context.Context, in *gatewayapi.ResolveImageConfigRequest, opts ...grpc.CallOption) (*gatewayapi.ResolveImageConfigResponse, error) {
@ -85,11 +92,17 @@ func (g *gatewayClientForBuild) ReadFile(ctx context.Context, in *gatewayapi.Rea
}
// ReadDir proxies the ReadDir call to the underlying gateway service,
// tagging the outgoing context with this build's ID. It fails fast when the
// daemon does not advertise the CapReadDir capability.
func (g *gatewayClientForBuild) ReadDir(ctx context.Context, in *gatewayapi.ReadDirRequest, opts ...grpc.CallOption) (*gatewayapi.ReadDirResponse, error) {
	err := g.caps.Supports(gatewayapi.CapReadDir)
	if err != nil {
		return nil, err
	}
	return g.gateway.ReadDir(buildid.AppendToOutgoingContext(ctx, g.buildID), in, opts...)
}
// StatFile proxies the StatFile call to the underlying gateway service,
// tagging the outgoing context with this build's ID. Requires the daemon to
// support CapStatFile.
func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.StatFileRequest, opts ...grpc.CallOption) (*gatewayapi.StatFileResponse, error) {
	err := g.caps.Supports(gatewayapi.CapStatFile)
	if err != nil {
		return nil, err
	}
	return g.gateway.StatFile(buildid.AppendToOutgoingContext(ctx, g.buildID), in, opts...)
}
@ -105,6 +118,33 @@ func (g *gatewayClientForBuild) Return(ctx context.Context, in *gatewayapi.Retur
}
// Inputs proxies the Inputs call to the underlying gateway service, tagging
// the outgoing context with this build's ID. Requires the daemon to support
// CapFrontendInputs.
func (g *gatewayClientForBuild) Inputs(ctx context.Context, in *gatewayapi.InputsRequest, opts ...grpc.CallOption) (*gatewayapi.InputsResponse, error) {
	err := g.caps.Supports(gatewayapi.CapFrontendInputs)
	if err != nil {
		return nil, err
	}
	return g.gateway.Inputs(buildid.AppendToOutgoingContext(ctx, g.buildID), in, opts...)
}
// NewContainer proxies the NewContainer call to the underlying gateway
// service, tagging the outgoing context with this build's ID. Requires the
// daemon to support CapGatewayExec.
func (g *gatewayClientForBuild) NewContainer(ctx context.Context, in *gatewayapi.NewContainerRequest, opts ...grpc.CallOption) (*gatewayapi.NewContainerResponse, error) {
	err := g.caps.Supports(gatewayapi.CapGatewayExec)
	if err != nil {
		return nil, err
	}
	return g.gateway.NewContainer(buildid.AppendToOutgoingContext(ctx, g.buildID), in, opts...)
}
// ReleaseContainer proxies the ReleaseContainer call to the underlying
// gateway service, tagging the outgoing context with this build's ID.
// Requires the daemon to support CapGatewayExec.
func (g *gatewayClientForBuild) ReleaseContainer(ctx context.Context, in *gatewayapi.ReleaseContainerRequest, opts ...grpc.CallOption) (*gatewayapi.ReleaseContainerResponse, error) {
	err := g.caps.Supports(gatewayapi.CapGatewayExec)
	if err != nil {
		return nil, err
	}
	return g.gateway.ReleaseContainer(buildid.AppendToOutgoingContext(ctx, g.buildID), in, opts...)
}
// ExecProcess opens the bidirectional ExecProcess stream on the underlying
// gateway service, tagging the outgoing context with this build's ID.
// Requires the daemon to support CapGatewayExec.
func (g *gatewayClientForBuild) ExecProcess(ctx context.Context, opts ...grpc.CallOption) (gatewayapi.LLBBridge_ExecProcessClient, error) {
	err := g.caps.Supports(gatewayapi.CapGatewayExec)
	if err != nil {
		return nil, err
	}
	return g.gateway.ExecProcess(buildid.AppendToOutgoingContext(ctx, g.buildID), opts...)
}

View File

@ -1,19 +1,28 @@
package client
import (
"bytes"
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/gateway/client"
gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/secrets/secretsprovider"
"github.com/moby/buildkit/session/sshforward/sshprovider"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/ssh/agent"
)
func TestClientGatewayIntegration(t *testing.T) {
@ -23,6 +32,11 @@ func TestClientGatewayIntegration(t *testing.T) {
testClientGatewayEmptySolve,
testNoBuildID,
testUnknownBuildID,
testClientGatewayContainerExecPipe,
testClientGatewayContainerCancelOnRelease,
testClientGatewayContainerPID1Fail,
testClientGatewayContainerPID1Exit,
testClientGatewayContainerMounts,
}, integration.WithMirroredImages(integration.OfficialImages("busybox:latest")))
}
@ -175,3 +189,539 @@ func testUnknownBuildID(t *testing.T, sb integration.Sandbox) {
require.Error(t, err)
require.Contains(t, err.Error(), "no such job")
}
// testClientGatewayContainerCancelOnRelease is testing that all running
// processes are terminated when the container is released.
func testClientGatewayContainerCancelOnRelease(t *testing.T, sb integration.Sandbox) {
requiresLinux(t)
ctx := context.TODO()
c, err := New(ctx, sb.Address())
require.NoError(t, err)
defer c.Close()
product := "buildkit_test"
b := func(ctx context.Context, c client.Client) (*client.Result, error) {
// solve a plain busybox image to use as the container rootfs
st := llb.Image("busybox:latest")
def, err := st.Marshal(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal state")
}
r, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
})
if err != nil {
return nil, errors.Wrap(err, "failed to solve")
}
ctr, err := c.NewContainer(ctx, client.NewContainerRequest{
Mounts: []client.Mount{{
Dest: "/",
MountType: pb.MountType_BIND,
Ref: r.Ref,
}},
})
if err != nil {
return nil, err
}
start := time.Now()
defer func() {
// ensure pid1 and pid2 exit from cancel before the 10s sleep
// exits naturally
require.WithinDuration(t, start, time.Now(), 10*time.Second)
}()
// background pid1 process that starts container
pid1, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"sleep", "10"},
})
require.NoError(t, err)
pid2, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"sleep", "10"},
})
require.NoError(t, err)
// Release is the action under test: it must cancel both sleeps.
// NOTE(review): Release's error is deliberately ignored here.
ctr.Release(ctx)
// both waits should report cancellation, not a natural exit
err = pid1.Wait()
require.Contains(t, err.Error(), context.Canceled.Error())
err = pid2.Wait()
require.Contains(t, err.Error(), context.Canceled.Error())
return &client.Result{}, nil
}
_, err = c.Build(ctx, SolveOpt{}, product, b, nil)
require.NoError(t, err)
checkAllReleasable(t, c, sb, true)
}
// testClientGatewayContainerExecPipe is testing the ability to pipe multiple
// processes together, all started via `Exec` into the same container.
// We are mimicking: `echo testing | cat | cat > /tmp/foo && cat /tmp/foo`
func testClientGatewayContainerExecPipe(t *testing.T, sb integration.Sandbox) {
if sb.Rootless() {
// TODO fix this
// We get `panic: cannot statfs cgroup root` from runc when running
// this test with runc-rootless, no idea why.
t.Skip("Skipping oci-rootless for cgroup error")
}
requiresLinux(t)
ctx := context.TODO()
c, err := New(ctx, sb.Address())
require.NoError(t, err)
defer c.Close()
product := "buildkit_test"
// collects the final `cat /tmp/test` output for the assertion below
output := bytes.NewBuffer(nil)
b := func(ctx context.Context, c client.Client) (*client.Result, error) {
st := llb.Image("busybox:latest")
def, err := st.Marshal(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal state")
}
r, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
})
if err != nil {
return nil, errors.Wrap(err, "failed to solve")
}
ctr, err := c.NewContainer(ctx, client.NewContainerRequest{
Mounts: []client.Mount{{
Dest: "/",
MountType: pb.MountType_BIND,
Ref: r.Ref,
}},
})
if err != nil {
return nil, err
}
// background pid1 process that starts container
pid1, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"sleep", "10"},
})
if err != nil {
ctr.Release(ctx)
return nil, err
}
defer func() {
// cancel pid1
ctr.Release(ctx)
pid1.Wait()
}()
// first part is `echo testing | cat`
stdin2 := bytes.NewBuffer([]byte("testing"))
// stdout of pid2 feeds stdin of pid3 via an in-process pipe
stdin3, stdout2 := io.Pipe()
pid2, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"cat"},
Cwd: "/",
Tty: false,
Stdin: ioutil.NopCloser(stdin2),
Stdout: stdout2,
})
if err != nil {
return nil, err
}
// next part is: `| cat > /tmp/test`
pid3, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"sh", "-c", "cat > /tmp/test"},
Stdin: stdin3,
})
if err != nil {
return nil, err
}
err = pid2.Wait()
if err != nil {
stdout2.Close()
return nil, err
}
// close the pipe so pid3's cat sees EOF and can exit
err = stdout2.Close()
if err != nil {
return nil, err
}
err = pid3.Wait()
if err != nil {
return nil, err
}
err = stdin3.Close()
if err != nil {
return nil, err
}
// finally `cat /tmp/test` to read back what flowed through the pipe
pid4, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"cat", "/tmp/test"},
Stdout: &nopCloser{output},
})
if err != nil {
return nil, err
}
err = pid4.Wait()
if err != nil {
return nil, err
}
return &client.Result{}, nil
}
_, err = c.Build(ctx, SolveOpt{}, product, b, nil)
require.NoError(t, err)
require.Equal(t, "testing", output.String())
checkAllReleasable(t, c, sb, true)
}
// testClientGatewayContainerPID1Fail is testing clean shutdown and release
// of resources when the primary pid1 exits with non-zero exit status
func testClientGatewayContainerPID1Fail(t *testing.T, sb integration.Sandbox) {
requiresLinux(t)
ctx := context.TODO()
c, err := New(ctx, sb.Address())
require.NoError(t, err)
defer c.Close()
product := "buildkit_test"
b := func(ctx context.Context, c client.Client) (*client.Result, error) {
st := llb.Image("busybox:latest")
def, err := st.Marshal(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal state")
}
r, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
})
if err != nil {
return nil, errors.Wrap(err, "failed to solve")
}
ctr, err := c.NewContainer(ctx, client.NewContainerRequest{
Mounts: []client.Mount{{
Dest: "/",
MountType: pb.MountType_BIND,
Ref: r.Ref,
}},
})
if err != nil {
return nil, err
}
// pid1 exits immediately with a known non-zero status
pid1, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"sh", "-c", "exit 99"},
})
if err != nil {
ctr.Release(ctx)
return nil, err
}
defer ctr.Release(ctx)
// the Wait error must unwrap to an errdefs.ExitError carrying status 99
err = pid1.Wait()
var exitError *errdefs.ExitError
require.True(t, errors.As(err, &exitError))
require.Equal(t, uint32(99), exitError.ExitCode)
// propagate the error so the overall Build fails as expected below
return nil, err
}
_, err = c.Build(ctx, SolveOpt{}, product, b, nil)
require.Error(t, err)
checkAllReleasable(t, c, sb, true)
}
// testClientGatewayContainerPID1Exit is testing that all processes started
// via `Exec` are shut down when the primary pid1 process exits
func testClientGatewayContainerPID1Exit(t *testing.T, sb integration.Sandbox) {
if sb.Rootless() {
// TODO fix this
// We get `panic: cannot statfs cgroup root` when running this test
// with runc-rootless
t.Skip("Skipping runc-rootless for cgroup error")
}
requiresLinux(t)
ctx := context.TODO()
c, err := New(ctx, sb.Address())
require.NoError(t, err)
defer c.Close()
product := "buildkit_test"
b := func(ctx context.Context, c client.Client) (*client.Result, error) {
st := llb.Image("busybox:latest")
def, err := st.Marshal(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal state")
}
r, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
})
if err != nil {
return nil, errors.Wrap(err, "failed to solve")
}
ctr, err := c.NewContainer(ctx, client.NewContainerRequest{
Mounts: []client.Mount{{
Dest: "/",
MountType: pb.MountType_BIND,
Ref: r.Ref,
}},
})
if err != nil {
return nil, err
}
defer ctr.Release(ctx)
start := time.Now()
defer func() {
// ensure pid1 and pid2 exits from cancel before the 10s sleep
// exits naturally
require.WithinDuration(t, start, time.Now(), 10*time.Second)
// assert this test ran for at least one second for pid1
lapse := time.Since(start)
require.Greater(t, lapse.Seconds(), float64(1))
}()
// pid1 exits naturally after 1s, which should tear down pid2's 10s sleep
pid1, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"sleep", "1"},
})
require.NoError(t, err)
defer pid1.Wait()
pid2, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"sleep", "10"},
})
require.NoError(t, err)
// pid2's Wait error (killed when pid1 exited) is returned to Build
return &client.Result{}, pid2.Wait()
}
_, err = c.Build(ctx, SolveOpt{}, product, b, nil)
// pid2 should error with `buildkit-runc did not terminate successfully` on runc or
// `exit code: 137` (ie sigkill) on containerd
require.Error(t, err)
require.Regexp(t, "exit code: 137|buildkit-runc did not terminate successfully", err.Error())
checkAllReleasable(t, c, sb, true)
}
// testClientGatewayContainerMounts is testing mounts derived from various
// llb.States as well as cache, tmpfs, secret and ssh-socket mounts.
func testClientGatewayContainerMounts(t *testing.T, sb integration.Sandbox) {
if sb.Rootless() {
// TODO fix this
// We get `panic: cannot statfs cgroup root` when running this test
// with runc-rootless
t.Skip("Skipping runc-rootless for cgroup error")
}
requiresLinux(t)
ctx := context.TODO()
c, err := New(ctx, sb.Address())
require.NoError(t, err)
defer c.Close()
// local directory exposed to the build as the "mylocal" source
tmpdir, err := ioutil.TempDir("", "buildkit-buildctl")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
err = ioutil.WriteFile(filepath.Join(tmpdir, "local-file"), []byte("local"), 0644)
require.NoError(t, err)
// ssh-agent socket to be forwarded into the container
a := agent.NewKeyring()
sockPath, clean, err := makeSSHAgentSock(a)
require.NoError(t, err)
defer clean()
ssh, err := sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{
ID: t.Name(),
Paths: []string{sockPath},
}})
require.NoError(t, err)
product := "buildkit_test"
b := func(ctx context.Context, c client.Client) (*client.Result, error) {
// each entry becomes a BIND mount at its key after being solved
mounts := map[string]llb.State{
"/": llb.Image("busybox:latest").Run(
llb.Shlex("touch /root-file /cached/cache-file"),
llb.AddMount("/cached", llb.Scratch(), llb.AsPersistentCacheDir(t.Name(), llb.CacheMountShared)),
).Root(),
"/foo": llb.Image("busybox:latest").Run(
llb.Shlex("touch foo-file"),
llb.Dir("/tmp"),
llb.AddMount("/tmp", llb.Scratch()),
).GetMount("/tmp"),
"/local": llb.Local("mylocal"),
// TODO How do we get a results.Ref for a cache mount, tmpfs mount
}
// non-BIND mounts (cache/tmpfs/secret/ssh) have no Ref and are
// declared directly
containerMounts := []client.Mount{{
Dest: "/cached",
MountType: pb.MountType_CACHE,
CacheOpt: &pb.CacheOpt{
ID: t.Name(),
Sharing: pb.CacheSharingOpt_SHARED,
},
}, {
Dest: "/tmpfs",
MountType: pb.MountType_TMPFS,
}, {
Dest: "/run/secrets/mysecret",
MountType: pb.MountType_SECRET,
SecretOpt: &pb.SecretOpt{
ID: "/run/secrets/mysecret",
},
}, {
Dest: sockPath,
MountType: pb.MountType_SSH,
SSHOpt: &pb.SSHOpt{
ID: t.Name(),
},
}}
for mountpoint, st := range mounts {
def, err := st.Marshal(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal state")
}
r, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
})
if err != nil {
return nil, errors.Wrap(err, "failed to solve")
}
containerMounts = append(containerMounts, client.Mount{
Dest: mountpoint,
MountType: pb.MountType_BIND,
Ref: r.Ref,
})
}
ctr, err := c.NewContainer(ctx, client.NewContainerRequest{Mounts: containerMounts})
if err != nil {
return nil, err
}
// pid1 keeps the container alive while the probes below run
pid1, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"sleep", "10"},
Stderr: os.Stderr,
})
require.NoError(t, err)
defer pid1.Wait()
// probe each mount with a short-lived exec
pid, err := ctr.Start(ctx, client.StartRequest{
Args: []string{"test", "-f", "/root-file"},
})
require.NoError(t, err)
err = pid.Wait()
require.NoError(t, err)
pid, err = ctr.Start(ctx, client.StartRequest{
Args: []string{"test", "-f", "/foo/foo-file"},
})
require.NoError(t, err)
err = pid.Wait()
require.NoError(t, err)
pid, err = ctr.Start(ctx, client.StartRequest{
Args: []string{"test", "-f", "/local/local-file"},
})
require.NoError(t, err)
err = pid.Wait()
require.NoError(t, err)
pid, err = ctr.Start(ctx, client.StartRequest{
Args: []string{"test", "-f", "/cached/cache-file"},
})
require.NoError(t, err)
err = pid.Wait()
require.NoError(t, err)
pid, err = ctr.Start(ctx, client.StartRequest{
Args: []string{"test", "-w", "/tmpfs"},
})
require.NoError(t, err)
err = pid.Wait()
require.NoError(t, err)
secretOutput := bytes.NewBuffer(nil)
pid, err = ctr.Start(ctx, client.StartRequest{
Args: []string{"cat", "/run/secrets/mysecret"},
Stdout: &nopCloser{secretOutput},
})
require.NoError(t, err)
err = pid.Wait()
require.NoError(t, err)
require.Equal(t, "foo-secret", secretOutput.String())
// the forwarded ssh-agent socket must exist as a unix socket
pid, err = ctr.Start(ctx, client.StartRequest{
Args: []string{"test", "-S", sockPath},
})
require.NoError(t, err)
err = pid.Wait()
require.NoError(t, err)
// releasing while pid1 is still sleeping cancels it, hence the
// context.Canceled assertion on Build below
return &client.Result{}, ctr.Release(ctx)
}
_, err = c.Build(ctx, SolveOpt{
LocalDirs: map[string]string{
"mylocal": tmpdir,
},
Session: []session.Attachable{
ssh,
secretsprovider.FromMap(map[string][]byte{
"/run/secrets/mysecret": []byte("foo-secret"),
}),
},
}, product, b, nil)
require.Error(t, err)
require.Contains(t, err.Error(), context.Canceled.Error())
checkAllReleasable(t, c, sb, true)
}
// nopCloser adapts an io.Writer into an io.WriteCloser whose Close is a no-op.
type nopCloser struct {
io.Writer
}
// Close implements io.Closer and always succeeds without doing anything.
func (n *nopCloser) Close() error {
return nil
}

View File

@ -152,3 +152,27 @@ func (gwf *GatewayForwarder) StatFile(ctx context.Context, req *gwapi.StatFileRe
}
return fwd.StatFile(ctx, req)
}
// NewContainer forwards the NewContainer request to the gateway registered
// for the build job identified by ctx.
func (gwf *GatewayForwarder) NewContainer(ctx context.Context, req *gwapi.NewContainerRequest) (*gwapi.NewContainerResponse, error) {
	fwd, lookupErr := gwf.lookupForwarder(ctx)
	if lookupErr != nil {
		return nil, errors.Wrap(lookupErr, "forwarding NewContainer")
	}
	return fwd.NewContainer(ctx, req)
}
// ReleaseContainer forwards the ReleaseContainer request to the gateway
// registered for the build job identified by ctx.
func (gwf *GatewayForwarder) ReleaseContainer(ctx context.Context, req *gwapi.ReleaseContainerRequest) (*gwapi.ReleaseContainerResponse, error) {
	fwd, lookupErr := gwf.lookupForwarder(ctx)
	if lookupErr != nil {
		return nil, errors.Wrap(lookupErr, "forwarding ReleaseContainer")
	}
	return fwd.ReleaseContainer(ctx, req)
}
// ExecProcess forwards the bidirectional ExecProcess stream to the gateway
// registered for the build job identified by the stream's context.
func (gwf *GatewayForwarder) ExecProcess(srv gwapi.LLBBridge_ExecProcessServer) error {
	fwd, lookupErr := gwf.lookupForwarder(srv.Context())
	if lookupErr != nil {
		return errors.Wrap(lookupErr, "forwarding ExecProcess")
	}
	return fwd.ExecProcess(srv)
}

View File

@ -2,6 +2,8 @@ package containerdexecutor
import (
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -19,6 +21,7 @@ import (
"github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/network"
"github.com/opencontainers/runtime-spec/specs-go"
@ -187,6 +190,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root cache.Moun
}
}()
fixProcessOutput(&process)
cioOpts := []cio.Opt{cio.WithStreams(process.Stdin, process.Stdout, process.Stderr)}
if meta.Tty {
cioOpts = append(cioOpts, cio.WithTerminal)
@ -286,6 +290,7 @@ func (w *containerdExecutor) Exec(ctx context.Context, id string, process execut
spec.Process.Env = process.Meta.Env
}
fixProcessOutput(&process)
cioOpts := []cio.Opt{cio.WithStreams(process.Stdin, process.Stdout, process.Stderr)}
if meta.Tty {
cioOpts = append(cioOpts, cio.WithTerminal)
@ -300,6 +305,19 @@ func (w *containerdExecutor) Exec(ctx context.Context, id string, process execut
return err
}
// fixProcessOutput stubs out any missing output stream on the process.
// It seems like if containerd has one of stdin, stdout or stderr then the
// others need to be present as well otherwise we get this error:
// failed to start io pipe copy: unable to copy pipes: containerd-shim: opening file "" failed: open : no such file or directory: unknown
func fixProcessOutput(process *executor.ProcessInfo) {
	if process.Stdout != nil && process.Stderr != nil {
		return // nothing to stub
	}
	if process.Stdout == nil {
		process.Stdout = &nopCloser{ioutil.Discard}
	}
	if process.Stderr == nil {
		process.Stderr = &nopCloser{ioutil.Discard}
	}
}
func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Process, resize <-chan executor.WinSize, started func()) error {
// Not using `ctx` here because the context passed only affects the statusCh which we
// don't want cancelled when ctx.Done is sent. We want to process statusCh on cancel.
@ -356,7 +374,7 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
cancel()
}
if status.ExitCode() != 0 {
exitErr := &executor.ExitError{
exitErr := &errdefs.ExitError{
ExitCode: status.ExitCode(),
Err: status.Error(),
}
@ -379,3 +397,11 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
}
}
}
// nopCloser adapts an io.Writer into an io.WriteCloser whose Close is a
// no-op; used to stub out missing process output streams.
type nopCloser struct {
io.Writer
}
// Close implements io.Closer and always succeeds without doing anything.
func (c *nopCloser) Close() error {
return nil
}

View File

@ -2,7 +2,6 @@ package executor
import (
"context"
"fmt"
"io"
"net"
@ -55,24 +54,3 @@ type HostIP struct {
Host string
IP net.IP
}
// ExitError will be returned from Run and Exec when the container process exits with
// a non-zero exit code.
type ExitError struct {
	ExitCode uint32
	Err      error
}

// Error reports the wrapped error's message when one is present, otherwise a
// generic "exit code: N" string built from ExitCode.
func (err *ExitError) Error() string {
	if err.Err == nil {
		return fmt.Sprintf("exit code: %d", err.ExitCode)
	}
	return err.Err.Error()
}

// Unwrap exposes the wrapped error for errors.Is/errors.As, synthesizing one
// from the exit code when none was provided (so Unwrap never returns nil).
func (err *ExitError) Unwrap() error {
	if err.Err != nil {
		return err.Err
	}
	return fmt.Errorf("exit code: %d", err.ExitCode)
}

View File

@ -21,6 +21,7 @@ import (
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/network"
rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
@ -332,7 +333,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root cache.Mountable,
close(ended)
if status != 0 || err != nil {
exitErr := &executor.ExitError{
exitErr := &errdefs.ExitError{
ExitCode: uint32(status),
Err: err,
}
@ -418,7 +419,7 @@ func (w *runcExecutor) Exec(ctx context.Context, id string, process executor.Pro
var exitError *exec.ExitError
if errors.As(err, &exitError) {
err = &executor.ExitError{
err = &errdefs.ExitError{
ExitCode: uint32(exitError.ExitCode()),
Err: err,
}

View File

@ -7,12 +7,13 @@ import (
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
)
type Frontend interface {
Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string, inputs map[string]*pb.Definition, sid string) (*Result, error)
Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string, inputs map[string]*pb.Definition, sid string, sm *session.Manager) (*Result, error)
}
type FrontendLLBBridge interface {

View File

@ -2,6 +2,7 @@ package client
import (
"context"
"io"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/solver/pb"
@ -16,6 +17,61 @@ type Client interface {
ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error)
BuildOpts() BuildOpts
Inputs(ctx context.Context) (map[string]llb.State, error)
NewContainer(ctx context.Context, req NewContainerRequest) (Container, error)
}
// NewContainerRequest encapsulates the requirements for a client to define a
// new container, without defining the initial process.
type NewContainerRequest struct {
Mounts []Mount
NetMode pb.NetMode
}
// Mount allows clients to specify a filesystem mount. A Reference to a
// previously solved Result is required.
type Mount struct {
Selector string
Dest string
Ref Reference
Readonly bool
MountType pb.MountType
// CacheOpt, SecretOpt and SSHOpt configure the corresponding non-bind
// mount types; only the one matching MountType is consulted.
CacheOpt *pb.CacheOpt
SecretOpt *pb.SecretOpt
SSHOpt *pb.SSHOpt
}
// Container is used to start new processes inside a container and release the
// container resources when done.
type Container interface {
Start(context.Context, StartRequest) (ContainerProcess, error)
Release(context.Context) error
}
// StartRequest encapsulates the arguments to define a process within a
// container.
type StartRequest struct {
Args []string
Env []string
User string
Cwd string
Tty bool
// Stdin/Stdout/Stderr are optional; implementations may stub out
// streams that are left nil.
Stdin io.ReadCloser
Stdout, Stderr io.WriteCloser
SecurityMode pb.SecurityMode
}
// WinSize is same as executor.WinSize, copied here to prevent circular package
// dependencies.
type WinSize struct {
Rows uint32
Cols uint32
}
// ContainerProcess represents a process within a container.
type ContainerProcess interface {
Wait() error
Resize(ctx context.Context, size WinSize) error
// TODO Signal(ctx context.Context, sig os.Signal)
}
type Reference interface {

View File

@ -0,0 +1,338 @@
package gateway
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/mounts"
opspb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/stack"
utilsystem "github.com/moby/buildkit/util/system"
"github.com/moby/buildkit/worker"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
// NewContainerRequest describes a container to be created by the gateway,
// including its network mode and the full set of mounts (one of which must
// target the root, "/").
type NewContainerRequest struct {
ContainerID string
NetMode opspb.NetMode
Mounts []Mount
}
// Mount used for the gateway.Container is nearly identical to the client.Mount
// except it has a RefProxy instead of Ref to allow for a common abstraction
// between gateway clients.
type Mount struct {
Dest string
Selector string
Readonly bool
MountType opspb.MountType
// RefProxy lazily resolves to the solved result backing this mount;
// nil for mount types that need no snapshot (cache, tmpfs, secret, ssh).
RefProxy solver.ResultProxy
CacheOpt *opspb.CacheOpt
SecretOpt *opspb.SecretOpt
SSHOpt *opspb.SSHOpt
}
// toProtoMount converts a gateway Mount into its wire-level (opspb)
// representation. The RefProxy is intentionally not carried over; result
// references are resolved separately by the caller.
func toProtoMount(m Mount) *opspb.Mount {
	pm := &opspb.Mount{
		Dest:      m.Dest,
		Selector:  m.Selector,
		Readonly:  m.Readonly,
		MountType: m.MountType,
	}
	pm.CacheOpt = m.CacheOpt
	pm.SecretOpt = m.SecretOpt
	pm.SSHOpt = m.SSHOpt
	return pm
}
// NewContainer materializes the mounts described in req and returns a
// client.Container that can Start processes via the given executor. The root
// ("/") mount is mandatory; all mutable refs and cache mounts created here are
// recorded in ctr.cleanup and released (LIFO) by Container.Release.
func NewContainer(ctx context.Context, e executor.Executor, sm *session.Manager, g session.Group, req NewContainerRequest) (client.Container, error) {
ctx, cancel := context.WithCancel(ctx)
eg, ctx := errgroup.WithContext(ctx)
ctr := &gatewayContainer{
id: req.ContainerID,
netMode: req.NetMode,
executor: e,
errGroup: eg,
ctx: ctx,
cancel: cancel,
}
// makeMutable wraps an immutable ref in a fresh mutable layer and
// registers its release with the container's cleanup list.
makeMutable := func(worker worker.Worker, ref cache.ImmutableRef) (cache.MutableRef, error) {
mRef, err := worker.CacheManager().New(ctx, ref)
if err != nil {
return nil, stack.Enable(err)
}
ctr.cleanup = append(ctr.cleanup, func() error {
return stack.Enable(mRef.Release(context.TODO()))
})
return mRef, nil
}
var mm *mounts.MountManager
mnts := req.Mounts
// locate and process the root mount first; it also determines which
// worker's MountManager handles the remaining special mounts
for i, m := range mnts {
if m.Dest == opspb.RootMount && m.RefProxy != nil {
res, err := m.RefProxy.Result(ctx)
if err != nil {
return nil, stack.Enable(err)
}
workerRef, ok := res.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid reference for exec %T", res.Sys())
}
name := fmt.Sprintf("container %s", req.ContainerID)
mm = mounts.NewMountManager(name, workerRef.Worker.CacheManager(), sm, workerRef.Worker.MetadataStore())
ctr.rootFS = workerRef.ImmutableRef
if !m.Readonly {
ctr.rootFS, err = makeMutable(workerRef.Worker, workerRef.ImmutableRef)
if err != nil {
return nil, stack.Enable(err)
}
}
// delete root mount from list, handled here
mnts = append(mnts[:i], mnts[i+1:]...)
break
}
}
if ctr.rootFS == nil {
return nil, errors.Errorf("root mount required")
}
// translate each remaining mount into an executor.Mount
for _, m := range mnts {
var ref cache.ImmutableRef
var mountable cache.Mountable
if m.RefProxy != nil {
res, err := m.RefProxy.Result(ctx)
if err != nil {
return nil, stack.Enable(err)
}
workerRef, ok := res.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid reference for exec %T", res.Sys())
}
ref = workerRef.ImmutableRef
mountable = ref
if !m.Readonly {
mountable, err = makeMutable(workerRef.Worker, ref)
if err != nil {
return nil, stack.Enable(err)
}
}
}
switch m.MountType {
case opspb.MountType_BIND:
// nothing to do here
case opspb.MountType_CACHE:
mRef, err := mm.MountableCache(ctx, toProtoMount(m), ref)
if err != nil {
return nil, err
}
mountable = mRef
ctr.cleanup = append(ctr.cleanup, func() error {
return stack.Enable(mRef.Release(context.TODO()))
})
case opspb.MountType_TMPFS:
mountable = mm.MountableTmpFS()
case opspb.MountType_SECRET:
var err error
mountable, err = mm.MountableSecret(ctx, toProtoMount(m), g)
if err != nil {
return nil, err
}
// a nil mountable means the secret is optional and absent
if mountable == nil {
continue
}
case opspb.MountType_SSH:
var err error
mountable, err = mm.MountableSSH(ctx, toProtoMount(m), g)
if err != nil {
return nil, err
}
if mountable == nil {
continue
}
default:
return nil, errors.Errorf("mount type %s not implemented", m.MountType)
}
// validate that there is a mount
if mountable == nil {
return nil, errors.Errorf("mount %s has no input", m.Dest)
}
execMount := executor.Mount{
Src: mountable,
Selector: m.Selector,
Dest: m.Dest,
Readonly: m.Readonly,
}
ctr.mounts = append(ctr.mounts, execMount)
}
// sort mounts so parents are mounted first
sort.Slice(ctr.mounts, func(i, j int) bool {
return ctr.mounts[i].Dest < ctr.mounts[j].Dest
})
return ctr, nil
}
// gatewayContainer implements client.Container on top of an
// executor.Executor. The first Start launches the container (Run); later
// Starts exec into it.
type gatewayContainer struct {
id string
netMode opspb.NetMode
rootFS cache.Mountable
mounts []executor.Mount
executor executor.Executor
// started flips to true on the first Start call; guarded by mu
started bool
// errGroup aggregates every process spawned in this container
errGroup *errgroup.Group
mu sync.Mutex
// cleanup funcs registered during NewContainer; run LIFO by Release
cleanup []func() error
// ctx/cancel bound the lifetime of all container processes
ctx context.Context
cancel func()
}
// Start launches a process in the container. The first call runs the
// container's pid1 via executor.Run; subsequent calls exec additional
// processes into the running container. The returned ContainerProcess
// reports the process's exit via Wait and supports terminal resizing.
func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartRequest) (client.ContainerProcess, error) {
resize := make(chan executor.WinSize)
procInfo := executor.ProcessInfo{
Meta: executor.Meta{
Args: req.Args,
Env: req.Env,
User: req.User,
Cwd: req.Cwd,
Tty: req.Tty,
NetMode: gwCtr.netMode,
SecurityMode: req.SecurityMode,
},
Stdin: req.Stdin,
Stdout: req.Stdout,
Stderr: req.Stderr,
Resize: resize,
}
// default the working directory and PATH; TERM only matters for ttys
if procInfo.Meta.Cwd == "" {
procInfo.Meta.Cwd = "/"
}
procInfo.Meta.Env = addDefaultEnvvar(procInfo.Meta.Env, "PATH", utilsystem.DefaultPathEnv)
if req.Tty {
procInfo.Meta.Env = addDefaultEnvvar(procInfo.Meta.Env, "TERM", "xterm")
}
// mark that we have started on the first call to execProcess for this
// container, so that future calls will call Exec rather than Run
gwCtr.mu.Lock()
started := gwCtr.started
gwCtr.started = true
gwCtr.mu.Unlock()
// per-process errgroup derived from the container's lifetime context
eg, ctx := errgroup.WithContext(gwCtr.ctx)
gwProc := &gatewayContainerProcess{
resize: resize,
errGroup: eg,
groupCtx: ctx,
}
if !started {
startedCh := make(chan struct{})
gwProc.errGroup.Go(func() error {
logrus.Debugf("Starting new container for %s with args: %q", gwCtr.id, procInfo.Meta.Args)
err := gwCtr.executor.Run(ctx, gwCtr.id, gwCtr.rootFS, gwCtr.mounts, procInfo, startedCh)
return stack.Enable(err)
})
// block until the container has actually started (or failed)
select {
case <-ctx.Done():
case <-startedCh:
}
} else {
gwProc.errGroup.Go(func() error {
logrus.Debugf("Execing into container %s with args: %q", gwCtr.id, procInfo.Meta.Args)
err := gwCtr.executor.Exec(ctx, gwCtr.id, procInfo)
return stack.Enable(err)
})
}
// tie this process's lifetime into the container-wide errgroup so
// Release waits for it
gwCtr.errGroup.Go(gwProc.errGroup.Wait)
return gwProc, nil
}
// Release cancels anything still running in the container, waits for all
// processes to finish, then tears down the container's resources in reverse
// order of creation. A process error takes precedence over a cleanup error;
// within cleanup, the first error encountered wins.
func (gwCtr *gatewayContainer) Release(ctx context.Context) error {
	gwCtr.cancel()
	procErr := gwCtr.errGroup.Wait()

	var cleanupErr error
	for i := len(gwCtr.cleanup) - 1; i >= 0; i-- { // release in LIFO order
		if err := gwCtr.cleanup[i](); cleanupErr == nil {
			cleanupErr = err
		}
	}

	if procErr != nil {
		return stack.Enable(procErr)
	}
	return stack.Enable(cleanupErr)
}
// gatewayContainerProcess implements client.ContainerProcess for a single
// process started in a gatewayContainer.
type gatewayContainerProcess struct {
// errGroup holds the goroutine running this process; groupCtx is done
// once the process has finished
errGroup *errgroup.Group
groupCtx context.Context
// resize delivers terminal size changes to the executor; closed by Wait
resize chan<- executor.WinSize
mu sync.Mutex
}
// Wait blocks until the process exits, then closes the resize channel (under
// mu so Resize cannot race a send against the close) and returns the
// process's error, if any.
// NOTE(review): calling Wait twice would close resize twice and panic —
// callers appear to Wait at most once; confirm.
func (gwProc *gatewayContainerProcess) Wait() error {
err := stack.Enable(gwProc.errGroup.Wait())
gwProc.mu.Lock()
defer gwProc.mu.Unlock()
close(gwProc.resize)
return err
}
// Resize forwards a terminal size change to the running process. It is a
// no-op (returning nil) when the process has already finished or the caller's
// context is done. mu serializes against Wait closing the resize channel.
func (gwProc *gatewayContainerProcess) Resize(ctx context.Context, size client.WinSize) error {
gwProc.mu.Lock()
defer gwProc.mu.Unlock()
// is the container done or should we proceed with sending event?
select {
case <-gwProc.groupCtx.Done():
return nil
case <-ctx.Done():
return nil
default:
}
// now we select on contexts again in case p.resize blocks b/c
// container no longer reading from it. In that case when
// the errgroup finishes we want to unblock on the write
// and exit
select {
case <-gwProc.groupCtx.Done():
case <-ctx.Done():
case gwProc.resize <- executor.WinSize{Cols: size.Cols, Rows: size.Rows}:
}
return nil
}
// addDefaultEnvvar appends "k=v" to env unless a value for k is already
// present; the (possibly unchanged) slice is returned.
func addDefaultEnvvar(env []string, k, v string) []string {
	prefix := k + "="
	for _, kv := range env {
		if strings.HasPrefix(kv, prefix) {
			return env
		}
	}
	return append(env, prefix+v)
}

View File

@ -9,8 +9,11 @@ import (
clienttypes "github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/frontend/gateway"
"github.com/moby/buildkit/frontend/gateway/client"
gwpb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
opspb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
@ -19,12 +22,13 @@ import (
fstypes "github.com/tonistiigi/fsutil/types"
)
func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, workerInfos []clienttypes.WorkerInfo, sid string) (*bridgeClient, error) {
func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, workerInfos []clienttypes.WorkerInfo, sid string, sm *session.Manager) (*bridgeClient, error) {
return &bridgeClient{
opts: opts,
inputs: inputs,
FrontendLLBBridge: llbBridge,
sid: sid,
sm: sm,
workerInfos: workerInfos,
final: map[*ref]struct{}{},
}, nil
@ -37,6 +41,7 @@ type bridgeClient struct {
inputs map[string]*opspb.Definition
final map[*ref]struct{}
sid string
sm *session.Manager
refs []*ref
workerInfos []clienttypes.WorkerInfo
}
@ -149,6 +154,41 @@ func (c *bridgeClient) discard(err error) {
}
}
// NewContainer creates a gateway container on behalf of the in-process
// frontend, converting the client-level mount refs into solver result
// proxies. A fresh container id is generated here.
func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) {
	ctrReq := gateway.NewContainerRequest{
		ContainerID: identity.NewID(),
		NetMode:     req.NetMode,
	}

	for _, m := range req.Mounts {
		var refProxy solver.ResultProxy
		if m.Ref != nil {
			r, ok := m.Ref.(*ref)
			if !ok {
				return nil, errors.Errorf("unexpected Ref type: %T", m.Ref)
			}
			refProxy = r
		}
		ctrReq.Mounts = append(ctrReq.Mounts, gateway.Mount{
			Dest:      m.Dest,
			Selector:  m.Selector,
			Readonly:  m.Readonly,
			MountType: m.MountType,
			RefProxy:  refProxy,
			CacheOpt:  m.CacheOpt,
			SecretOpt: m.SecretOpt,
			SSHOpt:    m.SSHOpt,
		})
	}

	ctr, err := gateway.NewContainer(ctx, c, c.sm, session.NewGroup(c.sid), ctrReq)
	if err != nil {
		return nil, err
	}
	return ctr, nil
}
// ref wraps a solver.ResultProxy so it can be handed out as a gateway client
// reference and tracked by the bridgeClient (see the `final` set).
type ref struct {
	solver.ResultProxy
}

View File

@ -5,6 +5,7 @@ import (
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver/pb"
)
@ -20,8 +21,8 @@ type GatewayForwarder struct {
f client.BuildFunc
}
func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*pb.Definition, sid string) (retRes *frontend.Result, retErr error) {
c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers.WorkerInfos(), sid)
func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*pb.Definition, sid string, sm *session.Manager) (retRes *frontend.Result, retErr error) {
c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers.WorkerInfos(), sid, sm)
if err != nil {
return nil, err
}

View File

@ -12,7 +12,9 @@ import (
"sync"
"time"
"github.com/containerd/containerd"
"github.com/docker/distribution/reference"
"github.com/gogo/googleapis/google/rpc"
gogotypes "github.com/gogo/protobuf/types"
"github.com/golang/protobuf/ptypes/any"
apitypes "github.com/moby/buildkit/api/types"
@ -23,20 +25,26 @@ import (
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/frontend"
gwclient "github.com/moby/buildkit/frontend/gateway/client"
pb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/errdefs"
opspb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/stack"
"github.com/moby/buildkit/util/tracing"
"github.com/moby/buildkit/worker"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/http2"
"golang.org/x/sync/errgroup"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/health"
"google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
@ -67,7 +75,7 @@ func filterPrefix(opts map[string]string, pfx string) map[string]string {
return m
}
func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, sid string) (*frontend.Result, error) {
func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, sid string, sm *session.Manager) (*frontend.Result, error) {
source, ok := opts[keySource]
if !ok {
return nil, errors.Errorf("no source specified for gateway")
@ -175,7 +183,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
defer rootFS.Release(context.TODO())
}
lbf, ctx, err := serveLLBBridgeForwarder(ctx, llbBridge, gf.workers, inputs, sid)
lbf, ctx, err := serveLLBBridgeForwarder(ctx, llbBridge, gf.workers, inputs, sid, sm)
defer lbf.conn.Close() //nolint
if err != nil {
return nil, err
@ -302,11 +310,11 @@ func (lbf *llbBridgeForwarder) Result() (*frontend.Result, error) {
return lbf.result, nil
}
func NewBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string) LLBBridgeForwarder {
return newBridgeForwarder(ctx, llbBridge, workers, inputs, sid)
func NewBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string, sm *session.Manager) LLBBridgeForwarder {
return newBridgeForwarder(ctx, llbBridge, workers, inputs, sid, sm)
}
func newBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string) *llbBridgeForwarder {
func newBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string, sm *session.Manager) *llbBridgeForwarder {
lbf := &llbBridgeForwarder{
callCtx: ctx,
llbBridge: llbBridge,
@ -316,13 +324,15 @@ func newBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridg
workers: workers,
inputs: inputs,
sid: sid,
sm: sm,
ctrs: map[string]gwclient.Container{},
}
return lbf
}
func serveLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string) (*llbBridgeForwarder, context.Context, error) {
func serveLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string, sm *session.Manager) (*llbBridgeForwarder, context.Context, error) {
ctx, cancel := context.WithCancel(ctx)
lbf := newBridgeForwarder(ctx, llbBridge, workers, inputs, sid)
lbf := newBridgeForwarder(ctx, llbBridge, workers, inputs, sid, sm)
server := grpc.NewServer(grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor), grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor))
grpc_health_v1.RegisterHealthServer(server, health.NewServer())
pb.RegisterLLBBridgeServer(server, lbf)
@ -415,7 +425,10 @@ type llbBridgeForwarder struct {
inputs map[string]*opspb.Definition
isErrServerClosed bool
sid string
sm *session.Manager
*pipe
ctrs map[string]gwclient.Container
ctrsMu sync.Mutex
}
func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.ResolveImageConfigRequest) (*pb.ResolveImageConfigResponse, error) {
@ -738,6 +751,415 @@ func (lbf *llbBridgeForwarder) Inputs(ctx context.Context, in *pb.InputsRequest)
}, nil
}
// NewContainer creates (but does not start) a container for the remote
// frontend, translating wire-level mount descriptions into in-process
// Mounts and registering the container under its client-chosen ID so later
// ExecProcess / ReleaseContainer calls can find it.
func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewContainerRequest) (_ *pb.NewContainerResponse, err error) {
	logrus.Debugf("|<--- NewContainer %s", in.ContainerID)
	ctrReq := NewContainerRequest{
		ContainerID: in.ContainerID,
		NetMode:     in.Network,
	}
	for _, m := range in.Mounts {
		var refProxy solver.ResultProxy
		if m.ResultID != "" {
			// resolve the client-side result id back to the solver proxy
			refProxy, err = lbf.convertRef(m.ResultID)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to find ref %s for %q mount", m.ResultID, m.Dest)
			}
		}
		ctrReq.Mounts = append(ctrReq.Mounts, Mount{
			Dest:      m.Dest,
			Selector:  m.Selector,
			Readonly:  m.Readonly,
			MountType: m.MountType,
			RefProxy:  refProxy,
			CacheOpt:  m.CacheOpt,
			SecretOpt: m.SecretOpt,
			SSHOpt:    m.SSHOpt,
		})
	}

	// Not using `ctx` here because it will get cancelled as soon as NewContainer returns
	// and we want the context to live for the duration of the container.
	group := session.NewGroup(lbf.sid)

	ctr, err := NewContainer(context.Background(), lbf.llbBridge, lbf.sm, group, ctrReq)
	if err != nil {
		return nil, stack.Enable(err)
	}
	// err is the named return: any later error (e.g. duplicate id) triggers
	// this deferred release of the freshly created container.
	defer func() {
		if err != nil {
			ctr.Release(ctx) // ensure release on error
		}
	}()

	lbf.ctrsMu.Lock()
	defer lbf.ctrsMu.Unlock()
	// ensure we are not clobbering a dup container id request
	if _, ok := lbf.ctrs[in.ContainerID]; ok {
		return nil, stack.Enable(status.Errorf(codes.AlreadyExists, "Container %s already exists", in.ContainerID))
	}
	lbf.ctrs[in.ContainerID] = ctr
	return &pb.NewContainerResponse{}, nil
}
// ReleaseContainer tears down a container previously created via
// NewContainer and forgets its ID.
func (lbf *llbBridgeForwarder) ReleaseContainer(ctx context.Context, in *pb.ReleaseContainerRequest) (*pb.ReleaseContainerResponse, error) {
	logrus.Debugf("|<--- ReleaseContainer %s", in.ContainerID)

	// Remove the entry under the lock, but run the (potentially slow)
	// release outside it so other container requests are not blocked.
	lbf.ctrsMu.Lock()
	ctr, found := lbf.ctrs[in.ContainerID]
	if found {
		delete(lbf.ctrs, in.ContainerID)
	}
	lbf.ctrsMu.Unlock()

	if !found {
		return nil, errors.Errorf("container details for %s not found", in.ContainerID)
	}
	return &pb.ReleaseContainerResponse{}, stack.Enable(ctr.Release(ctx))
}
// processIO owns the io.Pipe pairs that connect one container process's
// stdio to the grpc ExecProcess stream. Each fd has a "process" end (handed
// to the executor) and a "server" end (driven by the stream handler); the
// done channel closes once every end has been released.
type processIO struct {
	id       string
	mu       sync.Mutex
	resize   func(context.Context, gwclient.WinSize) error
	done     chan struct{}
	doneOnce sync.Once
	// these track the process side of the io pipe for
	// read (fd=0) and write (fd=1, fd=2)
	processReaders map[uint32]io.ReadCloser
	processWriters map[uint32]io.WriteCloser
	// these track the server side of the io pipe, so
	// when we receive an EOF over grpc, we will close
	// this end
	serverWriters map[uint32]io.WriteCloser
	serverReaders map[uint32]io.ReadCloser
}

// newProcessIO builds pipes for the requested fds: fd 0 is plumbed so the
// process reads (stdin), every other fd so the process writes
// (stdout/stderr).
func newProcessIO(id string, openFds []uint32) *processIO {
	pio := &processIO{
		id:             id,
		processReaders: map[uint32]io.ReadCloser{},
		processWriters: map[uint32]io.WriteCloser{},
		serverReaders:  map[uint32]io.ReadCloser{},
		serverWriters:  map[uint32]io.WriteCloser{},
		done:           make(chan struct{}),
	}
	for _, fd := range openFds {
		// TODO do we know which way to pipe each fd? For now assume fd0 is for
		// reading, and the rest are for writing
		r, w := io.Pipe()
		if fd == 0 {
			pio.processReaders[fd] = r
			pio.serverWriters[fd] = w
		} else {
			pio.processWriters[fd] = w
			pio.serverReaders[fd] = r
		}
	}

	return pio
}

// Close shuts down the process-side stdin reader and the server-side output
// readers; the opposite pipe ends observe this as EOF/ErrClosedPipe. The
// first close error encountered is returned.
func (pio *processIO) Close() (err error) {
	pio.mu.Lock()
	defer pio.mu.Unlock()
	for fd, r := range pio.processReaders {
		delete(pio.processReaders, fd)
		err1 := r.Close()
		if err1 != nil && err == nil {
			err = stack.Enable(err1)
		}
	}
	for fd, w := range pio.serverReaders {
		delete(pio.serverReaders, fd)
		err1 := w.Close()
		if err1 != nil && err == nil {
			err = stack.Enable(err1)
		}
	}
	pio.Done()
	return err
}

// Done closes the done channel (once) after every pipe end has been removed
// from the tracking maps. NOTE(review): it reads the maps without taking mu;
// the callers visible in this file (Close, Write, the ExecProcess output
// goroutines) all hold pio.mu when calling it — confirm before calling from
// new code.
func (pio *processIO) Done() {
	stillOpen := len(pio.processReaders) + len(pio.processWriters) + len(pio.serverReaders) + len(pio.serverWriters)
	if stillOpen == 0 {
		pio.doneOnce.Do(func() {
			close(pio.done)
		})
	}
}

// Write delivers client stdin data to the process. An EOF flag or a write
// error closes the server-side writer and removes it from tracking (which
// may complete Done).
func (pio *processIO) Write(f *pb.FdMessage) (err error) {
	pio.mu.Lock()
	writer := pio.serverWriters[f.Fd]
	pio.mu.Unlock()
	if writer == nil {
		return status.Errorf(codes.OutOfRange, "fd %d unavailable to write", f.Fd)
	}
	defer func() {
		if err != nil || f.EOF {
			writer.Close()
			pio.mu.Lock()
			defer pio.mu.Unlock()
			delete(pio.serverWriters, f.Fd)
			pio.Done()
		}
	}()
	if len(f.Data) > 0 {
		_, err = writer.Write(f.Data)
		return stack.Enable(err)
	}
	return nil
}
// outputWriter forwards bytes produced by the container process (stdout or
// stderr) to the client as FdMessages on the ExecProcess stream.
type outputWriter struct {
	stream    pb.LLBBridge_ExecProcessServer
	fd        uint32
	processID string
}

// Write implements io.Writer: each call becomes a single FdMessage. The full
// length is reported as written whenever the stream send succeeds.
func (w *outputWriter) Write(msg []byte) (int, error) {
	logrus.Debugf("|---> File Message %s, fd=%d, %d bytes", w.processID, w.fd, len(msg))
	err := w.stream.Send(&pb.ExecMessage{
		ProcessID: w.processID,
		Input: &pb.ExecMessage_File{
			File: &pb.FdMessage{
				Fd:   w.fd,
				Data: msg,
			},
		},
	})
	return len(msg), stack.Enable(err)
}
// ExecProcess is the server side of the bidirectional exec stream. A single
// stream multiplexes every process the client starts: Init messages launch a
// process in a previously created container, File messages carry stdio, and
// Resize messages adjust tty dimensions. Output, exit status, and a final
// Done are sent back on the same stream keyed by ProcessID.
func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) error {
	eg, ctx := errgroup.WithContext(srv.Context())

	msgs := make(chan *pb.ExecMessage)

	// receive loop: pull messages off the grpc stream and hand them to the
	// dispatch goroutine below; closing msgs signals end of stream
	eg.Go(func() error {
		defer close(msgs)
		for {
			execMsg, err := srv.Recv()
			if err != nil {
				if errors.Is(err, io.EOF) {
					return nil
				}
				return stack.Enable(err)
			}
			switch m := execMsg.GetInput().(type) {
			case *pb.ExecMessage_Init:
				logrus.Debugf("|<--- Init Message %s", execMsg.ProcessID)
			case *pb.ExecMessage_File:
				if m.File.EOF {
					logrus.Debugf("|<--- File Message %s, fd=%d, EOF", execMsg.ProcessID, m.File.Fd)
				} else {
					logrus.Debugf("|<--- File Message %s, fd=%d, %d bytes", execMsg.ProcessID, m.File.Fd, len(m.File.Data))
				}
			case *pb.ExecMessage_Resize:
				logrus.Debugf("|<--- Resize Message %s", execMsg.ProcessID)
			}
			select {
			case <-ctx.Done():
			case msgs <- execMsg:
			}
		}
	})

	// dispatch loop: route each message to the right process IO, starting
	// new processes on Init
	eg.Go(func() error {
		pios := make(map[string]*processIO)
		// close any stray pios on exit to make sure
		// all the associated resources get cleaned up
		defer func() {
			for _, pio := range pios {
				pio.Close()
			}
		}()

		for {
			var execMsg *pb.ExecMessage
			select {
			case <-ctx.Done():
				return nil
			case execMsg = <-msgs:
			}
			// nil means the receive loop closed msgs (stream ended)
			if execMsg == nil {
				return nil
			}

			pid := execMsg.ProcessID
			if pid == "" {
				return stack.Enable(status.Errorf(codes.InvalidArgument, "ProcessID required"))
			}

			pio, pioFound := pios[pid]

			if data := execMsg.GetFile(); data != nil {
				if !pioFound {
					return stack.Enable(status.Errorf(codes.NotFound, "IO for process %q not found", pid))
				}
				err := pio.Write(data)
				if err != nil {
					return stack.Enable(err)
				}
			} else if resize := execMsg.GetResize(); resize != nil {
				if !pioFound {
					return stack.Enable(status.Errorf(codes.NotFound, "IO for process %q not found", pid))
				}
				pio.resize(ctx, gwclient.WinSize{
					Cols: resize.Cols,
					Rows: resize.Rows,
				})
			} else if init := execMsg.GetInit(); init != nil {
				if pioFound {
					return stack.Enable(status.Errorf(codes.AlreadyExists, "Process %s already exists", pid))
				}
				id := init.ContainerID
				lbf.ctrsMu.Lock()
				ctr, ok := lbf.ctrs[id]
				lbf.ctrsMu.Unlock()
				if !ok {
					return stack.Enable(status.Errorf(codes.NotFound, "container %q previously released or not created", id))
				}

				// NOTE(review): initCtx deliberately detaches the process start
				// from the stream context; the deferred cancel only fires when
				// this dispatch loop returns (defer in a loop) — teardown while
				// the stream lives is driven by pio.Close/container release.
				initCtx, initCancel := context.WithCancel(context.Background())
				defer initCancel()

				pio := newProcessIO(pid, init.Fds)
				pios[pid] = pio

				proc, err := ctr.Start(initCtx, gwclient.StartRequest{
					Args:   init.Meta.Args,
					Env:    init.Meta.Env,
					User:   init.Meta.User,
					Cwd:    init.Meta.Cwd,
					Tty:    init.Tty,
					Stdin:  pio.processReaders[0],
					Stdout: pio.processWriters[1],
					Stderr: pio.processWriters[2],
				})
				if err != nil {
					return stack.Enable(err)
				}
				pio.resize = proc.Resize

				// send Done once all IO for this process has drained
				eg.Go(func() error {
					<-pio.done
					logrus.Debugf("|---> Done Message %s", pid)
					err := srv.Send(&pb.ExecMessage{
						ProcessID: pid,
						Input: &pb.ExecMessage_Done{
							Done: &pb.DoneMessage{},
						},
					})
					return stack.Enable(err)
				})

				// wait for the process and report its exit status to the client
				eg.Go(func() error {
					defer func() {
						pio.Close()
					}()
					err := proc.Wait()

					var statusCode uint32
					var exitError *errdefs.ExitError
					var statusError *rpc.Status
					if err != nil {
						statusCode = containerd.UnknownExitStatus
						st, _ := status.FromError(grpcerrors.ToGRPC(err))
						stp := st.Proto()
						statusError = &rpc.Status{
							Code:    stp.Code,
							Message: stp.Message,
							Details: convertToGogoAny(stp.Details),
						}
					}
					if errors.As(err, &exitError) {
						statusCode = exitError.ExitCode
					}
					logrus.Debugf("|---> Exit Message %s, code=%d, error=%s", pid, statusCode, err)
					sendErr := srv.Send(&pb.ExecMessage{
						ProcessID: pid,
						Input: &pb.ExecMessage_Exit{
							Exit: &pb.ExitMessage{
								Code:  statusCode,
								Error: statusError,
							},
						},
					})

					if sendErr != nil && err != nil {
						return errors.Wrap(sendErr, err.Error())
					} else if sendErr != nil {
						return stack.Enable(sendErr)
					}

					if err != nil && statusCode != 0 {
						// this was a container exit error which is "normal" so
						// don't return this error from the errgroup
						return nil
					}
					return stack.Enable(err)
				})

				logrus.Debugf("|---> Started Message %s", pid)
				err = srv.Send(&pb.ExecMessage{
					ProcessID: pid,
					Input: &pb.ExecMessage_Started{
						Started: &pb.StartedMessage{},
					},
				})
				if err != nil {
					return stack.Enable(err)
				}

				// start sending Fd output back to client, this is done after
				// StartedMessage so that Fd output will not potentially arrive
				// to the client before "Started" as the container starts up.
				for fd, file := range pio.serverReaders {
					fd, file := fd, file // capture per-iteration copies for the goroutine
					eg.Go(func() error {
						defer func() {
							file.Close()
							pio.mu.Lock()
							defer pio.mu.Unlock()
							w := pio.processWriters[fd]
							if w != nil {
								w.Close()
							}
							delete(pio.processWriters, fd)
							pio.Done()
						}()
						dest := &outputWriter{
							stream:    srv,
							fd:        uint32(fd),
							processID: pid,
						}
						_, err := io.Copy(dest, file)
						// ignore ErrClosedPipe, it is EOF for our usage.
						if err != nil && !errors.Is(err, io.ErrClosedPipe) {
							return stack.Enable(err)
						}
						// no error so must be EOF
						logrus.Debugf("|---> File Message %s, fd=%d, EOF", pid, fd)
						err = srv.Send(&pb.ExecMessage{
							ProcessID: pid,
							Input: &pb.ExecMessage_File{
								File: &pb.FdMessage{
									Fd:  uint32(fd),
									EOF: true,
								},
							},
						})
						return stack.Enable(err)
					})
				}
			}
		}
	})

	err := eg.Wait()
	return stack.Enable(err)
}
func (lbf *llbBridgeForwarder) convertRef(id string) (solver.ResultProxy, error) {
if id == "" {
return nil, nil
@ -776,3 +1198,11 @@ func convertGogoAny(in []*gogotypes.Any) []*any.Any {
}
return out
}
func convertToGogoAny(in []*any.Any) []*gogotypes.Any {
out := make([]*gogotypes.Any, len(in))
for i := range in {
out[i] = &gogotypes.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value}
}
return out
}

View File

@ -3,38 +3,48 @@ package grpcclient
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"os"
"strings"
"sync"
"time"
"github.com/containerd/containerd"
"github.com/gogo/googleapis/google/rpc"
gogotypes "github.com/gogo/protobuf/types"
"github.com/golang/protobuf/ptypes/any"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/gateway/client"
pb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver/errdefs"
opspb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/grpcerrors"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
fstypes "github.com/tonistiigi/fsutil/types"
"golang.org/x/sync/errgroup"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const frontendPrefix = "BUILDKIT_FRONTEND_OPT_"
type GrpcClient interface {
client.Client
Run(context.Context, client.BuildFunc) error
}
func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) {
ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
defer cancel()
resp, err := c.Ping(ctx, &pb.PingRequest{})
pingCtx, pingCancel := context.WithTimeout(ctx, 15*time.Second)
defer pingCancel()
resp, err := c.Ping(pingCtx, &pb.PingRequest{})
if err != nil {
return nil, err
}
@ -56,6 +66,7 @@ func New(ctx context.Context, opts map[string]string, session, product string, c
caps: pb.Caps.CapSet(resp.FrontendAPICaps),
llbCaps: opspb.Caps.CapSet(resp.LLBCaps),
requests: map[string]*pb.SolveRequest{},
execMsgs: newMessageForwarder(ctx, c),
}, nil
}
@ -167,6 +178,13 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
}()
}
defer func() {
err = c.execMsgs.Release()
if err != nil && retError != nil {
retError = err
}
}()
if res, err = f(ctx, c); err != nil {
return err
}
@ -257,6 +275,7 @@ type grpcClient struct {
caps apicaps.CapSet
llbCaps apicaps.CapSet
requests map[string]*pb.SolveRequest
execMsgs *messageForwarder
}
func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, error) {
@ -427,7 +446,438 @@ func (c *grpcClient) Inputs(ctx context.Context) (map[string]llb.State, error) {
inputs[key] = llb.NewState(op)
}
return inputs, nil
}
// procMessageForwarder is created per container process to act as the
// communication channel between the process and the ExecProcess message
// stream.
type procMessageForwarder struct {
	done      chan struct{} // closed by Close to stop Send/Recv
	closeOnce sync.Once
	msgs      chan *pb.ExecMessage
}

func newProcMessageForwarder() *procMessageForwarder {
	return &procMessageForwarder{
		done: make(chan struct{}),
		msgs: make(chan *pb.ExecMessage),
	}
}

// Send hands m to a Recv caller. If the forwarder has been closed it instead
// closes the msgs channel (once) so pending receivers unblock; if ctx is
// cancelled the message is dropped.
func (b *procMessageForwarder) Send(ctx context.Context, m *pb.ExecMessage) {
	select {
	case <-ctx.Done():
	case <-b.done:
		b.closeOnce.Do(func() {
			close(b.msgs)
		})
	case b.msgs <- m:
	}
}

// Recv returns the next message for this process, or nil if ctx is cancelled
// or the forwarder has been closed.
func (b *procMessageForwarder) Recv(ctx context.Context) *pb.ExecMessage {
	select {
	case <-ctx.Done():
	case <-b.done:
	case m := <-b.msgs:
		return m
	}
	return nil
}

// Close stops the forwarder: it closes done, drains at most one in-flight
// message so a blocked Send can complete, then runs Send once more which
// (done being closed) closes the msgs channel.
func (b *procMessageForwarder) Close() {
	close(b.done)
	b.Recv(context.Background())      // flush any messages in queue
	b.Send(context.Background(), nil) // ensure channel is closed
}
// messageForwarder manages a single grpc stream for ExecProcess to facilitate
// a pub/sub message channel for each new process started from the client
// connection.
type messageForwarder struct {
	client pb.LLBBridgeClient
	ctx    context.Context
	cancel func()
	eg     *errgroup.Group
	mu     sync.Mutex // guards pids
	pids   map[string]*procMessageForwarder
	stream pb.LLBBridge_ExecProcessClient
	// startOnce used to only start the exec message forwarder once,
	// so we only have one exec stream per client
	startOnce sync.Once
	// startErr tracks the error when initializing the stream, it will
	// be returned on subsequent calls to Start
	startErr error
}

// newMessageForwarder wires up a forwarder whose lifetime is bound to ctx;
// the underlying stream is opened lazily by the first call to Start.
func newMessageForwarder(ctx context.Context, client pb.LLBBridgeClient) *messageForwarder {
	ctx, cancel := context.WithCancel(ctx)
	eg, ctx := errgroup.WithContext(ctx)
	return &messageForwarder{
		client: client,
		pids:   map[string]*procMessageForwarder{},
		ctx:    ctx,
		cancel: cancel,
		eg:     eg,
	}
}
// Start opens the single ExecProcess stream shared by all processes from
// this client and launches the receive loop that fans incoming messages out
// to the registered per-process forwarders. Only the first call opens the
// stream (startOnce); a failure is remembered in startErr and returned by
// every subsequent call.
func (m *messageForwarder) Start() (err error) {
	defer func() {
		if err != nil {
			m.startErr = err
		}
	}()

	if m.startErr != nil {
		return m.startErr
	}

	m.startOnce.Do(func() {
		m.stream, err = m.client.ExecProcess(m.ctx)
		if err != nil {
			return
		}
		m.eg.Go(func() error {
			for {
				msg, err := m.stream.Recv()
				if errors.Is(err, io.EOF) || grpcerrors.Code(err) == codes.Canceled {
					// normal stream shutdown
					return nil
				}
				logrus.Debugf("|<--- %s", debugMessage(msg))
				if err != nil {
					return err
				}
				m.mu.Lock()
				msgs, ok := m.pids[msg.ProcessID]
				m.mu.Unlock()
				if !ok {
					logrus.Debugf("Received exec message for unregistered process: %s", msg.String())
					continue
				}
				msgs.Send(m.ctx, msg)
			}
		})
	})
	return err
}
// debugMessage renders an ExecMessage as a short human-readable string for
// debug logging; file payloads are summarized as a byte count.
func debugMessage(msg *pb.ExecMessage) string {
	switch m := msg.GetInput().(type) {
	case *pb.ExecMessage_Init:
		return fmt.Sprintf("Init Message %s", msg.ProcessID)
	case *pb.ExecMessage_File:
		if m.File.EOF {
			return fmt.Sprintf("File Message %s, fd=%d, EOF", msg.ProcessID, m.File.Fd)
		}
		return fmt.Sprintf("File Message %s, fd=%d, %d bytes", msg.ProcessID, m.File.Fd, len(m.File.Data))
	case *pb.ExecMessage_Resize:
		return fmt.Sprintf("Resize Message %s", msg.ProcessID)
	case *pb.ExecMessage_Started:
		return fmt.Sprintf("Started Message %s", msg.ProcessID)
	case *pb.ExecMessage_Exit:
		return fmt.Sprintf("Exit Message %s, code=%d, err=%s", msg.ProcessID, m.Exit.Code, m.Exit.Error)
	case *pb.ExecMessage_Done:
		return fmt.Sprintf("Done Message %s", msg.ProcessID)
	}
	return fmt.Sprintf("Unknown Message %s", msg.String())
}
// Send writes msg to the shared ExecProcess stream. The mutex is held across
// the stream write, so concurrent senders are serialized; sending for a
// process that is no longer registered is an error.
func (m *messageForwarder) Send(msg *pb.ExecMessage) error {
	m.mu.Lock()
	_, ok := m.pids[msg.ProcessID]
	defer m.mu.Unlock()
	if !ok {
		return errors.Errorf("process %s has ended, not sending message %#v", msg.ProcessID, msg.Input)
	}
	logrus.Debugf("|---> %s", debugMessage(msg))
	return m.stream.Send(msg)
}
// Release cancels the forwarder's context and waits for the receive loop to
// stop, returning its error (if any).
func (m *messageForwarder) Release() error {
	m.cancel()
	return m.eg.Wait()
}
// Register creates a fresh per-process message channel for pid and begins
// routing stream messages with that ProcessID to it.
func (m *messageForwarder) Register(pid string) *procMessageForwarder {
	sender := newProcMessageForwarder()
	m.mu.Lock()
	m.pids[pid] = sender
	m.mu.Unlock()
	return sender
}
// Deregister stops routing messages for pid and closes its per-process
// channel; it is a no-op if pid was never registered (or already removed).
func (m *messageForwarder) Deregister(pid string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if sender, ok := m.pids[pid]; ok {
		delete(m.pids, pid)
		sender.Close()
	}
}
// msgWriter is an io.Writer that turns writes into FdMessages for one
// process fd (used to forward stdin to the server).
type msgWriter struct {
	mux       *messageForwarder
	fd        uint32
	processID string
}

// Write sends msg as a single FdMessage; the full length is reported as
// written on success.
func (w *msgWriter) Write(msg []byte) (int, error) {
	err := w.mux.Send(&pb.ExecMessage{
		ProcessID: w.processID,
		Input: &pb.ExecMessage_File{
			File: &pb.FdMessage{
				Fd:   w.fd,
				Data: msg,
			},
		},
	})
	if err != nil {
		return 0, err
	}
	return len(msg), nil
}
// NewContainer asks the gateway to create (but not start) a new container
// with the given mounts attached and returns a client-side handle for it.
// It requires the CapGatewayExec capability and lazily opens the shared
// ExecProcess message stream used later by Start/Resize/stdio.
func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) {
	err := c.caps.Supports(pb.CapGatewayExec)
	if err != nil {
		return nil, err
	}
	id := identity.NewID()
	var mounts []*opspb.Mount
	for _, m := range req.Mounts {
		var resultID string
		if m.Ref != nil {
			ref, ok := m.Ref.(*reference)
			if !ok {
				return nil, errors.Errorf("unexpected type for reference, got %T", m.Ref)
			}
			resultID = ref.id
		}
		mounts = append(mounts, &opspb.Mount{
			Dest:      m.Dest,
			Selector:  m.Selector,
			Readonly:  m.Readonly,
			MountType: m.MountType,
			ResultID:  resultID,
			CacheOpt:  m.CacheOpt,
			SecretOpt: m.SecretOpt,
			SSHOpt:    m.SSHOpt,
		})
	}

	logrus.Debugf("|---> NewContainer %s", id)
	_, err = c.client.NewContainer(ctx, &pb.NewContainerRequest{
		ContainerID: id,
		Mounts:      mounts,
		// bugfix: NetMode was previously dropped here even though the wire
		// message has a Network field and the in-process forwarder path
		// (bridgeClient.NewContainer) already passes it through.
		Network: req.NetMode,
	})
	if err != nil {
		return nil, err
	}

	// ensure message forwarder is started, only sets up stream first time called
	err = c.execMsgs.Start()
	if err != nil {
		return nil, err
	}

	return &container{
		client:   c.client,
		id:       id,
		execMsgs: c.execMsgs,
	}, nil
}
// container is the client-side handle for a gateway container created by
// grpcClient.NewContainer; Start launches processes over the shared exec
// message stream and Release asks the server to tear the container down.
type container struct {
	client   pb.LLBBridgeClient
	id       string
	execMsgs *messageForwarder
}
// Start launches a new process in the container and returns a handle for
// waiting on it. It registers a per-process message channel, sends the Init
// message, waits for the server's Started ack, and then starts goroutines
// that pump stdin to the server and demultiplex the File/Exit/Done messages
// coming back.
func (ctr *container) Start(ctx context.Context, req client.StartRequest) (client.ContainerProcess, error) {
	pid := fmt.Sprintf("%s:%s", ctr.id, identity.NewID())
	msgs := ctr.execMsgs.Register(pid)

	init := &pb.InitMessage{
		ContainerID: ctr.id,
		Meta: &opspb.Meta{
			Args: req.Args,
			Env:  req.Env,
			Cwd:  req.Cwd,
			User: req.User,
		},
		Tty:      req.Tty,
		Security: req.SecurityMode,
	}
	// advertise only the fds the caller actually attached
	if req.Stdin != nil {
		init.Fds = append(init.Fds, 0)
	}
	if req.Stdout != nil {
		init.Fds = append(init.Fds, 1)
	}
	if req.Stderr != nil {
		init.Fds = append(init.Fds, 2)
	}

	err := ctr.execMsgs.Send(&pb.ExecMessage{
		ProcessID: pid,
		Input: &pb.ExecMessage_Init{
			Init: init,
		},
	})
	if err != nil {
		return nil, err
	}

	// block until the server acknowledges the process has started
	msg := msgs.Recv(ctx)
	if msg == nil {
		return nil, errors.Errorf("failed to receive started message")
	}
	started := msg.GetStarted()
	if started == nil {
		return nil, errors.Errorf("expecting started message, got %T", msg.GetInput())
	}

	eg, ctx := errgroup.WithContext(ctx)
	done := make(chan struct{}) // closed when the server reports process exit

	ctrProc := &containerProcess{
		execMsgs: ctr.execMsgs,
		id:       pid,
		eg:       eg,
	}

	var stdinReader *io.PipeReader
	// closes the stdin pipe once the process has exited so the copy
	// goroutine below can finish
	ctrProc.eg.Go(func() error {
		<-done
		if stdinReader != nil {
			return stdinReader.Close()
		}
		return nil
	})

	if req.Stdin != nil {
		var stdinWriter io.WriteCloser
		stdinReader, stdinWriter = io.Pipe()
		// This go routine is intentionally not part of the errgroup because
		// if os.Stdin is used for req.Stdin then this will block until
		// the user closes the input, which will likely be after we are done
		// with the container, so we can't Wait on it.
		go func() {
			io.Copy(stdinWriter, req.Stdin)
			stdinWriter.Close()
		}()

		ctrProc.eg.Go(func() error {
			m := &msgWriter{
				mux:       ctr.execMsgs,
				processID: pid,
				fd:        0,
			}
			_, err := io.Copy(m, stdinReader)
			// ignore ErrClosedPipe, it is EOF for our usage.
			if err != nil && !errors.Is(err, io.ErrClosedPipe) {
				return err
			}
			// not an error so must be eof
			return ctr.execMsgs.Send(&pb.ExecMessage{
				ProcessID: pid,
				Input: &pb.ExecMessage_File{
					File: &pb.FdMessage{
						Fd:  0,
						EOF: true,
					},
				},
			})
		})
	}

	// demultiplex server messages: stdout/stderr data, exit status, done
	ctrProc.eg.Go(func() error {
		var exitError error
		for {
			msg := msgs.Recv(ctx)
			if msg == nil {
				return exitError
			}

			if file := msg.GetFile(); file != nil {
				var out io.WriteCloser
				switch file.Fd {
				case 1:
					out = req.Stdout
				case 2:
					out = req.Stderr
				}
				if out == nil {
					// if things are plumbed correctly this should never happen
					return errors.Errorf("missing writer for output fd %d", file.Fd)
				}
				if len(file.Data) > 0 {
					_, err := out.Write(file.Data)
					if err != nil {
						return err
					}
				}
			} else if exit := msg.GetExit(); exit != nil {
				// capture exit message to exitError so we can return it after
				// the server sends the Done message
				close(done)
				if exit.Code == 0 {
					continue
				}
				exitError = grpcerrors.FromGRPC(status.ErrorProto(&spb.Status{
					Code:    exit.Error.Code,
					Message: exit.Error.Message,
					Details: convertGogoAny(exit.Error.Details),
				}))
				if exit.Code != containerd.UnknownExitStatus {
					exitError = &errdefs.ExitError{ExitCode: exit.Code, Err: exitError}
				}
			} else if serverDone := msg.GetDone(); serverDone != nil {
				return exitError
			} else {
				return errors.Errorf("unexpected Exec Message for pid %s: %T", pid, msg.GetInput())
			}
		}
	})

	return ctrProc, nil
}
// Release asks the server to tear down the container and free its
// resources.
func (ctr *container) Release(ctx context.Context) error {
	logrus.Debugf("|---> ReleaseContainer %s", ctr.id)
	req := &pb.ReleaseContainerRequest{ContainerID: ctr.id}
	_, err := ctr.client.ReleaseContainer(ctx, req)
	return err
}
// containerProcess is the client-side handle for a single process started in
// a container via container.Start.
type containerProcess struct {
	execMsgs *messageForwarder
	id       string
	eg       *errgroup.Group
}

// Wait blocks until the process IO goroutines have finished, then
// deregisters the process from the shared message stream.
func (ctrProc *containerProcess) Wait() error {
	defer ctrProc.execMsgs.Deregister(ctrProc.id)
	return ctrProc.eg.Wait()
}

// Resize sends a terminal resize event for this process. The context
// argument is unused because delivery rides on the shared exec stream.
func (ctrProc *containerProcess) Resize(_ context.Context, size client.WinSize) error {
	return ctrProc.execMsgs.Send(&pb.ExecMessage{
		ProcessID: ctrProc.id,
		Input: &pb.ExecMessage_Resize{
			Resize: &pb.ResizeMessage{
				Cols: size.Cols,
				Rows: size.Rows,
			},
		},
	})
}
type reference struct {
@ -596,6 +1046,14 @@ func product() string {
return os.Getenv("BUILDKIT_EXPORTEDPRODUCT")
}
// convertGogoAny converts gogo protobuf Any values into their golang/protobuf
// equivalents, preserving order.
func convertGogoAny(in []*gogotypes.Any) []*any.Any {
	out := make([]*any.Any, len(in))
	for i, a := range in {
		out[i] = &any.Any{TypeUrl: a.TypeUrl, Value: a.Value}
	}
	return out
}
func convertToGogoAny(in []*any.Any) []*gogotypes.Any {
out := make([]*gogotypes.Any, len(in))
for i := range in {

View File

@ -35,6 +35,10 @@ const (
// CapGatewaySolveMetadata can be used to check if solve calls from gateway reliably return metadata
CapGatewaySolveMetadata apicaps.CapID = "gateway.solve.metadata"
// CapGatewayExec is the capability to create and interact with new
// containers directly through the gateway
CapGatewayExec apicaps.CapID = "gateway.exec"
)
func init() {
@ -136,4 +140,11 @@ func init() {
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapGatewayExec,
Name: "gateway exec",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}

File diff suppressed because it is too large Load Diff

View File

@ -28,6 +28,10 @@ service LLBBridge {
rpc Return(ReturnRequest) returns (ReturnResponse);
// apicaps:CapFrontendInputs
rpc Inputs(InputsRequest) returns (InputsResponse);
rpc NewContainer(NewContainerRequest) returns (NewContainerResponse);
rpc ReleaseContainer(ReleaseContainerRequest) returns (ReleaseContainerResponse);
rpc ExecProcess(stream ExecMessage) returns (stream ExecMessage);
}
message Result {
@ -162,3 +166,69 @@ message PongResponse{
repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false];
repeated moby.buildkit.v1.types.WorkerRecord Workers = 3;
}
// NewContainerRequest creates a new container inside the gateway session.
// ContainerID is chosen by the client, must be unique for the session, and
// is referenced later by InitMessage and ReleaseContainerRequest.
message NewContainerRequest {
	string ContainerID = 1;
	// For mount input values we can use random identifiers passed with ref
	repeated pb.Mount Mounts = 2;
	pb.NetMode Network = 3;
}

message NewContainerResponse{}

// ReleaseContainerRequest tears down a container created with
// NewContainerRequest and frees its resources.
message ReleaseContainerRequest {
	string ContainerID = 1;
}

message ReleaseContainerResponse{}
message ExecMessage {
string ProcessID = 1;
oneof Input {
// InitMessage sent from client to server will start a new process in a
// container
InitMessage Init = 2;
// FdMessage used from client to server for input (stdin) and
// from server to client for output (stdout, stderr)
FdMessage File = 3;
// ResizeMessage used from client to server for terminal resize events
ResizeMessage Resize = 4;
// StartedMessage sent from server to client after InitMessage to
// indicate the process has started.
StartedMessage Started = 5;
// ExitMessage sent from server to client will contain the exit code
// when the process ends.
ExitMessage Exit = 6;
// DoneMessage from server to client will be the last message for any
// process. Note that FdMessage might be sent after ExitMessage.
DoneMessage Done = 7;
}
}
message InitMessage{
string ContainerID = 1;
pb.Meta Meta = 2;
repeated uint32 Fds = 3;
bool Tty = 4;
pb.SecurityMode Security = 5;
}
message ExitMessage {
uint32 Code = 1;
google.rpc.Status Error = 2;
}
message StartedMessage{}
message DoneMessage{}
message FdMessage{
uint32 Fd = 1; // what fd the data was from
bool EOF = 2; // true if eof was reached
bytes Data = 3;
}
message ResizeMessage{
uint32 Rows = 1;
uint32 Cols = 2;
}

24
solver/errdefs/exec.go Normal file
View File

@ -0,0 +1,24 @@
package errdefs
import fmt "fmt"
// ExitError will be returned when the container process exits with a non-zero
// exit code.
type ExitError struct {
	ExitCode uint32
	Err      error
}

// Error reports the wrapped error's message when one is present, and
// otherwise formats the exit code.
func (err *ExitError) Error() string {
	if err.Err == nil {
		return fmt.Sprintf("exit code: %d", err.ExitCode)
	}
	return err.Err.Error()
}

// Unwrap exposes the underlying error; when none was recorded it
// synthesizes one from the exit code so the cause is always non-nil.
func (err *ExitError) Unwrap() error {
	if err.Err != nil {
		return err.Err
	}
	return fmt.Errorf("exit code: %d", err.ExitCode)
}

View File

@ -131,7 +131,7 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid st
if !ok {
return nil, errors.Errorf("invalid frontend: %s", req.Frontend)
}
res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs, sid)
res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs, sid, b.sm)
if err != nil {
return nil, errors.Wrapf(err, "failed to solve with frontend %s", req.Frontend)
}

View File

@ -0,0 +1,514 @@
package mounts
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/sys"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/locker"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/secrets"
"github.com/moby/buildkit/session/sshforward"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"google.golang.org/grpc/codes"
)
// NewMountManager returns a MountManager that resolves non-ref mounts
// (cache dirs, secrets, SSH sockets, tmpfs) for the consumer named name.
func NewMountManager(name string, cm cache.Manager, sm *session.Manager, md *metadata.Store) *MountManager {
	mm := &MountManager{
		cm:          cm,
		sm:          sm,
		md:          md,
		managerName: name,
	}
	mm.cacheMounts = make(map[string]*cacheRefShare)
	return mm
}
// MountManager resolves pb.Mount entries that are not plain refs: cache
// mounts (with sharing semantics), secrets and SSH sockets forwarded over
// the client session, and tmpfs.
type MountManager struct {
	cm            cache.Manager
	sm            *session.Manager
	cacheMountsMu sync.Mutex // guards cacheMounts
	cacheMounts   map[string]*cacheRefShare
	md            *metadata.Store
	managerName   string // used in descriptions of newly created cache refs
}
// getRefCacheDir resolves a mutable ref for the cache-dir mount m,
// memoizing per-manager shares in mm.cacheMounts and delegating SHARED
// mounts to the process-wide sharedCacheRefs table.
func (mm *MountManager) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
	g := &cacheRefGetter{
		locker:          &mm.cacheMountsMu,
		cacheMounts:     mm.cacheMounts,
		cm:              mm.cm,
		md:              mm.md,
		globalCacheRefs: sharedCacheRefs,
		name:            fmt.Sprintf("cached mount %s from %s", m.Dest, mm.managerName),
	}
	return g.getRefCacheDir(ctx, ref, id, sharing)
}

// cacheRefGetter carries the state needed to look up or create a cache
// mount ref; see cacheRefGetter.getRefCacheDir.
type cacheRefGetter struct {
	locker          sync.Locker               // guards cacheMounts
	cacheMounts     map[string]*cacheRefShare // owner-local shares, keyed "cache-dir:<id>[:<ref>]"
	cm              cache.Manager
	md              *metadata.Store
	globalCacheRefs *cacheRefs // process-wide shares used for CacheSharingOpt_SHARED
	name            string     // human-readable description attached to new refs
}
// getRefCacheDir returns a mutable ref for the cache dir identified by id
// (and, when non-nil, the base ref), honoring the requested sharing mode.
// Results are memoized in g.cacheMounts so repeated mounts of the same key
// by the same owner share one underlying ref.
func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
	key := "cache-dir:" + id
	if ref != nil {
		key += ":" + ref.ID()
	}
	mu := g.locker
	mu.Lock()
	defer mu.Unlock()

	if ref, ok := g.cacheMounts[key]; ok {
		return ref.clone(), nil
	}
	// On success, record a share for this key and hand the caller a clone;
	// this deferred func rewrites the named return value mref.
	defer func() {
		if err == nil {
			share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}}
			g.cacheMounts[key] = share
			mref = share.clone()
		}
	}()

	switch sharing {
	case pb.CacheSharingOpt_SHARED:
		return g.globalCacheRefs.get(key, func() (cache.MutableRef, error) {
			return g.getRefCacheDirNoCache(ctx, key, ref, id, false)
		})
	case pb.CacheSharingOpt_PRIVATE:
		return g.getRefCacheDirNoCache(ctx, key, ref, id, false)
	case pb.CacheSharingOpt_LOCKED:
		// LOCKED blocks (block=true) until another user of the same key
		// releases its ref.
		return g.getRefCacheDirNoCache(ctx, key, ref, id, true)
	default:
		return nil, errors.Errorf("invalid cache sharing option: %s", sharing.String())
	}
}
// getRefCacheDirNoCache finds an existing reusable mutable ref for key in
// the metadata store or creates a new one. When block is true it polls
// every 100ms while all candidate refs are locked, until one frees up or
// ctx is canceled.
func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, block bool) (cache.MutableRef, error) {
	makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
		return g.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain)
	}

	cacheRefsLocker.Lock(key)
	defer cacheRefsLocker.Unlock(key)
	for {
		sis, err := g.md.Search(key)
		if err != nil {
			return nil, err
		}
		locked := false
		for _, si := range sis {
			if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil {
				logrus.Debugf("reusing ref for cache dir: %s", mRef.ID())
				return mRef, nil
			} else if errors.Is(err, cache.ErrLocked) {
				locked = true
			}
		}
		if block && locked {
			// Drop the per-key lock while waiting so the current holder
			// can release, then re-acquire and retry.
			cacheRefsLocker.Unlock(key)
			select {
			case <-ctx.Done():
				cacheRefsLocker.Lock(key)
				return nil, ctx.Err()
			case <-time.After(100 * time.Millisecond):
				cacheRefsLocker.Lock(key)
			}
		} else {
			break
		}
	}
	mRef, err := makeMutable(ref)
	if err != nil {
		return nil, err
	}

	si, _ := g.md.Get(mRef.ID())
	v, err := metadata.NewValue(key)
	if err != nil {
		mRef.Release(context.TODO())
		return nil, err
	}
	v.Index = key
	// Index the new ref under key so future lookups can reuse it.
	if err := si.Update(func(b *bolt.Bucket) error {
		return si.SetValue(b, key, v)
	}); err != nil {
		mRef.Release(context.TODO())
		return nil, err
	}
	return mRef, nil
}
// getSSHMountable finds a session caller in group g that can forward the
// SSH agent socket requested by m. A missing key is only an error when
// the mount is not optional.
func (mm *MountManager) getSSHMountable(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) {
	var caller session.Caller
	err := mm.sm.Any(ctx, g, func(ctx context.Context, _ string, c session.Caller) error {
		if err := sshforward.CheckSSHID(ctx, c, m.SSHOpt.ID); err != nil {
			if m.SSHOpt.Optional {
				return nil
			}
			if grpcerrors.Code(err) == codes.Unimplemented {
				return errors.Errorf("no SSH key %q forwarded from the client", m.SSHOpt.ID)
			}
			return err
		}
		caller = c
		return nil
	})
	if err != nil {
		return nil, err
	}
	// because ssh socket remains active, to actually handle session disconnecting ssh error
	// should restart the whole exec with new session
	return &sshMount{mount: m, caller: caller, idmap: mm.cm.IdentityMapping()}, nil
}

// sshMount is a cache.Mountable backed by an SSH socket forwarded from
// the session caller.
type sshMount struct {
	mount  *pb.Mount
	caller session.Caller
	idmap  *idtools.IdentityMapping
}

// Mount implements cache.Mountable; readonly is not consulted for SSH
// socket mounts.
func (sm *sshMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	return &sshMountInstance{sm: sm, idmap: sm.idmap}, nil
}

// sshMountInstance is one mounted instance of an sshMount.
type sshMountInstance struct {
	sm    *sshMount
	idmap *idtools.IdentityMapping
}
// Mount forwards the client's SSH agent socket to a local unix socket and
// returns a bind mount of it. uid/gid are mapped through the identity
// mapping when one is configured. The returned release func tears down
// the socket and cancels the forwarding context.
func (sm *sshMountInstance) Mount() ([]mount.Mount, func() error, error) {
	ctx, cancel := context.WithCancel(context.TODO())

	uid := int(sm.sm.mount.SSHOpt.Uid)
	gid := int(sm.sm.mount.SSHOpt.Gid)

	if sm.idmap != nil {
		identity, err := sm.idmap.ToHost(idtools.Identity{
			UID: uid,
			GID: gid,
		})
		if err != nil {
			cancel()
			return nil, nil, err
		}
		uid = identity.UID
		gid = identity.GID
	}

	sock, cleanup, err := sshforward.MountSSHSocket(ctx, sm.sm.caller, sshforward.SocketOpt{
		ID:   sm.sm.mount.SSHOpt.ID,
		UID:  uid,
		GID:  gid,
		Mode: int(sm.sm.mount.SSHOpt.Mode & 0777),
	})
	if err != nil {
		cancel()
		return nil, nil, err
	}
	release := func() error {
		var err error
		if cleanup != nil {
			err = cleanup()
		}
		cancel()
		return err
	}

	return []mount.Mount{{
		Type:    "bind",
		Source:  sock,
		Options: []string{"rbind"},
	}}, release, nil
}

// IdentityMapping returns the user-namespace identity mapping in effect
// for this mount, or nil.
func (sm *sshMountInstance) IdentityMapping() *idtools.IdentityMapping {
	return sm.idmap
}
// getSecretMountable fetches the secret named by m.SecretOpt.ID from any
// session in group g. A not-found secret yields (nil, nil) when the mount
// is optional.
func (mm *MountManager) getSecretMountable(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) {
	if m.SecretOpt == nil {
		return nil, errors.Errorf("invalid secret mount options")
	}
	sopt := *m.SecretOpt

	id := sopt.ID
	if id == "" {
		return nil, errors.Errorf("secret ID missing from mount options")
	}
	var dt []byte
	var err error
	err = mm.sm.Any(ctx, g, func(ctx context.Context, _ string, caller session.Caller) error {
		dt, err = secrets.GetSecret(ctx, caller, id)
		if err != nil {
			if errors.Is(err, secrets.ErrNotFound) && m.SecretOpt.Optional {
				return nil
			}
			return err
		}
		return nil
	})
	if err != nil || dt == nil {
		return nil, err
	}
	return &secretMount{mount: m, data: dt, idmap: mm.cm.IdentityMapping()}, nil
}

// secretMount is a cache.Mountable that holds the secret bytes in memory.
type secretMount struct {
	mount *pb.Mount
	data  []byte
	idmap *idtools.IdentityMapping
}

// Mount implements cache.Mountable; readonly is not consulted here.
func (sm *secretMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	return &secretMountInstance{sm: sm, idmap: sm.idmap}, nil
}

// secretMountInstance is one mounted instance of a secretMount; root is
// the backing tmpfs dir once mounted.
type secretMountInstance struct {
	sm    *secretMount
	root  string
	idmap *idtools.IdentityMapping
}
// Mount writes the secret to a randomly named file inside a private tmpfs
// and returns a read-only bind mount of that file. The returned cleanup
// unmounts the tmpfs and removes the temp dir.
func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) {
	dir, err := ioutil.TempDir("", "buildkit-secrets")
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create temp dir")
	}
	cleanupDir := func() error {
		return os.RemoveAll(dir)
	}

	if err := os.Chmod(dir, 0711); err != nil {
		cleanupDir()
		return nil, nil, err
	}

	tmpMount := mount.Mount{
		Type:    "tmpfs",
		Source:  "tmpfs",
		Options: []string{"nodev", "nosuid", "noexec", fmt.Sprintf("uid=%d,gid=%d", os.Geteuid(), os.Getegid())},
	}

	// The tmpfs uid/gid options are dropped when running inside a user
	// namespace.
	if sys.RunningInUserNS() {
		tmpMount.Options = nil
	}

	if err := mount.All([]mount.Mount{tmpMount}, dir); err != nil {
		cleanupDir()
		return nil, nil, errors.Wrap(err, "unable to setup secret mount")
	}
	sm.root = dir

	cleanup := func() error {
		if err := mount.Unmount(dir, 0); err != nil {
			return err
		}
		return cleanupDir()
	}

	randID := identity.NewID()
	fp := filepath.Join(dir, randID)
	if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil {
		cleanup()
		return nil, nil, err
	}

	uid := int(sm.sm.mount.SecretOpt.Uid)
	gid := int(sm.sm.mount.SecretOpt.Gid)

	// Map requested ownership through the identity mapping when present.
	if sm.idmap != nil {
		identity, err := sm.idmap.ToHost(idtools.Identity{
			UID: uid,
			GID: gid,
		})
		if err != nil {
			cleanup()
			return nil, nil, err
		}
		uid = identity.UID
		gid = identity.GID
	}

	if err := os.Chown(fp, uid, gid); err != nil {
		cleanup()
		return nil, nil, err
	}

	if err := os.Chmod(fp, os.FileMode(sm.sm.mount.SecretOpt.Mode&0777)); err != nil {
		cleanup()
		return nil, nil, err
	}

	return []mount.Mount{{
		Type:    "bind",
		Source:  fp,
		Options: []string{"ro", "rbind", "nodev", "nosuid", "noexec"},
	}}, cleanup, nil
}

// IdentityMapping returns the user-namespace identity mapping in effect
// for this mount, or nil.
func (sm *secretMountInstance) IdentityMapping() *idtools.IdentityMapping {
	return sm.idmap
}
// MountableCache returns a mutable cache ref for a cache-dir mount; the
// mount must carry CacheOpt describing the cache ID and sharing mode.
func (mm *MountManager) MountableCache(ctx context.Context, m *pb.Mount, ref cache.ImmutableRef) (cache.MutableRef, error) {
	if m.CacheOpt != nil {
		return mm.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m, m.CacheOpt.Sharing)
	}
	return nil, errors.Errorf("missing cache mount options")
}

// MountableTmpFS returns a fresh tmpfs mountable.
func (mm *MountManager) MountableTmpFS() cache.Mountable {
	idmap := mm.cm.IdentityMapping()
	return newTmpfs(idmap)
}

// MountableSecret resolves a secret mount through the session group g.
func (mm *MountManager) MountableSecret(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) {
	return mm.getSecretMountable(ctx, m, g)
}

// MountableSSH resolves an SSH-socket mount through the session group g.
func (mm *MountManager) MountableSSH(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) {
	return mm.getSSHMountable(ctx, m, g)
}
// newTmpfs returns a tmpfs-backed cache.Mountable.
func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable {
	return &tmpfs{idmap: idmap}
}

// tmpfs is a cache.Mountable producing tmpfs mounts.
type tmpfs struct {
	idmap *idtools.IdentityMapping
}

// Mount implements cache.Mountable.
func (f *tmpfs) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	return &tmpfsMount{readonly: readonly, idmap: f.idmap}, nil
}

// tmpfsMount is one mounted instance of a tmpfs.
type tmpfsMount struct {
	readonly bool
	idmap    *idtools.IdentityMapping
}
// Mount produces a single tmpfs mount, adding the "ro" option when the
// mount was requested read-only. The release func is a no-op.
func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) {
	options := []string{"nosuid"}
	if m.readonly {
		options = append(options, "ro")
	}
	release := func() error { return nil }
	mounts := []mount.Mount{{
		Type:    "tmpfs",
		Source:  "tmpfs",
		Options: options,
	}}
	return mounts, release, nil
}
// IdentityMapping returns the user-namespace identity mapping in effect
// for this mount, or nil.
func (m *tmpfsMount) IdentityMapping() *idtools.IdentityMapping {
	return m.idmap
}
// cacheRefsLocker serializes cache-ref creation/lookup per key.
var cacheRefsLocker = locker.New()

// sharedCacheRefs is the process-wide share table for SHARED cache mounts.
var sharedCacheRefs = &cacheRefs{}

// cacheRefs tracks shared cache mount refs by key.
type cacheRefs struct {
	mu     sync.Mutex // guards shares
	shares map[string]*cacheRefShare
}

// ClearActiveCacheMounts clears shared cache mounts currently in use.
// Caller needs to hold CacheMountsLocker before calling
func ClearActiveCacheMounts() {
	sharedCacheRefs.shares = nil
}

// CacheMountsLocker returns the lock guarding the shared cache mount table.
func CacheMountsLocker() sync.Locker {
	return &sharedCacheRefs.mu
}
// get returns a handle on the shared cache ref for key, creating the
// underlying ref via fn when no share exists yet. Every caller receives
// its own clone, which must be Released independently.
func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.shares == nil {
		r.shares = make(map[string]*cacheRefShare)
	}
	if existing, ok := r.shares[key]; ok {
		return existing.clone(), nil
	}

	mref, err := fn()
	if err != nil {
		return nil, err
	}

	s := &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}}
	r.shares[key] = s
	return s.clone(), nil
}
// cacheRefShare wraps one mutable ref shared by several users; refs
// tracks the outstanding clones so the underlying ref is released only
// when the last clone is.
type cacheRefShare struct {
	cache.MutableRef
	mu   sync.Mutex // guards refs
	refs map[*cacheRef]struct{}
	main *cacheRefs // non-nil when registered in a global share table
	key  string
}

// clone registers and returns a new handle on the share.
func (r *cacheRefShare) clone() cache.MutableRef {
	cacheRef := &cacheRef{cacheRefShare: r}
	// test hook used by TestCacheMountSharedRefsDeadlock
	if cacheRefCloneHijack != nil {
		cacheRefCloneHijack()
	}
	r.mu.Lock()
	r.refs[cacheRef] = struct{}{}
	r.mu.Unlock()
	return cacheRef
}

// release removes the share from its global table (if any) and releases
// the underlying ref. When main is non-nil the caller must hold main.mu
// (see cacheRef.Release).
func (r *cacheRefShare) release(ctx context.Context) error {
	if r.main != nil {
		delete(r.main.shares, r.key)
	}
	return r.MutableRef.Release(ctx)
}

// cacheRefReleaseHijack and cacheRefCloneHijack are test hooks that force
// specific interleavings in the deadlock regression test.
var cacheRefReleaseHijack func()
var cacheRefCloneHijack func()
// cacheRef is a single caller's handle on a cacheRefShare.
type cacheRef struct {
	*cacheRefShare
}

// Release removes this handle from the share and, if it was the last one,
// releases the shared ref. The global table lock (main.mu) is taken
// before the share lock to keep ordering consistent with cacheRefs.get.
func (r *cacheRef) Release(ctx context.Context) error {
	if r.main != nil {
		r.main.mu.Lock()
		defer r.main.mu.Unlock()
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.refs, r)
	if len(r.refs) == 0 {
		// test hook used by TestCacheMountSharedRefsDeadlock
		if cacheRefReleaseHijack != nil {
			cacheRefReleaseHijack()
		}
		return r.release(ctx)
	}
	return nil
}

View File

@ -0,0 +1,387 @@
package mounts
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/diff/apply"
"github.com/containerd/containerd/leases"
ctdmetadata "github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/snapshots"
"github.com/containerd/containerd/snapshots/native"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/snapshot"
containerdsnapshot "github.com/moby/buildkit/snapshot/containerd"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/leaseutil"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
bolt "go.etcd.io/bbolt"
"golang.org/x/sync/errgroup"
)
// cmOpt configures newCacheManager.
type cmOpt struct {
	snapshotterName string
	snapshotter     snapshots.Snapshotter
	tmpdir          string // when set, used instead of a fresh temp dir
}

// cmOut bundles the components constructed by newCacheManager.
type cmOut struct {
	manager cache.Manager
	lm      leases.Manager
	cs      content.Store
	md      *metadata.Store
}
// newCacheManager builds a cache.Manager (with content store, lease
// manager and metadata store) rooted in a temp dir for tests. The
// returned cleanup must be called to close resources; it is also invoked
// automatically when construction fails partway.
func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() error, err error) {
	ns, ok := namespaces.Namespace(ctx)
	if !ok {
		return nil, nil, errors.Errorf("namespace required for test")
	}

	if opt.snapshotterName == "" {
		opt.snapshotterName = "native"
	}

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	if err != nil {
		return nil, nil, err
	}

	defers := make([]func() error, 0)
	// cleanup runs registered closers in reverse order, returning the
	// first error encountered.
	cleanup = func() error {
		var err error
		for i := range defers {
			if err1 := defers[len(defers)-1-i](); err1 != nil && err == nil {
				err = err1
			}
		}
		return err
	}
	defer func() {
		if err != nil {
			cleanup()
		}
	}()
	if opt.tmpdir == "" {
		defers = append(defers, func() error {
			return os.RemoveAll(tmpdir)
		})
	} else {
		// Caller supplied its own dir; discard the one we just created.
		os.RemoveAll(tmpdir)
		tmpdir = opt.tmpdir
	}

	if opt.snapshotter == nil {
		snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
		if err != nil {
			return nil, nil, err
		}
		opt.snapshotter = snapshotter
	}

	md, err := metadata.NewStore(filepath.Join(tmpdir, "metadata.db"))
	if err != nil {
		return nil, nil, err
	}

	store, err := local.NewStore(tmpdir)
	if err != nil {
		return nil, nil, err
	}

	db, err := bolt.Open(filepath.Join(tmpdir, "containerdmeta.db"), 0644, nil)
	if err != nil {
		return nil, nil, err
	}
	defers = append(defers, func() error {
		return db.Close()
	})

	mdb := ctdmetadata.NewDB(db, store, map[string]snapshots.Snapshotter{
		opt.snapshotterName: opt.snapshotter,
	})
	if err := mdb.Init(context.TODO()); err != nil {
		return nil, nil, err
	}

	lm := ctdmetadata.NewLeaseManager(mdb)

	cm, err := cache.NewManager(cache.ManagerOpt{
		Snapshotter:    snapshot.FromContainerdSnapshotter(opt.snapshotterName, containerdsnapshot.NSSnapshotter(ns, mdb.Snapshotter(opt.snapshotterName)), nil),
		MetadataStore:  md,
		ContentStore:   mdb.ContentStore(),
		LeaseManager:   leaseutil.WithNamespace(lm, ns),
		GarbageCollect: mdb.GarbageCollect,
		Applier:        apply.NewFileSystemApplier(mdb.ContentStore()),
	})
	if err != nil {
		return nil, nil, err
	}
	return &cmOut{
		manager: cm,
		lm:      lm,
		cs:      mdb.ContentStore(),
		md:      md,
	}, cleanup, nil
}
// newRefGetter builds a cacheRefGetter with a fresh owner-local mount
// table and the given global (SHARED) table.
func newRefGetter(m cache.Manager, md *metadata.Store, shared *cacheRefs) *cacheRefGetter {
	return &cacheRefGetter{
		locker:          &sync.Mutex{},
		cacheMounts:     map[string]*cacheRefShare{},
		cm:              m,
		md:              md,
		globalCacheRefs: shared,
	}
}
// TestCacheMountPrivateRefs verifies PRIVATE sharing: refs are shared
// within one getter, isolated across getters, and IDs are reusable only
// after all handles are released.
func TestCacheMountPrivateRefs(t *testing.T) {
	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()

	g1 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g2 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g3 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g4 := newRefGetter(co.manager, co.md, sharedCacheRefs)

	ref, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)

	ref2, err := g1.getRefCacheDir(ctx, nil, "bar", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)

	// different ID returns different ref
	require.NotEqual(t, ref.ID(), ref2.ID())

	// same ID on same mount still shares the reference
	ref3, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)

	require.Equal(t, ref.ID(), ref3.ID())

	// same ID on different mount gets a new ID
	ref4, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)

	require.NotEqual(t, ref.ID(), ref4.ID())

	// releasing one of two refs still keeps first ID private
	ref.Release(context.TODO())

	ref5, err := g3.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)
	require.NotEqual(t, ref.ID(), ref5.ID())
	require.NotEqual(t, ref4.ID(), ref5.ID())

	// releasing all refs releases ID to be reused
	ref3.Release(context.TODO())

	ref5, err = g4.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)
	require.Equal(t, ref.ID(), ref5.ID())

	// other mounts still keep their IDs
	ref6, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)
	require.Equal(t, ref4.ID(), ref6.ID())
}
// TestCacheMountSharedRefs verifies SHARED sharing: the same ID resolves
// to the same ref across getters, while PRIVATE still gets its own.
func TestCacheMountSharedRefs(t *testing.T) {
	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()

	g1 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g2 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g3 := newRefGetter(co.manager, co.md, sharedCacheRefs)

	ref, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_SHARED)
	require.NoError(t, err)

	ref2, err := g1.getRefCacheDir(ctx, nil, "bar", pb.CacheSharingOpt_SHARED)
	require.NoError(t, err)

	// different ID returns different ref
	require.NotEqual(t, ref.ID(), ref2.ID())

	// same ID on same mount still shares the reference
	ref3, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_SHARED)
	require.NoError(t, err)

	require.Equal(t, ref.ID(), ref3.ID())

	// same ID on different mount gets same ID
	ref4, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_SHARED)
	require.NoError(t, err)

	require.Equal(t, ref.ID(), ref4.ID())

	// private gets a new ID
	ref5, err := g3.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)

	require.NotEqual(t, ref.ID(), ref5.ID())
}
// TestCacheMountLockedRefs verifies LOCKED sharing: a second getter
// blocks on the same ID until all holders release it.
func TestCacheMountLockedRefs(t *testing.T) {
	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()

	g1 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g2 := newRefGetter(co.manager, co.md, sharedCacheRefs)

	ref, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_LOCKED)
	require.NoError(t, err)

	ref2, err := g1.getRefCacheDir(ctx, nil, "bar", pb.CacheSharingOpt_LOCKED)
	require.NoError(t, err)

	// different ID returns different ref
	require.NotEqual(t, ref.ID(), ref2.ID())

	// same ID on same mount still shares the reference
	ref3, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_LOCKED)
	require.NoError(t, err)

	require.Equal(t, ref.ID(), ref3.ID())

	// same ID on different mount blocks
	gotRef4 := make(chan struct{})
	go func() {
		ref4, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_LOCKED)
		require.NoError(t, err)
		require.Equal(t, ref.ID(), ref4.ID())
		close(gotRef4)
	}()

	// the concurrent LOCKED request must still be blocked after 500ms...
	select {
	case <-gotRef4:
		require.FailNow(t, "mount did not lock")
	case <-time.After(500 * time.Millisecond):
	}

	ref.Release(ctx)
	ref3.Release(ctx)

	// ...and must complete once both holders have released.
	select {
	case <-gotRef4:
	case <-time.After(500 * time.Millisecond):
		require.FailNow(t, "mount did not unlock")
	}
}
// moby/buildkit#1322
// TestCacheMountSharedRefsDeadlock forces a release and a concurrent
// clone of the same SHARED ref to interleave (via the hijack hooks) and
// asserts the pair finishes instead of deadlocking.
func TestCacheMountSharedRefsDeadlock(t *testing.T) {
	// not parallel
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()

	// local share table so the hooks don't interfere with parallel tests
	var sharedCacheRefs = &cacheRefs{}

	g1 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g2 := newRefGetter(co.manager, co.md, sharedCacheRefs)

	ref, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_SHARED)
	require.NoError(t, err)

	// delay release and clone so they overlap deterministically
	cacheRefReleaseHijack = func() {
		time.Sleep(200 * time.Millisecond)
	}
	cacheRefCloneHijack = func() {
		time.Sleep(400 * time.Millisecond)
	}
	defer func() {
		cacheRefReleaseHijack = nil
		cacheRefCloneHijack = nil
	}()
	eg, _ := errgroup.WithContext(context.TODO())

	eg.Go(func() error {
		return ref.Release(context.TODO())
	})

	eg.Go(func() error {
		_, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_SHARED)
		return err
	})

	done := make(chan struct{})
	go func() {
		err = eg.Wait()
		require.NoError(t, err)
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(10 * time.Second):
		require.FailNow(t, "deadlock on releasing while getting new ref")
	}
}

View File

@ -5,34 +5,21 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/sys"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/locker"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/secrets"
"github.com/moby/buildkit/session/sshforward"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver"
"github.com/moby/buildkit/solver/llbsolver/mounts"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/progress/logs"
utilsystem "github.com/moby/buildkit/util/system"
"github.com/moby/buildkit/worker"
@ -40,8 +27,6 @@ import (
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"google.golang.org/grpc/codes"
)
const execCacheType = "buildkit.exec.v0"
@ -49,31 +34,26 @@ const execCacheType = "buildkit.exec.v0"
type execOp struct {
op *pb.ExecOp
cm cache.Manager
sm *session.Manager
md *metadata.Store
mm *mounts.MountManager
exec executor.Executor
w worker.Worker
platform *pb.Platform
numInputs int
cacheMounts map[string]*cacheRefShare
cacheMountsMu sync.Mutex
}
func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
return nil, err
}
name := fmt.Sprintf("exec %s", strings.Join(op.Exec.Meta.Args, " "))
return &execOp{
op: op.Exec,
cm: cm,
sm: sm,
md: md,
exec: exec,
numInputs: len(v.Inputs()),
w: w,
platform: platform,
cacheMounts: map[string]*cacheRefShare{},
op: op.Exec,
mm: mounts.NewMountManager(name, cm, sm, md),
cm: cm,
exec: exec,
numInputs: len(v.Inputs()),
w: w,
platform: platform,
}, nil
}
@ -221,328 +201,6 @@ func (e *execOp) getMountDeps() ([]dep, error) {
return deps, nil
}
func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
g := &cacheRefGetter{
locker: &e.cacheMountsMu,
cacheMounts: e.cacheMounts,
cm: e.cm,
md: e.md,
globalCacheRefs: sharedCacheRefs,
name: fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")),
}
return g.getRefCacheDir(ctx, ref, id, sharing)
}
type cacheRefGetter struct {
locker sync.Locker
cacheMounts map[string]*cacheRefShare
cm cache.Manager
md *metadata.Store
globalCacheRefs *cacheRefs
name string
}
func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
key := "cache-dir:" + id
if ref != nil {
key += ":" + ref.ID()
}
mu := g.locker
mu.Lock()
defer mu.Unlock()
if ref, ok := g.cacheMounts[key]; ok {
return ref.clone(), nil
}
defer func() {
if err == nil {
share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}}
g.cacheMounts[key] = share
mref = share.clone()
}
}()
switch sharing {
case pb.CacheSharingOpt_SHARED:
return g.globalCacheRefs.get(key, func() (cache.MutableRef, error) {
return g.getRefCacheDirNoCache(ctx, key, ref, id, false)
})
case pb.CacheSharingOpt_PRIVATE:
return g.getRefCacheDirNoCache(ctx, key, ref, id, false)
case pb.CacheSharingOpt_LOCKED:
return g.getRefCacheDirNoCache(ctx, key, ref, id, true)
default:
return nil, errors.Errorf("invalid cache sharing option: %s", sharing.String())
}
}
func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, block bool) (cache.MutableRef, error) {
makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
return g.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain)
}
cacheRefsLocker.Lock(key)
defer cacheRefsLocker.Unlock(key)
for {
sis, err := g.md.Search(key)
if err != nil {
return nil, err
}
locked := false
for _, si := range sis {
if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil {
logrus.Debugf("reusing ref for cache dir: %s", mRef.ID())
return mRef, nil
} else if errors.Is(err, cache.ErrLocked) {
locked = true
}
}
if block && locked {
cacheRefsLocker.Unlock(key)
select {
case <-ctx.Done():
cacheRefsLocker.Lock(key)
return nil, ctx.Err()
case <-time.After(100 * time.Millisecond):
cacheRefsLocker.Lock(key)
}
} else {
break
}
}
mRef, err := makeMutable(ref)
if err != nil {
return nil, err
}
si, _ := g.md.Get(mRef.ID())
v, err := metadata.NewValue(key)
if err != nil {
mRef.Release(context.TODO())
return nil, err
}
v.Index = key
if err := si.Update(func(b *bolt.Bucket) error {
return si.SetValue(b, key, v)
}); err != nil {
mRef.Release(context.TODO())
return nil, err
}
return mRef, nil
}
func (e *execOp) getSSHMountable(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) {
var caller session.Caller
err := e.sm.Any(ctx, g, func(ctx context.Context, _ string, c session.Caller) error {
if err := sshforward.CheckSSHID(ctx, c, m.SSHOpt.ID); err != nil {
if m.SSHOpt.Optional {
return nil
}
if grpcerrors.Code(err) == codes.Unimplemented {
return errors.Errorf("no SSH key %q forwarded from the client", m.SSHOpt.ID)
}
return err
}
caller = c
return nil
})
if err != nil {
return nil, err
}
// because ssh socket remains active, to actually handle session disconnecting ssh error
// should restart the whole exec with new session
return &sshMount{mount: m, caller: caller, idmap: e.cm.IdentityMapping()}, nil
}
type sshMount struct {
mount *pb.Mount
caller session.Caller
idmap *idtools.IdentityMapping
}
func (sm *sshMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
return &sshMountInstance{sm: sm, idmap: sm.idmap}, nil
}
type sshMountInstance struct {
sm *sshMount
idmap *idtools.IdentityMapping
}
func (sm *sshMountInstance) Mount() ([]mount.Mount, func() error, error) {
ctx, cancel := context.WithCancel(context.TODO())
uid := int(sm.sm.mount.SSHOpt.Uid)
gid := int(sm.sm.mount.SSHOpt.Gid)
if sm.idmap != nil {
identity, err := sm.idmap.ToHost(idtools.Identity{
UID: uid,
GID: gid,
})
if err != nil {
cancel()
return nil, nil, err
}
uid = identity.UID
gid = identity.GID
}
sock, cleanup, err := sshforward.MountSSHSocket(ctx, sm.sm.caller, sshforward.SocketOpt{
ID: sm.sm.mount.SSHOpt.ID,
UID: uid,
GID: gid,
Mode: int(sm.sm.mount.SSHOpt.Mode & 0777),
})
if err != nil {
cancel()
return nil, nil, err
}
release := func() error {
var err error
if cleanup != nil {
err = cleanup()
}
cancel()
return err
}
return []mount.Mount{{
Type: "bind",
Source: sock,
Options: []string{"rbind"},
}}, release, nil
}
func (sm *sshMountInstance) IdentityMapping() *idtools.IdentityMapping {
return sm.idmap
}
func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) {
if m.SecretOpt == nil {
return nil, errors.Errorf("invalid sercet mount options")
}
sopt := *m.SecretOpt
id := sopt.ID
if id == "" {
return nil, errors.Errorf("secret ID missing from mount options")
}
var dt []byte
var err error
err = e.sm.Any(ctx, g, func(ctx context.Context, _ string, caller session.Caller) error {
dt, err = secrets.GetSecret(ctx, caller, id)
if err != nil {
if errors.Is(err, secrets.ErrNotFound) && m.SecretOpt.Optional {
return nil
}
return err
}
return nil
})
if err != nil || dt == nil {
return nil, err
}
return &secretMount{mount: m, data: dt, idmap: e.cm.IdentityMapping()}, nil
}
type secretMount struct {
mount *pb.Mount
data []byte
idmap *idtools.IdentityMapping
}
func (sm *secretMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
return &secretMountInstance{sm: sm, idmap: sm.idmap}, nil
}
type secretMountInstance struct {
sm *secretMount
root string
idmap *idtools.IdentityMapping
}
// Mount writes the secret payload to a randomly named file on a freshly
// mounted tmpfs and returns it as a read-only bind mount. cleanup unmounts
// the tmpfs and removes the temp directory.
func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) {
	dir, err := ioutil.TempDir("", "buildkit-secrets")
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create temp dir")
	}
	cleanupDir := func() error {
		return os.RemoveAll(dir)
	}

	// 0711: the directory is traversable but not listable by other uids.
	if err := os.Chmod(dir, 0711); err != nil {
		cleanupDir()
		return nil, nil, err
	}

	tmpMount := mount.Mount{
		Type:    "tmpfs",
		Source:  "tmpfs",
		Options: []string{"nodev", "nosuid", "noexec", fmt.Sprintf("uid=%d,gid=%d", os.Geteuid(), os.Getegid())},
	}

	if sys.RunningInUserNS() {
		// NOTE(review): options are dropped entirely inside a user
		// namespace — presumably the uid=/gid= tmpfs options are not
		// accepted there; confirm before changing.
		tmpMount.Options = nil
	}

	if err := mount.All([]mount.Mount{tmpMount}, dir); err != nil {
		cleanupDir()
		return nil, nil, errors.Wrap(err, "unable to setup secret mount")
	}
	sm.root = dir

	// From here on, cleanup must unmount before removing the directory.
	cleanup := func() error {
		if err := mount.Unmount(dir, 0); err != nil {
			return err
		}
		return cleanupDir()
	}

	randID := identity.NewID()
	fp := filepath.Join(dir, randID)
	if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil {
		cleanup()
		return nil, nil, err
	}

	uid := int(sm.sm.mount.SecretOpt.Uid)
	gid := int(sm.sm.mount.SecretOpt.Gid)

	if sm.idmap != nil {
		// Remap the requested uid/gid through the identity mapping.
		identity, err := sm.idmap.ToHost(idtools.Identity{
			UID: uid,
			GID: gid,
		})
		if err != nil {
			cleanup()
			return nil, nil, err
		}
		uid = identity.UID
		gid = identity.GID
	}

	if err := os.Chown(fp, uid, gid); err != nil {
		cleanup()
		return nil, nil, err
	}

	if err := os.Chmod(fp, os.FileMode(sm.sm.mount.SecretOpt.Mode&0777)); err != nil {
		cleanup()
		return nil, nil, err
	}

	return []mount.Mount{{
		Type:    "bind",
		Source:  fp,
		Options: []string{"ro", "rbind", "nodev", "nosuid", "noexec"},
	}}, cleanup, nil
}
// IdentityMapping returns the instance's identity mapping (may be nil).
func (sm *secretMountInstance) IdentityMapping() *idtools.IdentityMapping {
	return sm.idmap
}
func addDefaultEnvvar(env []string, k, v string) []string {
for _, e := range env {
if strings.HasPrefix(e, k+"=") {
@ -599,7 +257,7 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu
case pb.MountType_BIND:
// if mount creates an output
if m.Output != pb.SkipOutput {
// it it is readonly and not root then output is the input
// if it is readonly and not root then output is the input
if m.Readonly && ref != nil && m.Dest != pb.RootMount {
outputs = append(outputs, ref.Clone())
} else {
@ -622,10 +280,7 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu
}
case pb.MountType_CACHE:
if m.CacheOpt == nil {
return nil, errors.Errorf("missing cache mount options")
}
mRef, err := e.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m, m.CacheOpt.Sharing)
mRef, err := e.mm.MountableCache(ctx, m, ref)
if err != nil {
return nil, err
}
@ -638,27 +293,25 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu
}
case pb.MountType_TMPFS:
mountable = newTmpfs(e.cm.IdentityMapping())
mountable = e.mm.MountableTmpFS()
case pb.MountType_SECRET:
secretMount, err := e.getSecretMountable(ctx, m, g)
var err error
mountable, err = e.mm.MountableSecret(ctx, m, g)
if err != nil {
return nil, err
}
if secretMount == nil {
if mountable == nil {
continue
}
mountable = secretMount
case pb.MountType_SSH:
sshMount, err := e.getSSHMountable(ctx, m, g)
var err error
mountable, err = e.mm.MountableSSH(ctx, m, g)
if err != nil {
return nil, err
}
if sshMount == nil {
if mountable == nil {
continue
}
mountable = sshMount
default:
return nil, errors.Errorf("mount type %s not implemented", m.MountType)
@ -769,130 +422,6 @@ func proxyEnvList(p *pb.ProxyEnv) []string {
return out
}
// newTmpfs returns a cache.Mountable backed by tmpfs, carrying the given
// identity mapping (may be nil).
func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable {
	t := &tmpfs{idmap: idmap}
	return t
}
// tmpfs is a cache.Mountable that materializes as a tmpfs mount.
type tmpfs struct {
	idmap *idtools.IdentityMapping
}
// Mount produces a per-use tmpfs mount instance honoring the readonly flag.
func (f *tmpfs) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	inst := &tmpfsMount{
		readonly: readonly,
		idmap:    f.idmap,
	}
	return inst, nil
}
// tmpfsMount is a single tmpfs mount instance.
type tmpfsMount struct {
	readonly bool // adds "ro" to the mount options when set
	idmap    *idtools.IdentityMapping
}
// Mount returns a single tmpfs mount ("nosuid", plus "ro" when readonly)
// and a no-op release func.
func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) {
	options := []string{"nosuid"}
	if m.readonly {
		options = append(options, "ro")
	}
	mounts := []mount.Mount{{
		Type:    "tmpfs",
		Source:  "tmpfs",
		Options: options,
	}}
	noopRelease := func() error { return nil }
	return mounts, noopRelease, nil
}
// IdentityMapping returns the mount's identity mapping (may be nil).
func (m *tmpfsMount) IdentityMapping() *idtools.IdentityMapping {
	return m.idmap
}
// cacheRefsLocker serializes operations on individual cache mount keys;
// sharedCacheRefs is the process-wide registry of shared cache mounts.
var cacheRefsLocker = locker.New()
var sharedCacheRefs = &cacheRefs{}

// cacheRefs tracks live shared cache-mount references keyed by cache ID.
type cacheRefs struct {
	mu sync.Mutex
	// shares maps key -> live share; lazily initialized in get().
	shares map[string]*cacheRefShare
}
// ClearActiveCacheMounts clears shared cache mounts currently in use.
// Caller needs to hold CacheMountsLocker before calling.
// The map is re-created lazily on the next get().
func ClearActiveCacheMounts() {
	sharedCacheRefs.shares = nil
}
// CacheMountsLocker exposes the mutex guarding the shared cache-mount
// registry so external callers can hold it across ClearActiveCacheMounts.
func CacheMountsLocker() sync.Locker {
	return &sharedCacheRefs.mu
}
// get returns a clone of the share registered under key, creating the
// share via fn on first use. Holding r.mu for the whole call (including
// fn and clone) keeps creation and cloning atomic with respect to release.
func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.shares == nil {
		r.shares = map[string]*cacheRefShare{}
	}

	share, ok := r.shares[key]
	if ok {
		return share.clone(), nil
	}

	mref, err := fn()
	if err != nil {
		return nil, err
	}

	share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}}
	r.shares[key] = share
	return share.clone(), nil
}
// cacheRefShare wraps a single MutableRef that is shared by multiple
// cacheRef clones; the underlying ref is released only when the last
// clone is released.
type cacheRefShare struct {
	cache.MutableRef
	mu   sync.Mutex
	refs map[*cacheRef]struct{} // live clones of this share
	main *cacheRefs             // owning registry; nil for unregistered shares
	key  string
}
// clone registers and returns a new cacheRef handle on this share.
func (r *cacheRefShare) clone() cache.MutableRef {
	cacheRef := &cacheRef{cacheRefShare: r}
	// Test hook: widens the race window between allocation and
	// registration (see TestCacheMountSharedRefsDeadlock).
	if cacheRefCloneHijack != nil {
		cacheRefCloneHijack()
	}
	r.mu.Lock()
	r.refs[cacheRef] = struct{}{}
	r.mu.Unlock()
	return cacheRef
}
// release unregisters the share from its registry and releases the
// underlying MutableRef. Callers must hold main.mu when main is non-nil
// (cacheRef.Release does).
func (r *cacheRefShare) release(ctx context.Context) error {
	if r.main != nil {
		delete(r.main.shares, r.key)
	}
	return r.MutableRef.Release(ctx)
}
// Test hooks used to inject delays into release/clone and provoke the
// race from moby/buildkit#1322; nil in production.
var cacheRefReleaseHijack func()
var cacheRefCloneHijack func()

// cacheRef is one caller's handle on a shared cache mount.
type cacheRef struct {
	*cacheRefShare
}
// Release drops this handle from the share and, when it was the last one,
// releases the share itself. Lock order: registry mutex (main.mu) before
// the share mutex — release() relies on main.mu being held here.
func (r *cacheRef) Release(ctx context.Context) error {
	if r.main != nil {
		r.main.mu.Lock()
		defer r.main.mu.Unlock()
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.refs, r)
	if len(r.refs) == 0 {
		// Test hook (see cacheRefReleaseHijack above).
		if cacheRefReleaseHijack != nil {
			cacheRefReleaseHijack()
		}
		return r.release(ctx)
	}
	return nil
}
func parseExtraHosts(ips []*pb.HostIP) ([]executor.HostIP, error) {
out := make([]executor.HostIP, len(ips))
for i, hip := range ips {

View File

@ -1,140 +1,11 @@
package ops
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/diff/apply"
"github.com/containerd/containerd/leases"
ctdmetadata "github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/snapshots"
"github.com/containerd/containerd/snapshots/native"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/snapshot"
containerdsnapshot "github.com/moby/buildkit/snapshot/containerd"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/leaseutil"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
bolt "go.etcd.io/bbolt"
"golang.org/x/sync/errgroup"
)
// cmOpt configures newCacheManager; zero values select a native
// snapshotter in a fresh temp directory.
type cmOpt struct {
	snapshotterName string
	snapshotter     snapshots.Snapshotter
	tmpdir          string // if set, reused instead of a new temp dir
}

// cmOut bundles the handles newCacheManager produces for a test.
type cmOut struct {
	manager cache.Manager
	lm      leases.Manager
	cs      content.Store
	md      *metadata.Store
}
// newCacheManager builds a fully wired cache.Manager on top of a local
// containerd metadata DB for tests. The returned cleanup runs the
// accumulated defers in reverse order; it is invoked automatically if
// construction fails partway.
func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() error, err error) {
	ns, ok := namespaces.Namespace(ctx)
	if !ok {
		return nil, nil, errors.Errorf("namespace required for test")
	}

	if opt.snapshotterName == "" {
		opt.snapshotterName = "native"
	}

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	if err != nil {
		return nil, nil, err
	}

	defers := make([]func() error, 0)
	cleanup = func() error {
		var err error
		// Run in reverse registration order, keeping the first error.
		for i := range defers {
			if err1 := defers[len(defers)-1-i](); err1 != nil && err == nil {
				err = err1
			}
		}
		return err
	}
	defer func() {
		if err != nil {
			cleanup()
		}
	}()
	if opt.tmpdir == "" {
		defers = append(defers, func() error {
			return os.RemoveAll(tmpdir)
		})
	} else {
		// Caller supplied a dir: discard the freshly created one and
		// use (and leave behind) the caller's.
		os.RemoveAll(tmpdir)
		tmpdir = opt.tmpdir
	}

	if opt.snapshotter == nil {
		snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
		if err != nil {
			return nil, nil, err
		}
		opt.snapshotter = snapshotter
	}

	md, err := metadata.NewStore(filepath.Join(tmpdir, "metadata.db"))
	if err != nil {
		return nil, nil, err
	}

	store, err := local.NewStore(tmpdir)
	if err != nil {
		return nil, nil, err
	}

	db, err := bolt.Open(filepath.Join(tmpdir, "containerdmeta.db"), 0644, nil)
	if err != nil {
		return nil, nil, err
	}
	defers = append(defers, func() error {
		return db.Close()
	})

	mdb := ctdmetadata.NewDB(db, store, map[string]snapshots.Snapshotter{
		opt.snapshotterName: opt.snapshotter,
	})
	if err := mdb.Init(context.TODO()); err != nil {
		return nil, nil, err
	}

	lm := ctdmetadata.NewLeaseManager(mdb)

	cm, err := cache.NewManager(cache.ManagerOpt{
		Snapshotter:    snapshot.FromContainerdSnapshotter(opt.snapshotterName, containerdsnapshot.NSSnapshotter(ns, mdb.Snapshotter(opt.snapshotterName)), nil),
		MetadataStore:  md,
		ContentStore:   mdb.ContentStore(),
		LeaseManager:   leaseutil.WithNamespace(lm, ns),
		GarbageCollect: mdb.GarbageCollect,
		Applier:        apply.NewFileSystemApplier(mdb.ContentStore()),
	})
	if err != nil {
		return nil, nil, err
	}
	return &cmOut{
		manager: cm,
		lm:      lm,
		cs:      mdb.ContentStore(),
		md:      md,
	}, cleanup, nil
}
func TestDedupPaths(t *testing.T) {
res := dedupePaths([]string{"Gemfile", "Gemfile/foo"})
require.Equal(t, []string{"Gemfile"}, res)
@ -154,254 +25,3 @@ func TestDedupPaths(t *testing.T) {
res = dedupePaths([]string{"foo/bar/baz", "foo/bara", "foo/bar/bax", "foo/bar"})
require.Equal(t, []string{"foo/bar", "foo/bara"}, res)
}
// newRefGetter builds a cacheRefGetter for tests, simulating one "mount
// group"; passing the same shared registry to several getters models
// concurrent builds sharing cache mounts.
func newRefGetter(m cache.Manager, md *metadata.Store, shared *cacheRefs) *cacheRefGetter {
	return &cacheRefGetter{
		locker:          &sync.Mutex{},
		cacheMounts:     map[string]*cacheRefShare{},
		cm:              m,
		md:              md,
		globalCacheRefs: shared,
	}
}
// TestCacheMountPrivateRefs checks CacheSharingOpt_PRIVATE semantics:
// within one getter the same ID shares a ref, across getters it does not,
// and an ID becomes reusable only after all its refs are released.
func TestCacheMountPrivateRefs(t *testing.T) {
	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()

	g1 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g2 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g3 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g4 := newRefGetter(co.manager, co.md, sharedCacheRefs)

	ref, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)

	ref2, err := g1.getRefCacheDir(ctx, nil, "bar", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)

	// different ID returns different ref
	require.NotEqual(t, ref.ID(), ref2.ID())

	// same ID on same mount still shares the reference
	ref3, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)

	require.Equal(t, ref.ID(), ref3.ID())

	// same ID on different mount gets a new ID
	ref4, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)

	require.NotEqual(t, ref.ID(), ref4.ID())

	// releasing one of two refs still keeps first ID private
	ref.Release(context.TODO())

	ref5, err := g3.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)
	require.NotEqual(t, ref.ID(), ref5.ID())
	require.NotEqual(t, ref4.ID(), ref5.ID())

	// releasing all refs releases ID to be reused
	ref3.Release(context.TODO())

	ref5, err = g4.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)
	require.Equal(t, ref.ID(), ref5.ID())

	// other mounts still keep their IDs
	ref6, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)
	require.Equal(t, ref4.ID(), ref6.ID())
}
// TestCacheMountSharedRefs checks CacheSharingOpt_SHARED semantics: the
// same ID resolves to the same ref across different getters, while
// PRIVATE on the same ID still gets a fresh ref.
func TestCacheMountSharedRefs(t *testing.T) {
	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()

	g1 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g2 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g3 := newRefGetter(co.manager, co.md, sharedCacheRefs)

	ref, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_SHARED)
	require.NoError(t, err)

	ref2, err := g1.getRefCacheDir(ctx, nil, "bar", pb.CacheSharingOpt_SHARED)
	require.NoError(t, err)

	// different ID returns different ref
	require.NotEqual(t, ref.ID(), ref2.ID())

	// same ID on same mount still shares the reference
	ref3, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_SHARED)
	require.NoError(t, err)

	require.Equal(t, ref.ID(), ref3.ID())

	// same ID on different mount gets same ID
	ref4, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_SHARED)
	require.NoError(t, err)

	require.Equal(t, ref.ID(), ref4.ID())

	// private gets a new ID
	ref5, err := g3.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_PRIVATE)
	require.NoError(t, err)
	require.NotEqual(t, ref.ID(), ref5.ID())
}
// TestCacheMountLockedRefs checks CacheSharingOpt_LOCKED semantics: a
// second mount group blocks on the same ID until the first group releases
// all its refs, then receives the same underlying ref.
func TestCacheMountLockedRefs(t *testing.T) {
	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()

	g1 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g2 := newRefGetter(co.manager, co.md, sharedCacheRefs)

	ref, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_LOCKED)
	require.NoError(t, err)

	ref2, err := g1.getRefCacheDir(ctx, nil, "bar", pb.CacheSharingOpt_LOCKED)
	require.NoError(t, err)

	// different ID returns different ref
	require.NotEqual(t, ref.ID(), ref2.ID())

	// same ID on same mount still shares the reference
	ref3, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_LOCKED)
	require.NoError(t, err)

	require.Equal(t, ref.ID(), ref3.ID())

	// same ID on different mount blocks
	gotRef4 := make(chan struct{})
	go func() {
		ref4, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_LOCKED)
		require.NoError(t, err)
		require.Equal(t, ref.ID(), ref4.ID())
		close(gotRef4)
	}()

	// Timing-based check: 500ms without progress is taken as "blocked".
	select {
	case <-gotRef4:
		require.FailNow(t, "mount did not lock")
	case <-time.After(500 * time.Millisecond):
	}

	ref.Release(ctx)
	ref3.Release(ctx)

	select {
	case <-gotRef4:
	case <-time.After(500 * time.Millisecond):
		require.FailNow(t, "mount did not unlock")
	}
}
// moby/buildkit#1322
// TestCacheMountSharedRefsDeadlock regresses a deadlock between releasing
// the last ref of a shared cache mount and acquiring a new one; the
// hijack hooks stretch the critical windows. Not parallel because it
// mutates the package-level hooks.
func TestCacheMountSharedRefsDeadlock(t *testing.T) {
	// not parallel
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()

	// Local registry so the hijacked timing cannot leak into other tests.
	var sharedCacheRefs = &cacheRefs{}

	g1 := newRefGetter(co.manager, co.md, sharedCacheRefs)
	g2 := newRefGetter(co.manager, co.md, sharedCacheRefs)

	ref, err := g1.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_SHARED)
	require.NoError(t, err)

	cacheRefReleaseHijack = func() {
		time.Sleep(200 * time.Millisecond)
	}
	cacheRefCloneHijack = func() {
		time.Sleep(400 * time.Millisecond)
	}
	defer func() {
		cacheRefReleaseHijack = nil
		cacheRefCloneHijack = nil
	}()
	eg, _ := errgroup.WithContext(context.TODO())

	eg.Go(func() error {
		return ref.Release(context.TODO())
	})

	eg.Go(func() error {
		_, err := g2.getRefCacheDir(ctx, nil, "foo", pb.CacheSharingOpt_SHARED)
		return err
	})

	done := make(chan struct{})
	go func() {
		err = eg.Wait()
		require.NoError(t, err)
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(10 * time.Second):
		require.FailNow(t, "deadlock on releasing while getting new ref")
	}
}

View File

@ -107,7 +107,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
var res *frontend.Result
if s.gatewayForwarder != nil && req.Definition == nil && req.Frontend == "" {
fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController, req.FrontendInputs, sessionID)
fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController, req.FrontendInputs, sessionID, s.sm)
defer fwd.Discard()
if err := s.gatewayForwarder.RegisterBuild(ctx, id, fwd); err != nil {
return nil, err

View File

@ -551,6 +551,7 @@ type Mount struct {
CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"`
SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"`
SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"`
ResultID string `protobuf:"bytes,23,opt,name=resultID,proto3" json:"resultID,omitempty"`
}
func (m *Mount) Reset() { *m = Mount{} }
@ -631,6 +632,13 @@ func (m *Mount) GetSSHOpt() *SSHOpt {
return nil
}
func (m *Mount) GetResultID() string {
if m != nil {
return m.ResultID
}
return ""
}
// CacheOpt defines options specific to cache mounts
type CacheOpt struct {
// ID is an optional namespace for the mount
@ -2316,144 +2324,145 @@ func init() {
func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) }
var fileDescriptor_8de16154b2733812 = []byte{
// 2189 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0xc9,
0xf1, 0x17, 0xdf, 0x64, 0x51, 0x92, 0xf9, 0xef, 0xf5, 0xee, 0x9f, 0xab, 0x38, 0x92, 0x76, 0xec,
0x2c, 0x64, 0xd9, 0xa6, 0x00, 0x2d, 0xb0, 0x5e, 0x2c, 0x82, 0x20, 0xe2, 0xc3, 0x10, 0xd7, 0xb6,
0x28, 0x34, 0xfd, 0xc8, 0xcd, 0x18, 0x0d, 0x9b, 0xd4, 0x40, 0xe4, 0xf4, 0xa0, 0xa7, 0x69, 0x8b,
0x97, 0x1c, 0xfc, 0x09, 0x16, 0x08, 0x90, 0x5b, 0x02, 0xe4, 0x12, 0x20, 0xf7, 0x5c, 0x73, 0xdf,
0xe3, 0x22, 0xc8, 0x61, 0x91, 0xc3, 0x26, 0xb0, 0x3f, 0x47, 0x80, 0xa0, 0xaa, 0x7b, 0x1e, 0x94,
0x65, 0xd8, 0x46, 0x82, 0x9c, 0xd8, 0xfd, 0xab, 0x5f, 0x57, 0x57, 0x57, 0x55, 0xd7, 0x54, 0x13,
0x6a, 0x32, 0x8c, 0x5a, 0xa1, 0x92, 0x5a, 0xb2, 0x7c, 0x78, 0xb2, 0x71, 0x67, 0xe2, 0xeb, 0xd3,
0xf9, 0x49, 0xcb, 0x93, 0xb3, 0xbd, 0x89, 0x9c, 0xc8, 0x3d, 0x12, 0x9d, 0xcc, 0xc7, 0x34, 0xa3,
0x09, 0x8d, 0xcc, 0x12, 0xe7, 0x0f, 0x79, 0xc8, 0x0f, 0x42, 0xf6, 0x19, 0x94, 0xfd, 0x20, 0x9c,
0xeb, 0xa8, 0x99, 0xdb, 0x2e, 0xec, 0xd4, 0xf7, 0x6b, 0xad, 0xf0, 0xa4, 0xd5, 0x47, 0x84, 0x5b,
0x01, 0xdb, 0x86, 0xa2, 0x38, 0x17, 0x5e, 0x33, 0xbf, 0x9d, 0xdb, 0xa9, 0xef, 0x03, 0x12, 0x7a,
0xe7, 0xc2, 0x1b, 0x84, 0x87, 0x2b, 0x9c, 0x24, 0xec, 0x73, 0x28, 0x47, 0x72, 0xae, 0x3c, 0xd1,
0x2c, 0x10, 0x67, 0x15, 0x39, 0x43, 0x42, 0x88, 0x65, 0xa5, 0xa8, 0x69, 0xec, 0x4f, 0x45, 0xb3,
0x98, 0x6a, 0xba, 0xe7, 0x4f, 0x0d, 0x87, 0x24, 0xec, 0x3a, 0x94, 0x4e, 0xe6, 0xfe, 0x74, 0xd4,
0x2c, 0x11, 0xa5, 0x8e, 0x94, 0x36, 0x02, 0xc4, 0x31, 0x32, 0xb6, 0x03, 0xd5, 0x70, 0xea, 0xea,
0xb1, 0x54, 0xb3, 0x26, 0xa4, 0x1b, 0x1e, 0x5b, 0x8c, 0x27, 0x52, 0x76, 0x17, 0xea, 0x9e, 0x0c,
0x22, 0xad, 0x5c, 0x3f, 0xd0, 0x51, 0xb3, 0x4e, 0xe4, 0x8f, 0x91, 0xfc, 0x54, 0xaa, 0x33, 0xa1,
0x3a, 0xa9, 0x90, 0x67, 0x99, 0xed, 0x22, 0xe4, 0x65, 0xe8, 0xfc, 0x36, 0x07, 0xd5, 0x58, 0x2b,
0x73, 0x60, 0xf5, 0x40, 0x79, 0xa7, 0xbe, 0x16, 0x9e, 0x9e, 0x2b, 0xd1, 0xcc, 0x6d, 0xe7, 0x76,
0x6a, 0x7c, 0x09, 0x63, 0xeb, 0x90, 0x1f, 0x0c, 0xc9, 0x51, 0x35, 0x9e, 0x1f, 0x0c, 0x59, 0x13,
0x2a, 0x4f, 0x5c, 0xe5, 0xbb, 0x81, 0x26, 0xcf, 0xd4, 0x78, 0x3c, 0x65, 0xd7, 0xa0, 0x36, 0x18,
0x3e, 0x11, 0x2a, 0xf2, 0x65, 0x40, 0xfe, 0xa8, 0xf1, 0x14, 0x60, 0x9b, 0x00, 0x83, 0xe1, 0x3d,
0xe1, 0xa2, 0xd2, 0xa8, 0x59, 0xda, 0x2e, 0xec, 0xd4, 0x78, 0x06, 0x71, 0x7e, 0x0d, 0x25, 0x8a,
0x11, 0xfb, 0x06, 0xca, 0x23, 0x7f, 0x22, 0x22, 0x6d, 0xcc, 0x69, 0xef, 0x7f, 0xf7, 0xe3, 0xd6,
0xca, 0xdf, 0x7f, 0xdc, 0xda, 0xcd, 0x24, 0x83, 0x0c, 0x45, 0xe0, 0xc9, 0x40, 0xbb, 0x7e, 0x20,
0x54, 0xb4, 0x37, 0x91, 0x77, 0xcc, 0x92, 0x56, 0x97, 0x7e, 0xb8, 0xd5, 0xc0, 0x6e, 0x42, 0xc9,
0x0f, 0x46, 0xe2, 0x9c, 0xec, 0x2f, 0xb4, 0x3f, 0xb2, 0xaa, 0xea, 0x83, 0xb9, 0x0e, 0xe7, 0xba,
0x8f, 0x22, 0x6e, 0x18, 0xce, 0xef, 0x73, 0x50, 0x36, 0x39, 0xc0, 0xae, 0x41, 0x71, 0x26, 0xb4,
0x4b, 0xfb, 0xd7, 0xf7, 0xab, 0xe8, 0xdb, 0x87, 0x42, 0xbb, 0x9c, 0x50, 0x4c, 0xaf, 0x99, 0x9c,
0xa3, 0xef, 0xf3, 0x69, 0x7a, 0x3d, 0x44, 0x84, 0x5b, 0x01, 0xfb, 0x19, 0x54, 0x02, 0xa1, 0x5f,
0x48, 0x75, 0x46, 0x3e, 0x5a, 0x37, 0x41, 0x3f, 0x12, 0xfa, 0xa1, 0x1c, 0x09, 0x1e, 0xcb, 0xd8,
0x6d, 0xa8, 0x46, 0xc2, 0x9b, 0x2b, 0x5f, 0x2f, 0xc8, 0x5f, 0xeb, 0xfb, 0x0d, 0xca, 0x32, 0x8b,
0x11, 0x39, 0x61, 0x38, 0x7f, 0xca, 0x41, 0x11, 0xcd, 0x60, 0x0c, 0x8a, 0xae, 0x9a, 0x98, 0xec,
0xae, 0x71, 0x1a, 0xb3, 0x06, 0x14, 0x44, 0xf0, 0x9c, 0x2c, 0xaa, 0x71, 0x1c, 0x22, 0xe2, 0xbd,
0x18, 0xd9, 0x18, 0xe1, 0x10, 0xd7, 0xcd, 0x23, 0xa1, 0x6c, 0x68, 0x68, 0xcc, 0x6e, 0x42, 0x2d,
0x54, 0xf2, 0x7c, 0xf1, 0x0c, 0x57, 0x97, 0x32, 0x89, 0x87, 0x60, 0x2f, 0x78, 0xce, 0xab, 0xa1,
0x1d, 0xb1, 0x5d, 0x00, 0x71, 0xae, 0x95, 0x7b, 0x28, 0x23, 0x1d, 0x35, 0xcb, 0x74, 0x76, 0xca,
0x77, 0x04, 0xfa, 0xc7, 0x3c, 0x23, 0x75, 0xfe, 0x9a, 0x87, 0x12, 0xb9, 0x84, 0xed, 0x60, 0x04,
0xc2, 0xb9, 0x09, 0x66, 0xa1, 0xcd, 0x6c, 0x04, 0x80, 0x62, 0x9d, 0x04, 0x00, 0xe3, 0xbe, 0x81,
0xde, 0x98, 0x0a, 0x4f, 0x4b, 0x65, 0xd3, 0x2d, 0x99, 0xa3, 0xe9, 0x23, 0xcc, 0x08, 0x73, 0x1a,
0x1a, 0xb3, 0x5b, 0x50, 0x96, 0x14, 0x46, 0x3a, 0xd0, 0x5b, 0x82, 0x6b, 0x29, 0xa8, 0x5c, 0x09,
0x77, 0x24, 0x83, 0xe9, 0x82, 0x8e, 0x59, 0xe5, 0xc9, 0x9c, 0xdd, 0x82, 0x1a, 0xc5, 0xed, 0xd1,
0x22, 0x14, 0xcd, 0x32, 0xc5, 0x61, 0x2d, 0x89, 0x29, 0x82, 0x3c, 0x95, 0xe3, 0x45, 0xf5, 0x5c,
0xef, 0x54, 0x0c, 0x42, 0xdd, 0xbc, 0x9a, 0xfa, 0xab, 0x63, 0x31, 0x9e, 0x48, 0x51, 0x6d, 0x24,
0x3c, 0x25, 0x34, 0x52, 0x3f, 0x26, 0xea, 0x9a, 0x0d, 0xaf, 0x01, 0x79, 0x2a, 0x67, 0x0e, 0x94,
0x87, 0xc3, 0x43, 0x64, 0x7e, 0x92, 0x16, 0x12, 0x83, 0x70, 0x2b, 0x71, 0xfa, 0x50, 0x8d, 0xb7,
0xc1, 0x5b, 0xd9, 0xef, 0xda, 0xfb, 0x9a, 0xef, 0x77, 0xd9, 0x1d, 0xa8, 0x44, 0xa7, 0xae, 0xf2,
0x83, 0x09, 0xf9, 0x6e, 0x7d, 0xff, 0xa3, 0xc4, 0xaa, 0xa1, 0xc1, 0x51, 0x53, 0xcc, 0x71, 0x24,
0xd4, 0x12, 0x33, 0xde, 0xd0, 0xd5, 0x80, 0xc2, 0xdc, 0x1f, 0x91, 0x9e, 0x35, 0x8e, 0x43, 0x44,
0x26, 0xbe, 0xc9, 0xa5, 0x35, 0x8e, 0x43, 0x0c, 0xc8, 0x4c, 0x8e, 0x4c, 0xd9, 0x5b, 0xe3, 0x34,
0x46, 0x1f, 0xcb, 0x50, 0xfb, 0x32, 0x70, 0xa7, 0xb1, 0x8f, 0xe3, 0xb9, 0x33, 0x8d, 0xcf, 0xf7,
0x3f, 0xd9, 0xed, 0x37, 0x39, 0xa8, 0xc6, 0xb5, 0x1a, 0x0b, 0x8f, 0x3f, 0x12, 0x81, 0xf6, 0xc7,
0xbe, 0x50, 0x76, 0xe3, 0x0c, 0xc2, 0xee, 0x40, 0xc9, 0xd5, 0x5a, 0xc5, 0xd7, 0xf9, 0xff, 0xb3,
0x85, 0xbe, 0x75, 0x80, 0x92, 0x5e, 0xa0, 0xd5, 0x82, 0x1b, 0xd6, 0xc6, 0x57, 0x00, 0x29, 0x88,
0xb6, 0x9e, 0x89, 0x85, 0xd5, 0x8a, 0x43, 0x76, 0x15, 0x4a, 0xcf, 0xdd, 0xe9, 0x5c, 0xd8, 0x1c,
0x36, 0x93, 0xaf, 0xf3, 0x5f, 0xe5, 0x9c, 0xbf, 0xe4, 0xa1, 0x62, 0x0b, 0x3f, 0xbb, 0x0d, 0x15,
0x2a, 0xfc, 0xd6, 0xa2, 0xcb, 0x2f, 0x46, 0x4c, 0x61, 0x7b, 0xc9, 0x17, 0x2d, 0x63, 0xa3, 0x55,
0x65, 0xbe, 0x6c, 0xd6, 0xc6, 0xf4, 0xfb, 0x56, 0x18, 0x89, 0xb1, 0xfd, 0x74, 0xad, 0x23, 0xbb,
0x2b, 0xc6, 0x7e, 0xe0, 0xa3, 0x7f, 0x38, 0x8a, 0xd8, 0xed, 0xf8, 0xd4, 0x45, 0xd2, 0xf8, 0x49,
0x56, 0xe3, 0x9b, 0x87, 0xee, 0x43, 0x3d, 0xb3, 0xcd, 0x25, 0xa7, 0xbe, 0x91, 0x3d, 0xb5, 0xdd,
0x92, 0xd4, 0x99, 0xef, 0x6e, 0xea, 0x85, 0xff, 0xc0, 0x7f, 0x5f, 0x02, 0xa4, 0x2a, 0xdf, 0xbf,
0xb0, 0x38, 0x2f, 0x0b, 0x00, 0x83, 0x10, 0x4b, 0xe7, 0xc8, 0xa5, 0xfa, 0xbd, 0xea, 0x4f, 0x02,
0xa9, 0xc4, 0x33, 0xba, 0xaa, 0xb4, 0xbe, 0xca, 0xeb, 0x06, 0xa3, 0x1b, 0xc3, 0x0e, 0xa0, 0x3e,
0x12, 0x91, 0xa7, 0x7c, 0x4a, 0x28, 0xeb, 0xf4, 0x2d, 0x3c, 0x53, 0xaa, 0xa7, 0xd5, 0x4d, 0x19,
0xc6, 0x57, 0xd9, 0x35, 0x6c, 0x1f, 0x56, 0xc5, 0x79, 0x28, 0x95, 0xb6, 0xbb, 0x98, 0xfe, 0xe0,
0x8a, 0xe9, 0x34, 0x10, 0xa7, 0x9d, 0x78, 0x5d, 0xa4, 0x13, 0xe6, 0x42, 0xd1, 0x73, 0x43, 0xf3,
0x71, 0xac, 0xef, 0x37, 0x2f, 0xec, 0xd7, 0x71, 0x43, 0xe3, 0xb4, 0xf6, 0x17, 0x78, 0xd6, 0x97,
0xff, 0xd8, 0xba, 0x95, 0xf9, 0x22, 0xce, 0xe4, 0xc9, 0x62, 0x8f, 0xf2, 0xe5, 0xcc, 0xd7, 0x7b,
0x73, 0xed, 0x4f, 0xf7, 0xdc, 0xd0, 0x47, 0x75, 0xb8, 0xb0, 0xdf, 0xe5, 0xa4, 0x7a, 0xe3, 0x17,
0xd0, 0xb8, 0x68, 0xf7, 0x87, 0xc4, 0x60, 0xe3, 0x2e, 0xd4, 0x12, 0x3b, 0xde, 0xb5, 0xb0, 0x9a,
0x0d, 0xde, 0x9f, 0x73, 0x50, 0x36, 0xb7, 0x8a, 0xdd, 0x85, 0xda, 0x54, 0x7a, 0x2e, 0x1a, 0x10,
0xb7, 0x68, 0x9f, 0xa6, 0x97, 0xae, 0xf5, 0x20, 0x96, 0x19, 0xaf, 0xa6, 0x5c, 0x4c, 0x32, 0x3f,
0x18, 0xcb, 0xf8, 0x16, 0xac, 0xa7, 0x8b, 0xfa, 0xc1, 0x58, 0x72, 0x23, 0xdc, 0xb8, 0x0f, 0xeb,
0xcb, 0x2a, 0x2e, 0xb1, 0xf3, 0xfa, 0x72, 0xba, 0x52, 0x5d, 0x4e, 0x16, 0x65, 0xcd, 0xbe, 0x0b,
0xb5, 0x04, 0x67, 0xbb, 0x6f, 0x1a, 0xbe, 0x9a, 0x5d, 0x99, 0xb1, 0xd5, 0x99, 0x02, 0xa4, 0xa6,
0x61, 0xb1, 0xc2, 0x5e, 0x30, 0x70, 0x67, 0x71, 0x93, 0x95, 0xcc, 0xe9, 0xdb, 0xe6, 0x6a, 0x97,
0x4c, 0x59, 0xe5, 0x34, 0x66, 0x2d, 0x80, 0x51, 0x72, 0x61, 0xdf, 0x72, 0x8d, 0x33, 0x0c, 0x67,
0x00, 0xd5, 0xd8, 0x08, 0xb6, 0x0d, 0xf5, 0xc8, 0xee, 0x8c, 0x9d, 0x0f, 0x6e, 0x57, 0xe2, 0x59,
0x08, 0x3b, 0x18, 0xe5, 0x06, 0x13, 0xb1, 0xd4, 0xc1, 0x70, 0x44, 0xb8, 0x15, 0x38, 0x4f, 0xa1,
0x44, 0x00, 0x5e, 0xb3, 0x48, 0xbb, 0x4a, 0xdb, 0x66, 0xc8, 0x34, 0x07, 0x32, 0xa2, 0x6d, 0xdb,
0x45, 0x4c, 0x44, 0x6e, 0x08, 0xec, 0x06, 0xb6, 0x20, 0x23, 0xeb, 0xd1, 0xcb, 0x78, 0x28, 0x76,
0x7e, 0x0e, 0xd5, 0x18, 0xc6, 0x93, 0x3f, 0xf0, 0x03, 0x61, 0x4d, 0xa4, 0x31, 0x36, 0x91, 0x9d,
0x53, 0x57, 0xb9, 0x9e, 0x16, 0xa6, 0x0d, 0x28, 0xf1, 0x14, 0x70, 0xae, 0x43, 0x3d, 0x73, 0x7b,
0x30, 0xdd, 0x9e, 0x50, 0x18, 0xcd, 0x1d, 0x36, 0x13, 0xe7, 0x25, 0xb6, 0xb8, 0x71, 0xd7, 0xf2,
0x53, 0x80, 0x53, 0xad, 0xc3, 0x67, 0xd4, 0xc6, 0x58, 0xdf, 0xd7, 0x10, 0x21, 0x06, 0xdb, 0x82,
0x3a, 0x4e, 0x22, 0x2b, 0x37, 0xf9, 0x4e, 0x2b, 0x22, 0x43, 0xf8, 0x09, 0xd4, 0xc6, 0xc9, 0xf2,
0x82, 0x0d, 0x5d, 0xbc, 0xfa, 0x53, 0xa8, 0x06, 0xd2, 0xca, 0x4c, 0x57, 0x55, 0x09, 0x24, 0x89,
0x9c, 0x5b, 0xf0, 0x7f, 0x6f, 0xf4, 0xe3, 0xec, 0x13, 0x28, 0x8f, 0xfd, 0xa9, 0xa6, 0xa2, 0x8f,
0x8d, 0x9a, 0x9d, 0x39, 0xff, 0xca, 0x01, 0xa4, 0x91, 0xc5, 0x7c, 0xc5, 0xea, 0x8d, 0x9c, 0x55,
0x53, 0xad, 0xa7, 0x50, 0x9d, 0xd9, 0x3a, 0x60, 0x63, 0x76, 0x6d, 0x39, 0x1b, 0x5a, 0x71, 0x99,
0x30, 0x15, 0x62, 0xdf, 0x56, 0x88, 0x0f, 0xe9, 0x99, 0x93, 0x1d, 0xa8, 0x19, 0xc9, 0xbe, 0x7d,
0x20, 0xbd, 0x68, 0xdc, 0x4a, 0x36, 0xee, 0xc3, 0xda, 0xd2, 0x96, 0xef, 0xf9, 0x4d, 0x48, 0xeb,
0x59, 0xf6, 0x96, 0xdd, 0x86, 0xb2, 0x69, 0x22, 0x31, 0x25, 0x70, 0x64, 0xd5, 0xd0, 0x98, 0x3a,
0x86, 0xe3, 0xf8, 0x05, 0xd2, 0x3f, 0x76, 0xf6, 0xa1, 0x6c, 0x9e, 0x58, 0x6c, 0x07, 0x2a, 0xae,
0x67, 0xae, 0x63, 0xa6, 0x24, 0xa0, 0xf0, 0x80, 0x60, 0x1e, 0x8b, 0x9d, 0xbf, 0xe5, 0x01, 0x52,
0xfc, 0x03, 0xba, 0xd2, 0xaf, 0x61, 0x3d, 0x12, 0x9e, 0x0c, 0x46, 0xae, 0x5a, 0x90, 0xd4, 0x3e,
0x25, 0x2e, 0x5b, 0x72, 0x81, 0x99, 0xe9, 0x50, 0x0b, 0xef, 0xee, 0x50, 0x77, 0xa0, 0xe8, 0xc9,
0x70, 0x61, 0x3f, 0x14, 0x6c, 0xf9, 0x20, 0x1d, 0x19, 0x2e, 0xf0, 0x41, 0x89, 0x0c, 0xd6, 0x82,
0xf2, 0xec, 0x8c, 0x1e, 0x9d, 0xa6, 0x61, 0xbf, 0xba, 0xcc, 0x7d, 0x78, 0x86, 0x63, 0x7c, 0xa2,
0x1a, 0x16, 0xbb, 0x05, 0xa5, 0xd9, 0xd9, 0xc8, 0x57, 0xd4, 0xdb, 0xd6, 0x4d, 0x67, 0x98, 0xa5,
0x77, 0x7d, 0x85, 0x0f, 0x51, 0xe2, 0x30, 0x07, 0xf2, 0x6a, 0xd6, 0xac, 0x10, 0xb3, 0x71, 0xc1,
0x9b, 0xb3, 0xc3, 0x15, 0x9e, 0x57, 0xb3, 0x76, 0x15, 0xca, 0xc6, 0xaf, 0xce, 0x1f, 0x0b, 0xb0,
0xbe, 0x6c, 0x25, 0xe6, 0x41, 0xa4, 0xbc, 0x38, 0x0f, 0x22, 0xe5, 0x25, 0xcd, 0x7b, 0x3e, 0xd3,
0xbc, 0x3b, 0x50, 0x92, 0x2f, 0x02, 0xa1, 0xb2, 0xaf, 0xeb, 0xce, 0xa9, 0x7c, 0x11, 0x60, 0x9b,
0x6a, 0x44, 0x4b, 0x5d, 0x5f, 0xc9, 0x76, 0x7d, 0x37, 0x60, 0x6d, 0x2c, 0xa7, 0x53, 0xf9, 0x62,
0xb8, 0x98, 0x4d, 0xfd, 0xe0, 0xcc, 0xb6, 0x7e, 0xcb, 0x20, 0xdb, 0x81, 0x2b, 0x23, 0x5f, 0xa1,
0x39, 0x1d, 0x19, 0x68, 0x11, 0xd0, 0x7b, 0x05, 0x79, 0x17, 0x61, 0xf6, 0x0d, 0x6c, 0xbb, 0x5a,
0x8b, 0x59, 0xa8, 0x1f, 0x07, 0xa1, 0xeb, 0x9d, 0x75, 0xa5, 0x47, 0x77, 0x76, 0x16, 0xba, 0xda,
0x3f, 0xf1, 0xa7, 0xf8, 0x34, 0xab, 0xd0, 0xd2, 0x77, 0xf2, 0xd8, 0xe7, 0xb0, 0xee, 0x29, 0xe1,
0x6a, 0xd1, 0x15, 0x91, 0x3e, 0x76, 0xf5, 0x69, 0xb3, 0x4a, 0x2b, 0x2f, 0xa0, 0x78, 0x06, 0x17,
0xad, 0x7d, 0xea, 0x4f, 0x47, 0x9e, 0xab, 0x46, 0xcd, 0x9a, 0x39, 0xc3, 0x12, 0xc8, 0x5a, 0xc0,
0x08, 0xe8, 0xcd, 0x42, 0xbd, 0x48, 0xa8, 0x40, 0xd4, 0x4b, 0x24, 0x58, 0x38, 0xb5, 0x3f, 0x13,
0x91, 0x76, 0x67, 0x21, 0xfd, 0x2b, 0x50, 0xe0, 0x29, 0xe0, 0x7c, 0x9b, 0x83, 0xc6, 0xc5, 0x14,
0x41, 0x07, 0x87, 0x68, 0xa6, 0xbd, 0x6c, 0x38, 0x4e, 0x9c, 0x9e, 0xcf, 0x38, 0x3d, 0xfe, 0x42,
0x15, 0x32, 0x5f, 0xa8, 0x24, 0x80, 0xc5, 0xb7, 0x07, 0x70, 0xc9, 0xa4, 0xd2, 0x45, 0x93, 0x7e,
0x97, 0x83, 0x2b, 0x17, 0xd2, 0xf0, 0xbd, 0x2d, 0xda, 0x86, 0xfa, 0xcc, 0x3d, 0x13, 0xc7, 0xae,
0xa2, 0xe0, 0x16, 0x4c, 0x0b, 0x97, 0x81, 0xfe, 0x0b, 0xf6, 0x05, 0xb0, 0x9a, 0xcd, 0xfd, 0x4b,
0x6d, 0x8b, 0x43, 0x79, 0x24, 0xf5, 0x3d, 0x39, 0xb7, 0x5f, 0xbf, 0x38, 0x94, 0x31, 0xf8, 0x66,
0xc0, 0x0b, 0x97, 0x04, 0xdc, 0x39, 0x82, 0x6a, 0x6c, 0x20, 0xdb, 0xb2, 0x4f, 0xf5, 0x5c, 0xfa,
0x97, 0xd1, 0xe3, 0x48, 0x28, 0xb4, 0xdd, 0xbc, 0xdb, 0x3f, 0x83, 0xd2, 0x44, 0xc9, 0x79, 0x68,
0x6b, 0xeb, 0x12, 0xc3, 0x48, 0x9c, 0x21, 0x54, 0x2c, 0xc2, 0x76, 0xa1, 0x7c, 0xb2, 0x38, 0x8a,
0x9b, 0x0f, 0x7b, 0xb1, 0x71, 0x3e, 0xb2, 0x0c, 0xac, 0x16, 0x86, 0xc1, 0xae, 0x42, 0xf1, 0x64,
0xd1, 0xef, 0x9a, 0x07, 0x19, 0xd6, 0x1c, 0x9c, 0xb5, 0xcb, 0xc6, 0x20, 0xe7, 0x01, 0xac, 0x66,
0xd7, 0xa1, 0x53, 0x32, 0x4d, 0x0d, 0x8d, 0xd3, 0xe2, 0x9a, 0x7f, 0x47, 0x71, 0xdd, 0xdd, 0x81,
0x8a, 0xfd, 0x53, 0x84, 0xd5, 0xa0, 0xf4, 0xf8, 0x68, 0xd8, 0x7b, 0xd4, 0x58, 0x61, 0x55, 0x28,
0x1e, 0x0e, 0x86, 0x8f, 0x1a, 0x39, 0x1c, 0x1d, 0x0d, 0x8e, 0x7a, 0x8d, 0xfc, 0xee, 0x4d, 0x58,
0xcd, 0xfe, 0x2d, 0xc2, 0xea, 0x50, 0x19, 0x1e, 0x1c, 0x75, 0xdb, 0x83, 0x5f, 0x35, 0x56, 0xd8,
0x2a, 0x54, 0xfb, 0x47, 0xc3, 0x5e, 0xe7, 0x31, 0xef, 0x35, 0x72, 0xbb, 0xbf, 0x84, 0x5a, 0xf2,
0x72, 0x47, 0x0d, 0xed, 0xfe, 0x51, 0xb7, 0xb1, 0xc2, 0x00, 0xca, 0xc3, 0x5e, 0x87, 0xf7, 0x50,
0x6f, 0x05, 0x0a, 0xc3, 0xe1, 0x61, 0x23, 0x8f, 0xbb, 0x76, 0x0e, 0x3a, 0x87, 0xbd, 0x46, 0x01,
0x87, 0x8f, 0x1e, 0x1e, 0xdf, 0x1b, 0x36, 0x8a, 0xbb, 0x5f, 0xc2, 0x95, 0x0b, 0x2f, 0x67, 0x5a,
0x7d, 0x78, 0xc0, 0x7b, 0xa8, 0xa9, 0x0e, 0x95, 0x63, 0xde, 0x7f, 0x72, 0xf0, 0xa8, 0xd7, 0xc8,
0xa1, 0xe0, 0xc1, 0xa0, 0x73, 0xbf, 0xd7, 0x6d, 0xe4, 0xdb, 0xd7, 0xbe, 0x7b, 0xb5, 0x99, 0xfb,
0xfe, 0xd5, 0x66, 0xee, 0x87, 0x57, 0x9b, 0xb9, 0x7f, 0xbe, 0xda, 0xcc, 0x7d, 0xfb, 0x7a, 0x73,
0xe5, 0xfb, 0xd7, 0x9b, 0x2b, 0x3f, 0xbc, 0xde, 0x5c, 0x39, 0x29, 0xd3, 0x9f, 0x94, 0x5f, 0xfc,
0x3b, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x60, 0x46, 0x7d, 0xe4, 0x14, 0x00, 0x00,
// 2201 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x1b, 0xc7,
0x15, 0x17, 0xbf, 0xc9, 0x47, 0x49, 0x66, 0x27, 0x4e, 0xc2, 0xa8, 0xae, 0xa4, 0x6c, 0xdc, 0x40,
0x96, 0x6d, 0x0a, 0x50, 0x80, 0x38, 0x08, 0x8a, 0xa2, 0xe2, 0x87, 0x21, 0xc6, 0xb6, 0x28, 0x0c,
0xfd, 0xd1, 0x9b, 0xb1, 0x5a, 0x0e, 0xa9, 0x85, 0xc8, 0x9d, 0xc5, 0xec, 0xd0, 0x16, 0x2f, 0x3d,
0xf8, 0x2f, 0x08, 0x50, 0xa0, 0xb7, 0x16, 0xe8, 0xa5, 0x40, 0xef, 0xbd, 0xf6, 0x9e, 0x63, 0x0e,
0x3d, 0x04, 0x3d, 0xa4, 0x85, 0x7d, 0xef, 0x7f, 0x50, 0xa0, 0x78, 0x6f, 0x66, 0x3f, 0x28, 0xcb,
0xb0, 0x8d, 0x16, 0x3d, 0xed, 0xcc, 0x7b, 0xbf, 0x79, 0xf3, 0xe6, 0x7d, 0xcd, 0x9b, 0x85, 0x9a,
0x0c, 0xa3, 0x56, 0xa8, 0xa4, 0x96, 0x2c, 0x1f, 0x9e, 0x6c, 0xdc, 0x9e, 0xf8, 0xfa, 0x74, 0x7e,
0xd2, 0xf2, 0xe4, 0x6c, 0x6f, 0x22, 0x27, 0x72, 0x8f, 0x58, 0x27, 0xf3, 0x31, 0xcd, 0x68, 0x42,
0x23, 0xb3, 0xc4, 0xf9, 0x63, 0x1e, 0xf2, 0x83, 0x90, 0x7d, 0x0a, 0x65, 0x3f, 0x08, 0xe7, 0x3a,
0x6a, 0xe6, 0xb6, 0x0b, 0x3b, 0xf5, 0xfd, 0x5a, 0x2b, 0x3c, 0x69, 0xf5, 0x91, 0xc2, 0x2d, 0x83,
0x6d, 0x43, 0x51, 0x9c, 0x0b, 0xaf, 0x99, 0xdf, 0xce, 0xed, 0xd4, 0xf7, 0x01, 0x01, 0xbd, 0x73,
0xe1, 0x0d, 0xc2, 0xc3, 0x15, 0x4e, 0x1c, 0xf6, 0x39, 0x94, 0x23, 0x39, 0x57, 0x9e, 0x68, 0x16,
0x08, 0xb3, 0x8a, 0x98, 0x21, 0x51, 0x08, 0x65, 0xb9, 0x28, 0x69, 0xec, 0x4f, 0x45, 0xb3, 0x98,
0x4a, 0xba, 0xeb, 0x4f, 0x0d, 0x86, 0x38, 0xec, 0x33, 0x28, 0x9d, 0xcc, 0xfd, 0xe9, 0xa8, 0x59,
0x22, 0x48, 0x1d, 0x21, 0x6d, 0x24, 0x10, 0xc6, 0xf0, 0xd8, 0x0e, 0x54, 0xc3, 0xa9, 0xab, 0xc7,
0x52, 0xcd, 0x9a, 0x90, 0x6e, 0x78, 0x6c, 0x69, 0x3c, 0xe1, 0xb2, 0x3b, 0x50, 0xf7, 0x64, 0x10,
0x69, 0xe5, 0xfa, 0x81, 0x8e, 0x9a, 0x75, 0x02, 0x7f, 0x88, 0xe0, 0x27, 0x52, 0x9d, 0x09, 0xd5,
0x49, 0x99, 0x3c, 0x8b, 0x6c, 0x17, 0x21, 0x2f, 0x43, 0xe7, 0x77, 0x39, 0xa8, 0xc6, 0x52, 0x99,
0x03, 0xab, 0x07, 0xca, 0x3b, 0xf5, 0xb5, 0xf0, 0xf4, 0x5c, 0x89, 0x66, 0x6e, 0x3b, 0xb7, 0x53,
0xe3, 0x4b, 0x34, 0xb6, 0x0e, 0xf9, 0xc1, 0x90, 0x0c, 0x55, 0xe3, 0xf9, 0xc1, 0x90, 0x35, 0xa1,
0xf2, 0xd8, 0x55, 0xbe, 0x1b, 0x68, 0xb2, 0x4c, 0x8d, 0xc7, 0x53, 0x76, 0x0d, 0x6a, 0x83, 0xe1,
0x63, 0xa1, 0x22, 0x5f, 0x06, 0x64, 0x8f, 0x1a, 0x4f, 0x09, 0x6c, 0x13, 0x60, 0x30, 0xbc, 0x2b,
0x5c, 0x14, 0x1a, 0x35, 0x4b, 0xdb, 0x85, 0x9d, 0x1a, 0xcf, 0x50, 0x9c, 0xdf, 0x40, 0x89, 0x7c,
0xc4, 0xbe, 0x81, 0xf2, 0xc8, 0x9f, 0x88, 0x48, 0x1b, 0x75, 0xda, 0xfb, 0xdf, 0xfd, 0xb8, 0xb5,
0xf2, 0xf7, 0x1f, 0xb7, 0x76, 0x33, 0xc1, 0x20, 0x43, 0x11, 0x78, 0x32, 0xd0, 0xae, 0x1f, 0x08,
0x15, 0xed, 0x4d, 0xe4, 0x6d, 0xb3, 0xa4, 0xd5, 0xa5, 0x0f, 0xb7, 0x12, 0xd8, 0x0d, 0x28, 0xf9,
0xc1, 0x48, 0x9c, 0x93, 0xfe, 0x85, 0xf6, 0x07, 0x56, 0x54, 0x7d, 0x30, 0xd7, 0xe1, 0x5c, 0xf7,
0x91, 0xc5, 0x0d, 0xc2, 0xf9, 0x43, 0x0e, 0xca, 0x26, 0x06, 0xd8, 0x35, 0x28, 0xce, 0x84, 0x76,
0x69, 0xff, 0xfa, 0x7e, 0x15, 0x6d, 0xfb, 0x40, 0x68, 0x97, 0x13, 0x15, 0xc3, 0x6b, 0x26, 0xe7,
0x68, 0xfb, 0x7c, 0x1a, 0x5e, 0x0f, 0x90, 0xc2, 0x2d, 0x83, 0xfd, 0x1c, 0x2a, 0x81, 0xd0, 0xcf,
0xa5, 0x3a, 0x23, 0x1b, 0xad, 0x1b, 0xa7, 0x1f, 0x09, 0xfd, 0x40, 0x8e, 0x04, 0x8f, 0x79, 0xec,
0x16, 0x54, 0x23, 0xe1, 0xcd, 0x95, 0xaf, 0x17, 0x64, 0xaf, 0xf5, 0xfd, 0x06, 0x45, 0x99, 0xa5,
0x11, 0x38, 0x41, 0x38, 0x7f, 0xce, 0x41, 0x11, 0xd5, 0x60, 0x0c, 0x8a, 0xae, 0x9a, 0x98, 0xe8,
0xae, 0x71, 0x1a, 0xb3, 0x06, 0x14, 0x44, 0xf0, 0x8c, 0x34, 0xaa, 0x71, 0x1c, 0x22, 0xc5, 0x7b,
0x3e, 0xb2, 0x3e, 0xc2, 0x21, 0xae, 0x9b, 0x47, 0x42, 0x59, 0xd7, 0xd0, 0x98, 0xdd, 0x80, 0x5a,
0xa8, 0xe4, 0xf9, 0xe2, 0x29, 0xae, 0x2e, 0x65, 0x02, 0x0f, 0x89, 0xbd, 0xe0, 0x19, 0xaf, 0x86,
0x76, 0xc4, 0x76, 0x01, 0xc4, 0xb9, 0x56, 0xee, 0xa1, 0x8c, 0x74, 0xd4, 0x2c, 0xd3, 0xd9, 0x29,
0xde, 0x91, 0xd0, 0x3f, 0xe6, 0x19, 0xae, 0xf3, 0xaf, 0x3c, 0x94, 0xc8, 0x24, 0x6c, 0x07, 0x3d,
0x10, 0xce, 0x8d, 0x33, 0x0b, 0x6d, 0x66, 0x3d, 0x00, 0xe4, 0xeb, 0xc4, 0x01, 0xe8, 0xf7, 0x0d,
0xb4, 0xc6, 0x54, 0x78, 0x5a, 0x2a, 0x1b, 0x6e, 0xc9, 0x1c, 0x55, 0x1f, 0x61, 0x44, 0x98, 0xd3,
0xd0, 0x98, 0xdd, 0x84, 0xb2, 0x24, 0x37, 0xd2, 0x81, 0xde, 0xe0, 0x5c, 0x0b, 0x41, 0xe1, 0x4a,
0xb8, 0x23, 0x19, 0x4c, 0x17, 0x74, 0xcc, 0x2a, 0x4f, 0xe6, 0xec, 0x26, 0xd4, 0xc8, 0x6f, 0x0f,
0x17, 0xa1, 0x68, 0x96, 0xc9, 0x0f, 0x6b, 0x89, 0x4f, 0x91, 0xc8, 0x53, 0x3e, 0x26, 0xaa, 0xe7,
0x7a, 0xa7, 0x62, 0x10, 0xea, 0xe6, 0xd5, 0xd4, 0x5e, 0x1d, 0x4b, 0xe3, 0x09, 0x17, 0xc5, 0x46,
0xc2, 0x53, 0x42, 0x23, 0xf4, 0x43, 0x82, 0xae, 0x59, 0xf7, 0x1a, 0x22, 0x4f, 0xf9, 0xcc, 0x81,
0xf2, 0x70, 0x78, 0x88, 0xc8, 0x8f, 0xd2, 0x42, 0x62, 0x28, 0xdc, 0x72, 0xcc, 0x19, 0xa2, 0xf9,
0x54, 0xf7, 0xbb, 0xcd, 0x8f, 0x8d, 0x81, 0xe2, 0xb9, 0xd3, 0x87, 0x6a, 0xac, 0x02, 0x66, 0x6c,
0xbf, 0x6b, 0x73, 0x39, 0xdf, 0xef, 0xb2, 0xdb, 0x50, 0x89, 0x4e, 0x5d, 0xe5, 0x07, 0x13, 0xb2,
0xeb, 0xfa, 0xfe, 0x07, 0x89, 0xc6, 0x43, 0x43, 0xc7, 0x5d, 0x62, 0x8c, 0x23, 0xa1, 0x96, 0xa8,
0xf8, 0x9a, 0xac, 0x06, 0x14, 0xe6, 0xfe, 0x88, 0xe4, 0xac, 0x71, 0x1c, 0x22, 0x65, 0xe2, 0x9b,
0x38, 0x5b, 0xe3, 0x38, 0x44, 0x67, 0xcd, 0xe4, 0xc8, 0x94, 0xc4, 0x35, 0x4e, 0x63, 0xd4, 0x5d,
0x86, 0xda, 0x97, 0x81, 0x3b, 0x8d, 0xed, 0x1f, 0xcf, 0x9d, 0x69, 0x7c, 0xf6, 0xff, 0xcb, 0x6e,
0xbf, 0xcd, 0x41, 0x35, 0xae, 0xe3, 0x58, 0x94, 0xfc, 0x91, 0x08, 0xb4, 0x3f, 0xf6, 0x85, 0xb2,
0x1b, 0x67, 0x28, 0xec, 0x36, 0x94, 0x5c, 0xad, 0x55, 0x9c, 0xea, 0x1f, 0x67, 0x2f, 0x81, 0xd6,
0x01, 0x72, 0x7a, 0x81, 0x56, 0x0b, 0x6e, 0x50, 0x1b, 0x5f, 0x01, 0xa4, 0x44, 0xd4, 0xf5, 0x4c,
0x2c, 0xac, 0x54, 0x1c, 0xb2, 0xab, 0x50, 0x7a, 0xe6, 0x4e, 0xe7, 0xc2, 0xc6, 0xb7, 0x99, 0x7c,
0x9d, 0xff, 0x2a, 0xe7, 0xfc, 0x35, 0x0f, 0x15, 0x7b, 0x29, 0xb0, 0x5b, 0x50, 0xa1, 0x4b, 0xc1,
0x6a, 0x74, 0x79, 0xd2, 0xc4, 0x10, 0xb6, 0x97, 0xdc, 0x76, 0x19, 0x1d, 0xad, 0x28, 0x73, 0xeb,
0x59, 0x1d, 0xd3, 0xbb, 0xaf, 0x30, 0x12, 0x63, 0x7b, 0xad, 0xad, 0x23, 0xba, 0x2b, 0xc6, 0x7e,
0xe0, 0xa3, 0x7d, 0x38, 0xb2, 0xd8, 0xad, 0xf8, 0xd4, 0x45, 0x92, 0xf8, 0x51, 0x56, 0xe2, 0xeb,
0x87, 0xee, 0x43, 0x3d, 0xb3, 0xcd, 0x25, 0xa7, 0xbe, 0x9e, 0x3d, 0xb5, 0xdd, 0x92, 0xc4, 0x99,
0x3b, 0x39, 0xb5, 0xc2, 0x7f, 0x61, 0xbf, 0x2f, 0x01, 0x52, 0x91, 0xef, 0x5e, 0x74, 0x9c, 0x17,
0x05, 0x80, 0x41, 0x88, 0x65, 0x75, 0xe4, 0x52, 0x6d, 0x5f, 0xf5, 0x27, 0x81, 0x54, 0xe2, 0x29,
0xa5, 0x31, 0xad, 0xaf, 0xf2, 0xba, 0xa1, 0x51, 0xc6, 0xb0, 0x03, 0xa8, 0x8f, 0x44, 0xe4, 0x29,
0x9f, 0x02, 0xca, 0x1a, 0x7d, 0x0b, 0xcf, 0x94, 0xca, 0x69, 0x75, 0x53, 0x84, 0xb1, 0x55, 0x76,
0x0d, 0xdb, 0x87, 0x55, 0x71, 0x1e, 0x4a, 0xa5, 0xed, 0x2e, 0xa6, 0x77, 0xb8, 0x62, 0xba, 0x10,
0xa4, 0xd3, 0x4e, 0xbc, 0x2e, 0xd2, 0x09, 0x73, 0xa1, 0xe8, 0xb9, 0xa1, 0xb9, 0x38, 0xeb, 0xfb,
0xcd, 0x0b, 0xfb, 0x75, 0xdc, 0xd0, 0x18, 0xad, 0xfd, 0x05, 0x9e, 0xf5, 0xc5, 0x3f, 0xb6, 0x6e,
0x66, 0x6e, 0xcb, 0x99, 0x3c, 0x59, 0xec, 0x51, 0xbc, 0x9c, 0xf9, 0x7a, 0x6f, 0xae, 0xfd, 0xe9,
0x9e, 0x1b, 0xfa, 0x28, 0x0e, 0x17, 0xf6, 0xbb, 0x9c, 0x44, 0x6f, 0xfc, 0x12, 0x1a, 0x17, 0xf5,
0x7e, 0x1f, 0x1f, 0x6c, 0xdc, 0x81, 0x5a, 0xa2, 0xc7, 0xdb, 0x16, 0x56, 0xb3, 0xce, 0xfb, 0x4b,
0x0e, 0xca, 0x26, 0xab, 0xd8, 0x1d, 0xa8, 0x4d, 0xa5, 0xe7, 0xa2, 0x02, 0x71, 0xfb, 0xf6, 0x49,
0x9a, 0x74, 0xad, 0xfb, 0x31, 0xcf, 0x58, 0x35, 0xc5, 0x62, 0x90, 0xf9, 0xc1, 0x58, 0xc6, 0x59,
0xb0, 0x9e, 0x2e, 0xea, 0x07, 0x63, 0xc9, 0x0d, 0x73, 0xe3, 0x1e, 0xac, 0x2f, 0x8b, 0xb8, 0x44,
0xcf, 0xcf, 0x96, 0xc3, 0x95, 0x6a, 0x76, 0xb2, 0x28, 0xab, 0xf6, 0x1d, 0xa8, 0x25, 0x74, 0xb6,
0xfb, 0xba, 0xe2, 0xab, 0xd9, 0x95, 0x19, 0x5d, 0x9d, 0x29, 0x40, 0xaa, 0x1a, 0x16, 0x2b, 0xec,
0x13, 0x03, 0x77, 0x16, 0x37, 0x60, 0xc9, 0x9c, 0xee, 0x3d, 0x57, 0xbb, 0xa4, 0xca, 0x2a, 0xa7,
0x31, 0x6b, 0x01, 0x8c, 0x92, 0x84, 0x7d, 0x43, 0x1a, 0x67, 0x10, 0xce, 0x00, 0xaa, 0xb1, 0x12,
0x6c, 0x1b, 0xea, 0x91, 0xdd, 0x19, 0xbb, 0x22, 0xdc, 0xae, 0xc4, 0xb3, 0x24, 0xec, 0x6e, 0x94,
0x1b, 0x4c, 0xc4, 0x52, 0x77, 0xc3, 0x91, 0xc2, 0x2d, 0xc3, 0x79, 0x02, 0x25, 0x22, 0x60, 0x9a,
0x45, 0xda, 0x55, 0xda, 0x36, 0x4a, 0xa6, 0x71, 0x90, 0x11, 0x6d, 0xdb, 0x2e, 0x62, 0x20, 0x72,
0x03, 0x60, 0xd7, 0xb1, 0x3d, 0x19, 0x59, 0x8b, 0x5e, 0x86, 0x43, 0xb6, 0xf3, 0x0b, 0xa8, 0xc6,
0x64, 0x3c, 0xf9, 0x7d, 0x3f, 0x10, 0x56, 0x45, 0x1a, 0x63, 0x83, 0xd9, 0x39, 0x75, 0x95, 0xeb,
0x69, 0x61, 0x5a, 0x84, 0x12, 0x4f, 0x09, 0xce, 0x67, 0x50, 0xcf, 0x64, 0x0f, 0x86, 0xdb, 0x63,
0x72, 0xa3, 0xc9, 0x61, 0x33, 0x71, 0x5e, 0x60, 0xfb, 0x1b, 0x77, 0x34, 0x3f, 0x03, 0x38, 0xd5,
0x3a, 0x7c, 0x4a, 0x2d, 0x8e, 0xb5, 0x7d, 0x0d, 0x29, 0x84, 0x60, 0x5b, 0x50, 0xc7, 0x49, 0x64,
0xf9, 0x26, 0xde, 0x69, 0x45, 0x64, 0x00, 0x3f, 0x85, 0xda, 0x38, 0x59, 0x5e, 0xb0, 0xae, 0x8b,
0x57, 0x7f, 0x02, 0xd5, 0x40, 0x5a, 0x9e, 0xe9, 0xb8, 0x2a, 0x81, 0x24, 0x96, 0x73, 0x13, 0x7e,
0xf2, 0x5a, 0xaf, 0xce, 0x3e, 0x82, 0xf2, 0xd8, 0x9f, 0x6a, 0x2a, 0xfa, 0xd8, 0xc4, 0xd9, 0x99,
0xf3, 0xef, 0x1c, 0x40, 0xea, 0x59, 0x8c, 0x57, 0xac, 0xde, 0x88, 0x59, 0x35, 0xd5, 0x7a, 0x0a,
0xd5, 0x99, 0xad, 0x03, 0xd6, 0x67, 0xd7, 0x96, 0xa3, 0xa1, 0x15, 0x97, 0x09, 0x53, 0x21, 0xf6,
0x6d, 0x85, 0x78, 0x9f, 0x7e, 0x3a, 0xd9, 0x81, 0x1a, 0x95, 0xec, 0xbb, 0x08, 0xd2, 0x44, 0xe3,
0x96, 0xb3, 0x71, 0x0f, 0xd6, 0x96, 0xb6, 0x7c, 0xc7, 0x3b, 0x21, 0xad, 0x67, 0xd9, 0x2c, 0xbb,
0x05, 0x65, 0xd3, 0x60, 0x62, 0x48, 0xe0, 0xc8, 0x8a, 0xa1, 0x31, 0x75, 0x0c, 0xc7, 0xf1, 0xeb,
0xa4, 0x7f, 0xec, 0xec, 0x43, 0xd9, 0x3c, 0xbf, 0xd8, 0x0e, 0x54, 0x5c, 0xcf, 0xa4, 0x63, 0xa6,
0x24, 0x20, 0xf3, 0x80, 0xc8, 0x3c, 0x66, 0x3b, 0x7f, 0xcb, 0x03, 0xa4, 0xf4, 0xf7, 0xe8, 0x58,
0xbf, 0x86, 0xf5, 0x48, 0x78, 0x32, 0x18, 0xb9, 0x6a, 0x41, 0x5c, 0xfb, 0xcc, 0xb8, 0x6c, 0xc9,
0x05, 0x64, 0xa6, 0x7b, 0x2d, 0xbc, 0xbd, 0x7b, 0xdd, 0x81, 0xa2, 0x27, 0xc3, 0x85, 0xbd, 0x28,
0xd8, 0xf2, 0x41, 0x3a, 0x32, 0x5c, 0xe0, 0x63, 0x13, 0x11, 0xac, 0x05, 0xe5, 0xd9, 0x19, 0x3d,
0x48, 0x4d, 0x33, 0x7f, 0x75, 0x19, 0xfb, 0xe0, 0x0c, 0xc7, 0xf8, 0x7c, 0x35, 0x28, 0x76, 0x13,
0x4a, 0xb3, 0xb3, 0x91, 0xaf, 0xa8, 0xef, 0xad, 0x9b, 0xce, 0x30, 0x0b, 0xef, 0xfa, 0x0a, 0x1f,
0xa9, 0x84, 0x61, 0x0e, 0xe4, 0xd5, 0xac, 0x59, 0x21, 0x64, 0xe3, 0x82, 0x35, 0x67, 0x87, 0x2b,
0x3c, 0xaf, 0x66, 0xed, 0x2a, 0x94, 0x8d, 0x5d, 0x9d, 0x3f, 0x15, 0x60, 0x7d, 0x59, 0x4b, 0x8c,
0x83, 0x48, 0x79, 0x71, 0x1c, 0x44, 0xca, 0x4b, 0x1a, 0xfb, 0x7c, 0xa6, 0xb1, 0x77, 0xa0, 0x24,
0x9f, 0x07, 0x42, 0x65, 0x5f, 0xde, 0x9d, 0x53, 0xf9, 0x3c, 0xc0, 0x36, 0xd5, 0xb0, 0x96, 0xba,
0xbe, 0x92, 0xed, 0xfa, 0xae, 0xc3, 0xda, 0x58, 0x4e, 0xa7, 0xf2, 0xf9, 0x70, 0x31, 0x9b, 0xfa,
0xc1, 0x99, 0x6d, 0xfd, 0x96, 0x89, 0x6c, 0x07, 0xae, 0x8c, 0x7c, 0x85, 0xea, 0x74, 0x64, 0xa0,
0x45, 0x40, 0x6f, 0x19, 0xc4, 0x5d, 0x24, 0xb3, 0x6f, 0x60, 0xdb, 0xd5, 0x5a, 0xcc, 0x42, 0xfd,
0x28, 0x08, 0x5d, 0xef, 0xac, 0x2b, 0x3d, 0xca, 0xd9, 0x59, 0xe8, 0x6a, 0xff, 0xc4, 0x9f, 0xe2,
0xb3, 0xad, 0x42, 0x4b, 0xdf, 0x8a, 0x63, 0x9f, 0xc3, 0xba, 0xa7, 0x84, 0xab, 0x45, 0x57, 0x44,
0xfa, 0xd8, 0xd5, 0xa7, 0xcd, 0x2a, 0xad, 0xbc, 0x40, 0xc5, 0x33, 0xb8, 0xa8, 0xed, 0x13, 0x7f,
0x3a, 0xf2, 0x5c, 0x35, 0x6a, 0xd6, 0xcc, 0x19, 0x96, 0x88, 0xac, 0x05, 0x8c, 0x08, 0xbd, 0x59,
0xa8, 0x17, 0x09, 0x14, 0x08, 0x7a, 0x09, 0x07, 0x0b, 0xa7, 0xf6, 0x67, 0x22, 0xd2, 0xee, 0x2c,
0xa4, 0x3f, 0x06, 0x05, 0x9e, 0x12, 0x9c, 0x6f, 0x73, 0xd0, 0xb8, 0x18, 0x22, 0x68, 0xe0, 0x10,
0xd5, 0xb4, 0xc9, 0x86, 0xe3, 0xc4, 0xe8, 0xf9, 0x8c, 0xd1, 0xe3, 0x1b, 0xaa, 0x90, 0xb9, 0xa1,
0x12, 0x07, 0x16, 0xdf, 0xec, 0xc0, 0x25, 0x95, 0x4a, 0x17, 0x55, 0xfa, 0x7d, 0x0e, 0xae, 0x5c,
0x08, 0xc3, 0x77, 0xd6, 0x68, 0x1b, 0xea, 0x33, 0xf7, 0x4c, 0x1c, 0xbb, 0x8a, 0x9c, 0x5b, 0x30,
0x2d, 0x5c, 0x86, 0xf4, 0x3f, 0xd0, 0x2f, 0x80, 0xd5, 0x6c, 0xec, 0x5f, 0xaa, 0x5b, 0xec, 0xca,
0x23, 0xa9, 0xef, 0xca, 0xb9, 0xbd, 0xfd, 0x62, 0x57, 0xc6, 0xc4, 0xd7, 0x1d, 0x5e, 0xb8, 0xc4,
0xe1, 0xce, 0x11, 0x54, 0x63, 0x05, 0xd9, 0x96, 0x7d, 0xc6, 0xe7, 0xd2, 0xdf, 0x49, 0x8f, 0x22,
0xa1, 0x50, 0x77, 0xf3, 0xa6, 0xff, 0x14, 0x4a, 0x13, 0x25, 0xe7, 0xa1, 0xad, 0xad, 0x4b, 0x08,
0xc3, 0x71, 0x86, 0x50, 0xb1, 0x14, 0xb6, 0x0b, 0xe5, 0x93, 0xc5, 0x51, 0xdc, 0x7c, 0xd8, 0xc4,
0xc6, 0xf9, 0xc8, 0x22, 0xb0, 0x5a, 0x18, 0x04, 0xbb, 0x0a, 0xc5, 0x93, 0x45, 0xbf, 0x6b, 0x1e,
0x64, 0x58, 0x73, 0x70, 0xd6, 0x2e, 0x1b, 0x85, 0x9c, 0xfb, 0xb0, 0x9a, 0x5d, 0x87, 0x46, 0xc9,
0x34, 0x35, 0x34, 0x4e, 0x8b, 0x6b, 0xfe, 0x2d, 0xc5, 0x75, 0x77, 0x07, 0x2a, 0xf6, 0x87, 0x09,
0xab, 0x41, 0xe9, 0xd1, 0xd1, 0xb0, 0xf7, 0xb0, 0xb1, 0xc2, 0xaa, 0x50, 0x3c, 0x1c, 0x0c, 0x1f,
0x36, 0x72, 0x38, 0x3a, 0x1a, 0x1c, 0xf5, 0x1a, 0xf9, 0xdd, 0x1b, 0xb0, 0x9a, 0xfd, 0x65, 0xc2,
0xea, 0x50, 0x19, 0x1e, 0x1c, 0x75, 0xdb, 0x83, 0x5f, 0x37, 0x56, 0xd8, 0x2a, 0x54, 0xfb, 0x47,
0xc3, 0x5e, 0xe7, 0x11, 0xef, 0x35, 0x72, 0xbb, 0xbf, 0x82, 0x5a, 0xf2, 0xaa, 0x47, 0x09, 0xed,
0xfe, 0x51, 0xb7, 0xb1, 0xc2, 0x00, 0xca, 0xc3, 0x5e, 0x87, 0xf7, 0x50, 0x6e, 0x05, 0x0a, 0xc3,
0xe1, 0x61, 0x23, 0x8f, 0xbb, 0x76, 0x0e, 0x3a, 0x87, 0xbd, 0x46, 0x01, 0x87, 0x0f, 0x1f, 0x1c,
0xdf, 0x1d, 0x36, 0x8a, 0xbb, 0x5f, 0xc2, 0x95, 0x0b, 0x2f, 0x67, 0x5a, 0x7d, 0x78, 0xc0, 0x7b,
0x28, 0xa9, 0x0e, 0x95, 0x63, 0xde, 0x7f, 0x7c, 0xf0, 0xb0, 0xd7, 0xc8, 0x21, 0xe3, 0xfe, 0xa0,
0x73, 0xaf, 0xd7, 0x6d, 0xe4, 0xdb, 0xd7, 0xbe, 0x7b, 0xb9, 0x99, 0xfb, 0xfe, 0xe5, 0x66, 0xee,
0x87, 0x97, 0x9b, 0xb9, 0x7f, 0xbe, 0xdc, 0xcc, 0x7d, 0xfb, 0x6a, 0x73, 0xe5, 0xfb, 0x57, 0x9b,
0x2b, 0x3f, 0xbc, 0xda, 0x5c, 0x39, 0x29, 0xd3, 0x0f, 0xcc, 0x2f, 0xfe, 0x13, 0x00, 0x00, 0xff,
0xff, 0x69, 0x08, 0x3c, 0x7a, 0x00, 0x15, 0x00, 0x00,
}
func (m *Op) Marshal() (dAtA []byte, err error) {
@ -2865,6 +2874,15 @@ func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.ResultID) > 0 {
i -= len(m.ResultID)
copy(dAtA[i:], m.ResultID)
i = encodeVarintOps(dAtA, i, uint64(len(m.ResultID)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xba
}
if m.SSHOpt != nil {
{
size, err := m.SSHOpt.MarshalToSizedBuffer(dAtA[:i])
@ -4704,6 +4722,10 @@ func (m *Mount) Size() (n int) {
l = m.SSHOpt.Size()
n += 2 + l + sovOps(uint64(l))
}
l = len(m.ResultID)
if l > 0 {
n += 2 + l + sovOps(uint64(l))
}
return n
}
@ -6661,6 +6683,38 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 23:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResultID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthOps
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthOps
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResultID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipOps(dAtA[iNdEx:])

View File

@ -81,6 +81,7 @@ message Mount {
CacheOpt cacheOpt = 20;
SecretOpt secretOpt = 21;
SSHOpt SSHOpt = 22;
string resultID = 23;
}
// MountType defines a type of a mount from a supported set

View File

@ -33,6 +33,7 @@ import (
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/snapshot/imagerefchecker"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/mounts"
"github.com/moby/buildkit/solver/llbsolver/ops"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/source"
@ -252,15 +253,19 @@ func (w *Worker) CacheManager() cache.Manager {
return w.CacheMgr
}
// MetadataStore returns the metadata store carried in the worker's
// WorkerOpt. It exposes the store as a method so *Worker satisfies the
// frontend Worker interface (which declares MetadataStore() *metadata.Store);
// the ops constructed in ResolveOp read the same WorkerOpt.MetadataStore.
func (w *Worker) MetadataStore() *metadata.Store {
	return w.WorkerOpt.MetadataStore
}
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
if baseOp, ok := v.Sys().(*pb.Op); ok {
switch op := baseOp.Op.(type) {
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheMgr, sm, w.MetadataStore, w.WorkerOpt.Executor, w)
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheMgr, sm, w.WorkerOpt.MetadataStore, w.WorkerOpt.Executor, w)
case *pb.Op_File:
return ops.NewFileOp(v, op, w.CacheMgr, w.MetadataStore, w)
return ops.NewFileOp(v, op, w.CacheMgr, w.WorkerOpt.MetadataStore, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
default:
@ -271,13 +276,13 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
}
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
mu := ops.CacheMountsLocker()
mu := mounts.CacheMountsLocker()
mu.Lock()
defer mu.Unlock()
for _, id := range ids {
id = "cache-dir:" + id
sis, err := w.MetadataStore.Search(id)
sis, err := w.WorkerOpt.MetadataStore.Search(id)
if err != nil {
return err
}
@ -306,7 +311,7 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
}
}
ops.ClearActiveCacheMounts()
mounts.ClearActiveCacheMounts()
return nil
}

View File

@ -5,6 +5,7 @@ import (
"github.com/containerd/containerd/content"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
@ -35,6 +36,7 @@ type Worker interface {
ContentStore() content.Store
Executor() executor.Executor
CacheManager() cache.Manager
MetadataStore() *metadata.Store
}
// Pre-defined label keys