remove `Get` prefix from Executor/CacheManager funcs and update to avoid naming conflicts

Signed-off-by: Cory Bennett <cbennett@netflix.com>
v0.8
Cory Bennett 2020-07-17 00:54:15 +00:00
parent 453c76d29e
commit 6a56695d3a
6 changed files with 38 additions and 38 deletions
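The rename is forced by Go's selector rules rather than style alone: a method may not share a name with a field declared directly on the same struct, but it may shadow a field promoted from an embedded struct, in which case the field has to be reached through the embedded type explicitly. The sketch below (hypothetical, simplified types standing in for the real buildkit ones) shows why the `CacheManager` field becomes `CacheMgr` while the promoted `Executor` field keeps its name and call sites switch to `w.WorkerOpt.Executor`.

package main

type Executor struct{}
type Manager struct{}

// WorkerOpt carries an Executor field, mirroring the real WorkerOpt.
type WorkerOpt struct {
	Executor Executor
}

type Worker struct {
	WorkerOpt        // Executor is promoted from the embedded struct
	CacheMgr Manager // was CacheManager; a direct field cannot share its name with a method
}

// Legal: the method (depth 0) shadows the promoted field (depth 1), so the
// field itself must now be spelled w.WorkerOpt.Executor, as in the hunks below.
func (w *Worker) Executor() Executor { return w.WorkerOpt.Executor }

// Compiles only because the field was renamed to CacheMgr; keeping a direct
// field named CacheManager alongside this method would be rejected with
// "field and method with the same name CacheManager".
func (w *Worker) CacheManager() Manager { return w.CacheMgr }

func main() {
	w := &Worker{}
	_ = w.Executor()         // resolves to the method
	_ = w.WorkerOpt.Executor // the embedded field, named explicitly
	_ = w.CacheManager()
}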

View File

@@ -105,7 +105,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
return nil, errors.Errorf("invalid ref: %T", res.Sys())
}
-rootFS, err = workerRef.Worker.GetCacheManager().New(ctx, workerRef.ImmutableRef)
+rootFS, err = workerRef.Worker.CacheManager().New(ctx, workerRef.ImmutableRef)
if err != nil {
return nil, err
}
@@ -168,7 +168,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
if !ok {
return nil, errors.Errorf("invalid ref: %T", r.Sys())
}
-rootFS, err = workerRef.Worker.GetCacheManager().New(ctx, workerRef.ImmutableRef)
+rootFS, err = workerRef.Worker.CacheManager().New(ctx, workerRef.ImmutableRef)
if err != nil {
return nil, err
}

View File

@@ -251,7 +251,7 @@ func (s *llbBridge) Run(ctx context.Context, id string, root cache.Mountable, mo
return err
}
span, ctx := tracing.StartSpan(ctx, strings.Join(process.Meta.Args, " "))
-err = w.GetExecutor().Run(ctx, id, root, mounts, process, started)
+err = w.Executor().Run(ctx, id, root, mounts, process, started)
tracing.FinishWithError(span, err)
return err
}
@@ -262,7 +262,7 @@ func (s *llbBridge) Exec(ctx context.Context, id string, process executor.Proces
return err
}
span, ctx := tracing.StartSpan(ctx, strings.Join(process.Meta.Args, " "))
-err = w.GetExecutor().Exec(ctx, id, process)
+err = w.Executor().Exec(ctx, id, process)
tracing.FinishWithError(span, err)
return err
}

View File

@@ -83,7 +83,7 @@ type WorkerOpt struct {
// TODO: s/Worker/OpWorker/g ?
type Worker struct {
WorkerOpt
-CacheManager cache.Manager
+CacheMgr cache.Manager
SourceManager *source.Manager
imageWriter *imageexporter.ImageWriter
ImageSource *containerimage.Source
@@ -181,7 +181,7 @@ func NewWorker(opt WorkerOpt) (*Worker, error) {
return &Worker{
WorkerOpt: opt,
-CacheManager: cm,
+CacheMgr: cm,
SourceManager: sm,
imageWriter: iw,
ImageSource: is,
@@ -225,15 +225,15 @@ func (w *Worker) LoadRef(id string, hidden bool) (cache.ImmutableRef, error) {
if hidden {
opts = append(opts, cache.NoUpdateLastUsed)
}
-return w.CacheManager.Get(context.TODO(), id, opts...)
+return w.CacheMgr.Get(context.TODO(), id, opts...)
}
-func (w *Worker) GetExecutor() executor.Executor {
-return w.Executor
+func (w *Worker) Executor() executor.Executor {
+return w.WorkerOpt.Executor
}
-func (w *Worker) GetCacheManager() cache.Manager {
-return w.CacheManager
+func (w *Worker) CacheManager() cache.Manager {
+return w.CacheMgr
}
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
@@ -242,9 +242,9 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w)
case *pb.Op_Exec:
-return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager, sm, w.MetadataStore, w.Executor, w)
+return ops.NewExecOp(v, op, baseOp.Platform, w.CacheMgr, sm, w.MetadataStore, w.WorkerOpt.Executor, w)
case *pb.Op_File:
-return ops.NewFileOp(v, op, w.CacheManager, w.MetadataStore, w)
+return ops.NewFileOp(v, op, w.CacheMgr, w.MetadataStore, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
default:
@@ -268,7 +268,7 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
for _, si := range sis {
for _, k := range si.Indexes() {
if k == id || strings.HasPrefix(k, id+":") {
-if siCached := w.CacheManager.Metadata(si.ID()); siCached != nil {
+if siCached := w.CacheMgr.Metadata(si.ID()); siCached != nil {
si = siCached
}
if err := cache.CachePolicyDefault(si); err != nil {
@@ -281,7 +281,7 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
return err
}
// if ref is unused try to clean it up right away by releasing it
-if mref, err := w.CacheManager.GetMutable(ctx, si.ID()); err == nil {
+if mref, err := w.CacheMgr.GetMutable(ctx, si.ID()); err == nil {
go mref.Release(context.TODO())
}
break
@@ -299,11 +299,11 @@ func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.Res
}
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
-return w.CacheManager.DiskUsage(ctx, opt)
+return w.CacheMgr.DiskUsage(ctx, opt)
}
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, opt ...client.PruneInfo) error {
-return w.CacheManager.Prune(ctx, ch, opt...)
+return w.CacheMgr.Prune(ctx, ch, opt...)
}
func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
@@ -473,7 +473,7 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (ref cac
if v, ok := desc.Annotations["buildkit/description"]; ok {
descr = v
}
-ref, err := w.CacheManager.GetByBlob(ctx, desc, current, cache.WithDescription(descr), cache.WithCreationTime(tm))
+ref, err := w.CacheMgr.GetByBlob(ctx, desc, current, cache.WithDescription(descr), cache.WithCreationTime(tm))
if current != nil {
current.Release(context.TODO())
}

View File

@@ -90,7 +90,7 @@ func TestRuncWorker(t *testing.T) {
lm.Unmount()
require.NoError(t, err)
-du, err := w.CacheManager.DiskUsage(ctx, client.DiskUsageInfo{})
+du, err := w.CacheMgr.DiskUsage(ctx, client.DiskUsageInfo{})
require.NoError(t, err)
// for _, d := range du {
@@ -107,16 +107,16 @@ func TestRuncWorker(t *testing.T) {
}
stderr := bytes.NewBuffer(nil)
err = w.Executor.Run(ctx, "", snap, nil, executor.ProcessInfo{Meta: meta, Stderr: &nopCloser{stderr}}, nil)
err = w.WorkerOpt.Executor.Run(ctx, "", snap, nil, executor.ProcessInfo{Meta: meta, Stderr: &nopCloser{stderr}}, nil)
require.Error(t, err) // Read-only root
// typical error is like `mkdir /.../rootfs/proc: read-only file system`.
// make sure the error is caused before running `echo foo > /bar`.
require.Contains(t, stderr.String(), "read-only file system")
-root, err := w.CacheManager.New(ctx, snap)
+root, err := w.CacheMgr.New(ctx, snap)
require.NoError(t, err)
err = w.Executor.Run(ctx, "", root, nil, executor.ProcessInfo{Meta: meta, Stderr: &nopCloser{stderr}}, nil)
err = w.WorkerOpt.Executor.Run(ctx, "", root, nil, executor.ProcessInfo{Meta: meta, Stderr: &nopCloser{stderr}}, nil)
require.NoError(t, err)
meta = executor.Meta{
@@ -124,7 +124,7 @@ func TestRuncWorker(t *testing.T) {
Cwd: "/",
}
-err = w.Executor.Run(ctx, "", root, nil, executor.ProcessInfo{Meta: meta, Stderr: &nopCloser{stderr}}, nil)
+err = w.WorkerOpt.Executor.Run(ctx, "", root, nil, executor.ProcessInfo{Meta: meta, Stderr: &nopCloser{stderr}}, nil)
require.NoError(t, err)
rf, err := root.Commit(ctx)
@@ -153,7 +153,7 @@ func TestRuncWorker(t *testing.T) {
err = snap.Release(ctx)
require.NoError(t, err)
-du2, err := w.CacheManager.DiskUsage(ctx, client.DiskUsageInfo{})
+du2, err := w.CacheMgr.DiskUsage(ctx, client.DiskUsageInfo{})
require.NoError(t, err)
require.Equal(t, 1, len(du2)-len(du))
@@ -172,7 +172,7 @@ func TestRuncWorkerNoProcessSandbox(t *testing.T) {
sm, err := session.NewManager()
require.NoError(t, err)
snap := tests.NewBusyboxSourceSnapshot(ctx, t, w, sm)
-root, err := w.CacheManager.New(ctx, snap)
+root, err := w.CacheMgr.New(ctx, snap)
require.NoError(t, err)
// ensure the procfs is shared
@@ -185,7 +185,7 @@ func TestRuncWorkerNoProcessSandbox(t *testing.T) {
}
stdout := bytes.NewBuffer(nil)
stderr := bytes.NewBuffer(nil)
-err = w.Executor.Run(ctx, "", root, nil, executor.ProcessInfo{Meta: meta, Stdout: &nopCloser{stdout}, Stderr: &nopCloser{stderr}}, nil)
+err = w.WorkerOpt.Executor.Run(ctx, "", root, nil, executor.ProcessInfo{Meta: meta, Stdout: &nopCloser{stdout}, Stderr: &nopCloser{stderr}}, nil)
require.NoError(t, err, fmt.Sprintf("stdout=%q, stderr=%q", stdout.String(), stderr.String()))
require.Equal(t, string(selfCmdline), stdout.String())
}

View File

@@ -41,7 +41,7 @@ func TestWorkerExec(t *testing.T, w *base.Worker) {
require.NoError(t, err)
snap := NewBusyboxSourceSnapshot(ctx, t, w, sm)
-root, err := w.CacheManager.New(ctx, snap)
+root, err := w.CacheMgr.New(ctx, snap)
require.NoError(t, err)
id := identity.NewID()
@@ -50,7 +50,7 @@ func TestWorkerExec(t *testing.T, w *base.Worker) {
eg := errgroup.Group{}
started := make(chan struct{})
eg.Go(func() error {
-return w.Executor.Run(ctx, id, root, nil, executor.ProcessInfo{
+return w.WorkerOpt.Executor.Run(ctx, id, root, nil, executor.ProcessInfo{
Meta: executor.Meta{
Args: []string{"sleep", "10"},
Cwd: "/",
@@ -69,7 +69,7 @@ func TestWorkerExec(t *testing.T, w *base.Worker) {
stderr := bytes.NewBuffer(nil)
// verify pid1 is the sleep command via Exec
-err = w.Executor.Exec(ctx, id, executor.ProcessInfo{
+err = w.WorkerOpt.Executor.Exec(ctx, id, executor.ProcessInfo{
Meta: executor.Meta{
Args: []string{"ps", "-o", "pid,comm"},
},
@@ -87,7 +87,7 @@ func TestWorkerExec(t *testing.T, w *base.Worker) {
stdin := bytes.NewReader([]byte("hello"))
stdout.Reset()
stderr.Reset()
-err = w.Executor.Exec(ctx, id, executor.ProcessInfo{
+err = w.WorkerOpt.Executor.Exec(ctx, id, executor.ProcessInfo{
Meta: executor.Meta{
Args: []string{"sh", "-c", "cat > /tmp/msg"},
},
@@ -102,7 +102,7 @@ func TestWorkerExec(t *testing.T, w *base.Worker) {
// verify contents of /tmp/msg
stdout.Reset()
stderr.Reset()
-err = w.Executor.Exec(ctx, id, executor.ProcessInfo{
+err = w.WorkerOpt.Executor.Exec(ctx, id, executor.ProcessInfo{
Meta: executor.Meta{
Args: []string{"cat", "/tmp/msg"},
},
@@ -132,7 +132,7 @@ func TestWorkerExecFailures(t *testing.T, w *base.Worker) {
require.NoError(t, err)
snap := NewBusyboxSourceSnapshot(ctx, t, w, sm)
-root, err := w.CacheManager.New(ctx, snap)
+root, err := w.CacheMgr.New(ctx, snap)
require.NoError(t, err)
id := identity.NewID()
@@ -141,7 +141,7 @@ func TestWorkerExecFailures(t *testing.T, w *base.Worker) {
eg := errgroup.Group{}
started := make(chan struct{})
eg.Go(func() error {
-return w.Executor.Run(ctx, id, root, nil, executor.ProcessInfo{
+return w.WorkerOpt.Executor.Run(ctx, id, root, nil, executor.ProcessInfo{
Meta: executor.Meta{
Args: []string{"/bin/false"},
Cwd: "/",
@@ -156,7 +156,7 @@ func TestWorkerExecFailures(t *testing.T, w *base.Worker) {
}
// this should fail since pid1 has already exited
-err = w.Executor.Exec(ctx, id, executor.ProcessInfo{
+err = w.WorkerOpt.Executor.Exec(ctx, id, executor.ProcessInfo{
Meta: executor.Meta{
Args: []string{"/bin/true"},
},
@@ -170,7 +170,7 @@ func TestWorkerExecFailures(t *testing.T, w *base.Worker) {
eg = errgroup.Group{}
started = make(chan struct{})
eg.Go(func() error {
-return w.Executor.Run(ctx, id, root, nil, executor.ProcessInfo{
+return w.WorkerOpt.Executor.Run(ctx, id, root, nil, executor.ProcessInfo{
Meta: executor.Meta{
Args: []string{"bogus"},
},
@@ -184,7 +184,7 @@ func TestWorkerExecFailures(t *testing.T, w *base.Worker) {
}
// this should fail since pid1 never started
-err = w.Executor.Exec(ctx, id, executor.ProcessInfo{
+err = w.WorkerOpt.Executor.Exec(ctx, id, executor.ProcessInfo{
Meta: executor.Meta{
Args: []string{"/bin/true"},
},

View File

@@ -34,8 +34,8 @@ type Worker interface {
FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error)
PruneCacheMounts(ctx context.Context, ids []string) error
ContentStore() content.Store
-GetExecutor() executor.Executor
-GetCacheManager() cache.Manager
+Executor() executor.Executor
+CacheManager() cache.Manager
}
// Pre-defined label keys
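For code that consumes the worker.Worker interface, the change is purely mechanical. A hypothetical helper (not part of this commit; the import paths and the cache.MutableRef return type are assumptions, while the calls themselves mirror the hunks above) would now read:

package example

import (
	"context"

	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/executor"
	"github.com/moby/buildkit/worker"
)

// execInWorker dispatches an exec into a running container through the
// renamed Executor() accessor (previously GetExecutor()).
func execInWorker(ctx context.Context, w worker.Worker, id string, process executor.ProcessInfo) error {
	return w.Executor().Exec(ctx, id, process)
}

// newRootFS allocates a mutable ref on top of an immutable one through the
// renamed CacheManager() accessor (previously GetCacheManager()).
func newRootFS(ctx context.Context, w worker.Worker, base cache.ImmutableRef) (cache.MutableRef, error) {
	return w.CacheManager().New(ctx, base)
}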