cleanup linter warnings

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
docker-18.09
Tonis Tiigi 2017-11-16 18:09:35 -08:00
parent 5e3884ca7d
commit 6c076f5d6a
10 changed files with 38 additions and 45 deletions

cache/manager.go vendored

@@ -412,8 +412,5 @@ func initializeMetadata(m withMetadata, opts ...RefOption) error {
return err
}
if err := md.Commit(); err != nil {
return err
}
return nil
return md.Commit()
}
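
The change above is a common Go lint simplification: when the final error check only propagates the error and the function otherwise returns nil, the whole check can be replaced by returning the call directly. A minimal sketch, with a hypothetical saveRecord helper standing in for md.Commit:

package example

import "errors"

// saveRecord is a placeholder for any call that returns only an error.
func saveRecord() error { return errors.New("not implemented") }

// Before: the explicit check adds no information.
func commitVerbose() error {
	if err := saveRecord(); err != nil {
		return err
	}
	return nil
}

// After: identical behavior in one line, which is what the linter suggests.
func commitConcise() error {
	return saveRecord()
}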

cache/manager_test.go vendored

@@ -31,7 +31,7 @@ func TestManager(t *testing.T) {
_, err = cm.Get(ctx, "foobar")
require.Error(t, err)
checkDiskUsage(t, ctx, cm, 0, 0)
checkDiskUsage(ctx, t, cm, 0, 0)
active, err := cm.New(ctx, nil, CachePolicyRetain)
require.NoError(t, err)
@@ -54,12 +54,12 @@ func TestManager(t *testing.T) {
require.Error(t, err)
require.Equal(t, errLocked, errors.Cause(err))
checkDiskUsage(t, ctx, cm, 1, 0)
checkDiskUsage(ctx, t, cm, 1, 0)
snap, err := active.Commit(ctx)
require.NoError(t, err)
checkDiskUsage(t, ctx, cm, 1, 0)
checkDiskUsage(ctx, t, cm, 1, 0)
_, err = cm.GetMutable(ctx, active.ID())
require.Error(t, err)
@@ -68,17 +68,17 @@ func TestManager(t *testing.T) {
err = snap.Release(ctx)
require.NoError(t, err)
checkDiskUsage(t, ctx, cm, 0, 1)
checkDiskUsage(ctx, t, cm, 0, 1)
active, err = cm.GetMutable(ctx, active.ID())
require.NoError(t, err)
checkDiskUsage(t, ctx, cm, 1, 0)
checkDiskUsage(ctx, t, cm, 1, 0)
snap, err = active.Commit(ctx)
require.NoError(t, err)
checkDiskUsage(t, ctx, cm, 1, 0)
checkDiskUsage(ctx, t, cm, 1, 0)
err = snap.Finalize(ctx)
require.NoError(t, err)
@@ -100,7 +100,7 @@ func TestManager(t *testing.T) {
snap2, err := cm.Get(ctx, snap.ID())
require.NoError(t, err)
checkDiskUsage(t, ctx, cm, 1, 0)
checkDiskUsage(ctx, t, cm, 1, 0)
err = snap.Release(ctx)
require.NoError(t, err)
@@ -108,7 +108,7 @@ func TestManager(t *testing.T) {
active2, err := cm.New(ctx, snap2, CachePolicyRetain)
require.NoError(t, err)
checkDiskUsage(t, ctx, cm, 2, 0)
checkDiskUsage(ctx, t, cm, 2, 0)
snap3, err := active2.Commit(ctx)
require.NoError(t, err)
@@ -116,12 +116,12 @@ func TestManager(t *testing.T) {
err = snap2.Release(ctx)
require.NoError(t, err)
checkDiskUsage(t, ctx, cm, 2, 0)
checkDiskUsage(ctx, t, cm, 2, 0)
err = snap3.Release(ctx)
require.NoError(t, err)
checkDiskUsage(t, ctx, cm, 0, 2)
checkDiskUsage(ctx, t, cm, 0, 2)
err = cm.Close()
require.NoError(t, err)
@@ -267,7 +267,7 @@ func getCacheManager(t *testing.T, tmpdir string, snapshotter snapshot.Snapshott
return cm
}
func checkDiskUsage(t *testing.T, ctx context.Context, cm Manager, inuse, unused int) {
func checkDiskUsage(ctx context.Context, t *testing.T, cm Manager, inuse, unused int) {
du, err := cm.DiskUsage(ctx, client.DiskUsageInfo{})
require.NoError(t, err)
var inuseActual, unusedActual int
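
The signature change to checkDiskUsage reflects the standard Go lint advice that context.Context should be the first parameter of a function; the same rule explains the getRef parameter reorders further down. A minimal sketch of the flagged and preferred forms, using illustrative names not taken from the repository:

package example

import "context"

// Flagged form: the context is buried after other parameters.
func lookupOld(name string, ctx context.Context) (string, error) {
	return name, ctx.Err()
}

// Preferred form: ctx comes first, then the remaining parameters.
func lookup(ctx context.Context, name string) (string, error) {
	return name, ctx.Err()
}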

cache/refs.go vendored

@@ -214,30 +214,30 @@ func (sr *immutableRef) Finalize(ctx context.Context) error {
return sr.finalize(ctx)
}
func (sr *cacheRecord) Metadata() *metadata.StorageItem {
return sr.md
func (cr *cacheRecord) Metadata() *metadata.StorageItem {
return cr.md
}
func (sr *cacheRecord) finalize(ctx context.Context) error {
mutable := sr.equalMutable
func (cr *cacheRecord) finalize(ctx context.Context) error {
mutable := cr.equalMutable
if mutable == nil {
return nil
}
err := sr.cm.Snapshotter.Commit(ctx, sr.ID(), mutable.ID())
err := cr.cm.Snapshotter.Commit(ctx, cr.ID(), mutable.ID())
if err != nil {
return errors.Wrapf(err, "failed to commit %s", mutable.ID())
}
mutable.dead = true
go func() {
sr.cm.mu.Lock()
defer sr.cm.mu.Unlock()
cr.cm.mu.Lock()
defer cr.cm.mu.Unlock()
if err := mutable.remove(context.TODO(), false); err != nil {
logrus.Error(err)
}
}()
sr.equalMutable = nil
clearEqualMutable(sr.md)
return sr.md.Commit()
cr.equalMutable = nil
clearEqualMutable(cr.md)
return cr.md.Commit()
}
func (sr *mutableRef) commit(ctx context.Context) (ImmutableRef, error) {
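
The receiver renames in cache/refs.go address the lint rule that all methods on a type should use the same receiver name: sr was left over from another type, so the cacheRecord methods now use cr. The rename from s to c on the conn methods below is the same fix. A minimal illustration with a hypothetical counter type:

package example

type counter struct{ n int }

// Consistent receiver naming: every method on counter calls its receiver c.
// Mixing names across methods of one type (say c here and cnt elsewhere)
// is what the linter flags.
func (c *counter) Add(delta int) { c.n += delta }

func (c *counter) Value() int { return c.n }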

@@ -80,26 +80,26 @@ func (c *conn) Write(b []byte) (int, error) {
return len(b), nil
}
func (s *conn) Close() error {
if cs, ok := s.stream.(grpc.ClientStream); ok {
func (c *conn) Close() error {
if cs, ok := c.stream.(grpc.ClientStream); ok {
return cs.CloseSend()
}
return nil
}
func (s *conn) LocalAddr() net.Addr {
func (c *conn) LocalAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) RemoteAddr() net.Addr {
func (c *conn) RemoteAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) SetDeadline(t time.Time) error {
func (c *conn) SetDeadline(t time.Time) error {
return nil
}
func (s *conn) SetReadDeadline(t time.Time) error {
func (c *conn) SetReadDeadline(t time.Time) error {
return nil
}
func (s *conn) SetWriteDeadline(t time.Time) error {
func (c *conn) SetWriteDeadline(t time.Time) error {
return nil
}

@@ -45,7 +45,7 @@ func NewSnapshotter(opt Opt) (*Snapshotter, error) {
return s, nil
}
// Remove also removes a refrence to a blob. If it is a last reference then it deletes it the blob as well
// Remove also removes a reference to a blob. If it is a last reference then it deletes it the blob as well
// Remove is not safe to be called concurrently
func (s *Snapshotter) Remove(ctx context.Context, key string) error {
_, blob, err := s.GetBlob(ctx, key)

@@ -257,7 +257,7 @@ func (j *job) getRef(ctx context.Context, v *vertex, index Index) (Reference, er
if err != nil {
return nil, err
}
ref, err := getRef(s, ctx, v, index, j.cache)
ref, err := getRef(ctx, s, v, index, j.cache)
if err != nil {
return nil, err
}
@@ -284,7 +284,7 @@ func (j *job) cacheExporter(ref Reference) (CacheExporter, error) {
return cr.Cache(cr.index, cr.ref), nil
}
func getRef(s VertexSolver, ctx context.Context, v *vertex, index Index, cache InstructionCache) (Reference, error) {
func getRef(ctx context.Context, s VertexSolver, v *vertex, index Index, cache InstructionCache) (Reference, error) {
k, err := s.CacheKey(ctx, index)
if err != nil {
return nil, err

@@ -226,7 +226,7 @@ func (s *Solver) subBuild(ctx context.Context, dgst digest.Digest, req SolveRequ
st = jl.actives[inp.Vertex.Digest()]
jl.mu.Unlock()
return getRef(st.solver, ctx, inp.Vertex.(*vertex), inp.Index, s.cache) // TODO: combine to pass single input // TODO: export cache for subbuilds
return getRef(ctx, st.solver, inp.Vertex.(*vertex), inp.Index, s.cache) // TODO: combine to pass single input // TODO: export cache for subbuilds
}
type VertexSolver interface {

@@ -71,6 +71,6 @@ func NewLocalIdentifier(str string) (*LocalIdentifier, error) {
return &LocalIdentifier{Name: str}, nil
}
func (_ *LocalIdentifier) ID() string {
func (*LocalIdentifier) ID() string {
return LocalScheme
}
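
Dropping the underscore here addresses the lint warning that a receiver which is never used should simply be left unnamed rather than named "_"; the emptyReadCloser methods at the end of the commit get the same treatment. A minimal sketch with a hypothetical type:

package example

type localScheme struct{}

// The receiver is never used inside the method, so it is left unnamed
// instead of being written as "_", which the linter reports as unnecessary.
func (localScheme) ID() string { return "local" }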

@@ -174,20 +174,16 @@ func TestCancellation(t *testing.T) {
c3 := f.NewCaller()
var cancelFirst func()
firstErr := make(chan error)
go func() {
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
cancelFirst = cancel
ctx, _ := context.WithTimeout(context.Background(), 50*time.Millisecond)
_, err := c1.Call(ctx, fn1)
firstErr <- err
}()
var cancelSecond func()
secondErr := make(chan error)
go func() {
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
cancelSecond = cancel
ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
_, err := c2.Call(ctx, fn2)
secondErr <- err
}()
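
In the test above, cancelFirst and cancelSecond were assigned but never called, so the linter flags them as unused; the commit removes the variables and discards the CancelFunc, letting the timeout expire the context on its own. A minimal sketch of the surrounding idea, with illustrative names and timeout; keeping a deferred cancel, as below, is the more defensive variant than discarding it:

package example

import (
	"context"
	"time"
)

// callWithTimeout runs f under a short timeout. The deferred cancel releases
// the context's resources promptly even if f returns before the deadline.
func callWithTimeout(f func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	return f(ctx)
}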

@@ -76,10 +76,10 @@ func (w containerdWorker) Exec(ctx context.Context, meta worker.Meta, root cache
type emptyReadCloser struct{}
func (_ *emptyReadCloser) Read([]byte) (int, error) {
func (*emptyReadCloser) Read([]byte) (int, error) {
return 0, io.EOF
}
func (_ *emptyReadCloser) Close() error {
func (*emptyReadCloser) Close() error {
return nil
}