Merge pull request #2571 from tonistiigi/lint-update

hack: update linter to v1.43
CrazyMax 2022-01-20 17:23:10 +01:00 committed by GitHub
commit 3df7865fbb
53 changed files with 76 additions and 127 deletions

@@ -14,7 +14,7 @@ linters:
     - deadcode
     - gofmt
     - goimports
-    - golint
+    - revive
     - govet
     - importas
     - ineffassign
@@ -24,6 +24,10 @@ linters:
     - typecheck
     - unused
     - varcheck
+    - bodyclose
+    - errname
+    - makezero
+    - whitespace
   disable-all: true

 linters-settings:
@@ -38,5 +42,5 @@ linters-settings:
 issues:
   exclude-rules:
     - linters:
-        - golint
+        - revive
       text: "stutters"

@@ -1,3 +1,3 @@
-package moby_buildkit_v1 //nolint:golint
+package moby_buildkit_v1 //nolint:revive

 //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto

@@ -1,3 +1,3 @@
-package moby_buildkit_v1_types //nolint:golint
+package moby_buildkit_v1_types //nolint:revive

 //go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=plugins=grpc:. worker.proto

@@ -520,7 +520,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
 		updated        bool
 		iter           *iradix.Iterator
 		k              []byte
-		kOk            bool
+		keyOk          bool
 		origPrefix     string
 		resolvedPrefix string
 	)
@@ -528,7 +528,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
 	iter = root.Iterator()
 	if opts.Wildcard {
-		origPrefix, k, kOk, err = wildcardPrefix(root, p)
+		origPrefix, k, keyOk, err = wildcardPrefix(root, p)
 		if err != nil {
 			return nil, err
 		}
@@ -544,17 +544,17 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
 		if err != nil {
 			return nil, err
 		}
-		kOk = (cr != nil)
+		keyOk = (cr != nil)
 	}

 	if origPrefix != "" {
-		if kOk {
+		if keyOk {
 			iter.SeekLowerBound(append(append([]byte{}, k...), 0))
 		}
 		resolvedPrefix = string(convertKeyToPath(k))
 	} else {
-		k, _, kOk = iter.Next()
+		k, _, keyOk = iter.Next()
 	}

 	var (
@@ -562,7 +562,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
 		lastMatchedDir string
 	)

-	for kOk {
+	for keyOk {
 		fn := string(convertKeyToPath(k))

 		// Convert the path prefix from what we found in the prefix
@@ -597,7 +597,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
 			fn = fn[:len(fn)-1]
 			if fn == p && endsInSep {
 				// We don't include the metadata header for a source dir which ends with a separator
-				k, _, kOk = iter.Next()
+				k, _, keyOk = iter.Next()
 				continue
 			}
 		}
@@ -611,7 +611,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
 			return nil, err
 		}
 		if !include {
-			k, _, kOk = iter.Next()
+			k, _, keyOk = iter.Next()
 			continue
 		}
 		lastMatchedDir = fn
@@ -645,7 +645,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
 		}

 		if !shouldInclude && !dirHeader {
-			k, _, kOk = iter.Next()
+			k, _, keyOk = iter.Next()
 			continue
 		}
@@ -684,7 +684,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
 			parentDirHeaders = append(parentDirHeaders, maybeIncludedPath)
 		}

-		k, _, kOk = iter.Next()
+		k, _, keyOk = iter.Next()
 	}

 	cc.tree = txn.Commit()

@@ -648,7 +648,6 @@ func TestChecksumIncludeDoubleStar(t *testing.T) {
 	dgst, err = cc.Checksum(context.TODO(), ref, "prefix/a", ChecksumOpts{IncludePatterns: []string{"**/foo", "**/report"}, Wildcard: true}, nil)
 	require.NoError(t, err)
 	require.Equal(t, dgstDoubleStar, dgst)
-
 }

 func TestChecksumIncludeSymlink(t *testing.T) {

@@ -47,7 +47,7 @@ func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
 	}

 	// Get extended attributes.
-	xAttrKeys := make([]string, len(h.PAXRecords))
+	xAttrKeys := make([]string, 0, len(h.PAXRecords))
 	for k := range pax {
 		if strings.HasPrefix(k, "SCHILY.xattr.") {
 			k = strings.TrimPrefix(k, "SCHILY.xattr.")
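
This hunk is a makezero fix: the slice was created with a non-zero length and then appended to, which leaves zero-value leading elements. A minimal standalone reproduction of the bug class (illustrative only, not repo code):

```go
// Sketch of the bug the makezero linter guards against: make with a
// non-zero length plus append yields empty leading entries.
package main

import "fmt"

func main() {
	records := map[string]string{"a": "1", "b": "2"}

	bad := make([]string, len(records))     // length 2: two "" entries up front
	good := make([]string, 0, len(records)) // length 0, capacity 2

	for k := range records {
		bad = append(bad, k)   // grows to length 4
		good = append(good, k) // grows to length 2
	}
	fmt.Println(len(bad), len(good)) // 4 2
}
```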

cache/manager.go

@@ -126,7 +126,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,
 	descHandlers := descHandlersOf(opts...)
 	if desc.Digest != "" && (descHandlers == nil || descHandlers[desc.Digest] == nil) {
 		if _, err := cm.ContentStore.Info(ctx, desc.Digest); errors.Is(err, errdefs.ErrNotFound) {
-			return nil, NeedsRemoteProvidersError([]digest.Digest{desc.Digest})
+			return nil, NeedsRemoteProviderError([]digest.Digest{desc.Digest})
 		} else if err != nil {
 			return nil, err
 		}
@@ -171,7 +171,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,
 	for _, si := range sis {
 		ref, err := cm.get(ctx, si.ID(), opts...)
 		if err != nil {
-			if errors.As(err, &NeedsRemoteProvidersError{}) {
+			if errors.As(err, &NeedsRemoteProviderError{}) {
 				// This shouldn't happen and indicates that blobchain IDs are being set incorrectly,
 				// but if it does happen it's not fatal as we can just not try to re-use by blobchainID.
 				// Log the error but continue.
@@ -201,7 +201,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,
 	for _, si := range sis {
 		ref, err := cm.get(ctx, si.ID(), opts...)
 		// if the error was NotFound or NeedsRemoteProvider, we can't re-use the snapshot from the blob so just skip it
-		if err != nil && !IsNotFound(err) && !errors.As(err, &NeedsRemoteProvidersError{}) {
+		if err != nil && !IsNotFound(err) && !errors.As(err, &NeedsRemoteProviderError{}) {
 			return nil, errors.Wrapf(err, "failed to get record %s by chainid", si.ID())
 		}
 		if ref != nil {
@@ -364,7 +364,7 @@ func (cm *cacheManager) get(ctx context.Context, id string, opts ...RefOption) (
 // getRecord returns record for id. Requires manager lock.
 func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOption) (cr *cacheRecord, retErr error) {
 	checkLazyProviders := func(rec *cacheRecord) error {
-		missing := NeedsRemoteProvidersError(nil)
+		missing := NeedsRemoteProviderError(nil)
 		dhs := descHandlersOf(opts...)
 		if err := rec.walkUniqueAncestors(func(cr *cacheRecord) error {
 			blob := cr.getBlob()

cache/migrate_v2.go

@@ -162,7 +162,6 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho
 		if err := md.commitMetadata(); err != nil {
 			return err
 		}
-
 	}

 	// calculate new chainid/blobsumid

cache/opts.go

@@ -30,8 +30,8 @@ func descHandlersOf(opts ...RefOption) DescHandlers {

 type DescHandlerKey digest.Digest

-type NeedsRemoteProvidersError []digest.Digest
+type NeedsRemoteProviderError []digest.Digest //nolint:errname

-func (m NeedsRemoteProvidersError) Error() string {
+func (m NeedsRemoteProviderError) Error() string {
 	return fmt.Sprintf("missing descriptor handlers for lazy blobs %+v", []digest.Digest(m))
 }
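
The `//nolint:errname` keeps the slice-backed error type as-is. For context, a self-contained sketch of how callers such as the cache/manager.go hunks above match this kind of error with errors.As — the names mirror the diff, but the body is illustrative (the real type wraps digest.Digest, not string):

```go
// Illustrative sketch, not repo code: a slice-backed error type carrying
// the digests that lack handlers, matched downstream with errors.As.
package main

import (
	"errors"
	"fmt"
)

type NeedsRemoteProviderError []string

func (m NeedsRemoteProviderError) Error() string {
	return fmt.Sprintf("missing descriptor handlers for lazy blobs %+v", []string(m))
}

func getBlob() error {
	return NeedsRemoteProviderError{"sha256:abc"}
}

func main() {
	err := getBlob()
	var missing NeedsRemoteProviderError
	if errors.As(err, &missing) {
		fmt.Println("need providers for:", []string(missing))
	}
}
```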

@@ -131,5 +131,4 @@ func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, v
 		Descriptors: []ocispecs.Descriptor{descPair.Descriptor},
 		Provider:    descPair.Provider,
 	}, nil
-
 }

@@ -695,7 +695,7 @@ func testPushByDigest(t *testing.T, sb integration.Sandbox) {
 	defer c.Close()

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -847,13 +847,11 @@ func testSecurityModeSysfs(t *testing.T, sb integration.Sandbox) {
 }

 func testSecurityModeErrors(t *testing.T, sb integration.Sandbox) {
-
 	c, err := New(sb.Context(), sb.Address())
 	require.NoError(t, err)
 	defer c.Close()

 	secMode := sb.Value("secmode")
-
 	if secMode == securitySandbox {
 		st := llb.Image("busybox:latest").
 			Run(llb.Shlex(`sh -c 'echo sandbox'`))
@@ -887,7 +885,7 @@ func testFrontendImageNaming(t *testing.T, sb integration.Sandbox) {
 	defer c.Close()

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -1734,7 +1732,6 @@ func testResolveAndHosts(t *testing.T, sb integration.Sandbox) {
 	dt, err = ioutil.ReadFile(filepath.Join(destDir, "hosts"))
 	require.NoError(t, err)
 	require.Contains(t, string(dt), "127.0.0.1 localhost")
-
 }

 func testUser(t *testing.T, sb integration.Sandbox) {
@@ -1835,7 +1832,6 @@ func testOCIExporter(t *testing.T, sb integration.Sandbox) {
 	require.NoError(t, err)

 	for _, exp := range []string{ExporterOCI, ExporterDocker} {
-
 		destDir, err := ioutil.TempDir("", "buildkit")
 		require.NoError(t, err)
 		defer os.RemoveAll(destDir)
@@ -2172,7 +2168,7 @@ func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
 	require.NoError(t, err)

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -2458,7 +2454,7 @@ func testPullZstdImage(t *testing.T, sb integration.Sandbox) {
 	require.NoError(t, err)

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -2524,7 +2520,7 @@ func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) {
 	require.NoError(t, err)

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -2715,7 +2711,7 @@ func testStargzLazyPull(t *testing.T, sb integration.Sandbox) {
 	require.NoError(t, err)
 	defer client.Close()

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -2845,7 +2841,7 @@ func testLazyImagePush(t *testing.T, sb integration.Sandbox) {
 	ctx := namespaces.WithNamespace(sb.Context(), "buildkit")

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -3035,7 +3031,7 @@ func testBasicCacheImportExport(t *testing.T, sb integration.Sandbox, cacheOptio
 func testBasicRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
 	skipDockerd(t, sb)
 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -3052,7 +3048,7 @@ func testBasicRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
 func testMultipleRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
 	skipDockerd(t, sb)
 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -3096,7 +3092,7 @@ func testBasicInlineCacheImportExport(t *testing.T, sb integration.Sandbox) {
 	skipDockerd(t, sb)
 	requiresLinux(t)
 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -3849,7 +3845,6 @@ func testSourceMap(t *testing.T, sb integration.Sandbox) {
 	require.Equal(t, 1, len(srcs[2].Ranges))
 	require.Equal(t, int32(7), srcs[2].Ranges[0].Start.Line)
 	require.Equal(t, int32(0), srcs[2].Ranges[0].Start.Character)
-
 }

 func testSourceMapFromRef(t *testing.T, sb integration.Sandbox) {
@@ -4029,7 +4024,7 @@ func testMergeOp(t *testing.T, sb integration.Sandbox) {
 	ctx := sb.Context()

 	registry, err := sb.NewRegistry()
-	if !errors.Is(err, integration.ErrorRequirements) {
+	if !errors.Is(err, integration.ErrRequirements) {
 		require.NoError(t, err)
 	}
@@ -4148,7 +4143,7 @@ func testMergeOpCache(t *testing.T, sb integration.Sandbox, mode string) {
 	ctx := namespaces.WithNamespace(sb.Context(), "buildkit")

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)

@@ -160,7 +160,6 @@ func (d *DefinitionOp) Marshal(ctx context.Context, c *Constraints) (digest.Dige
 	meta := d.metas[d.dgst]
 	return d.dgst, d.defs[d.dgst], &meta, d.sources[d.dgst], nil
-
 }

 func (d *DefinitionOp) Output() Output {

@@ -1126,7 +1126,7 @@ func (tc verifyContents) Run(t *testing.T, sb integration.Sandbox) {
 	defer c.Close()

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -1248,7 +1248,7 @@ func (tc verifyBlobReuse) Run(t *testing.T, sb integration.Sandbox) {
 	ctx := namespaces.WithNamespace(sb.Context(), "buildkit")

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)

@@ -459,7 +459,6 @@ func parseCacheOptions(ctx context.Context, opt SolveOpt) (*cacheOptions, error)
 			}
 		}
 		contentStores["local:"+csDir] = cs
-
 	}
 	if im.Type == "registry" {
 		legacyImportRef := attrs["ref"]

@@ -78,5 +78,4 @@ func ParseExportCache(exportCaches, legacyExportCacheOpts []string) ([]client.Ca
 		}
 	}
 	return exports, nil
-
 }

@@ -8,7 +8,6 @@ import (
 )

 func TestLoad(t *testing.T) {
-
 	const testConfig = `
 root = "/foo/bar"
 debug=true

@@ -27,5 +27,4 @@ func parseIdentityMapping(str string) (*idtools.IdentityMapping, error) {
 		return nil, errors.Wrap(err, "failed to create ID mappings")
 	}
 	return mappings, nil
-
 }

@@ -46,7 +46,6 @@ func withCGroup() oci.SpecOpts {
 		})
 		return nil
 	}
-
 }

 func hasPrefix(p, prefixDir string) bool {

@@ -51,7 +51,6 @@ func (e *localExporter) Config() exporter.Config {
 }

 func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) {
-
 	timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
 	defer cancel()

@@ -68,5 +68,4 @@ RUN ls
 	require.False(t, ok)
 	require.Equal(t, ref, "")
 	require.Equal(t, cmdline, "")
-
 }

@@ -509,7 +509,7 @@ func testOnBuildHeredoc(t *testing.T, sb integration.Sandbox) {
 	f := getFrontend(t, sb)

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)

@@ -31,7 +31,6 @@ func init() {
 	}

 	securityTests = append(securityTests, runSecurityTests...)
-
 }

 func testInsecureDevicesWhitelist(t *testing.T, sb integration.Sandbox) {

@@ -1817,7 +1817,6 @@ COPY --from=0 /foo /foo
 	dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
 	require.NoError(t, err)
 	require.Equal(t, "foo-contents", string(dt))
-
 }

 func testCmdShell(t *testing.T, sb integration.Sandbox) {
@@ -3993,7 +3992,7 @@ func testOnBuildCleared(t *testing.T, sb integration.Sandbox) {
 	f := getFrontend(t, sb)

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -4101,7 +4100,7 @@ func testCacheMultiPlatformImportExport(t *testing.T, sb integration.Sandbox) {
 	f := getFrontend(t, sb)

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -4227,7 +4226,7 @@ func testCacheImportExport(t *testing.T, sb integration.Sandbox) {
 	f := getFrontend(t, sb)

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)
@@ -4404,7 +4403,7 @@ func testImportExportReproducibleIDs(t *testing.T, sb integration.Sandbox) {
 	f := getFrontend(t, sb)

 	registry, err := sb.NewRegistry()
-	if errors.Is(err, integration.ErrorRequirements) {
+	if errors.Is(err, integration.ErrRequirements) {
 		t.Skip(err.Error())
 	}
 	require.NoError(t, err)

@@ -204,7 +204,6 @@ func (bf *BFlags) Parse() error {
 		default:
 			panic("No idea what kind of flag we have! Should never get here!")
 		}
 	}
-
 	return nil

@@ -102,7 +102,7 @@ func ParseInstruction(node *parser.Node) (v interface{}, err error) {
 	case command.Shell:
 		return parseShell(req)
 	}
-	return nil, suggest.WrapError(&UnknownInstruction{Instruction: node.Value, Line: node.StartLine}, node.Value, allInstructionNames(), false)
+	return nil, suggest.WrapError(&UnknownInstructionError{Instruction: node.Value, Line: node.StartLine}, node.Value, allInstructionNames(), false)
 }

 // ParseCommand converts an AST to a typed Command
@@ -117,13 +117,13 @@ func ParseCommand(node *parser.Node) (Command, error) {
 	return nil, parser.WithLocation(errors.Errorf("%T is not a command type", s), node.Location())
 }

-// UnknownInstruction represents an error occurring when a command is unresolvable
-type UnknownInstruction struct {
+// UnknownInstructionError represents an error occurring when a command is unresolvable
+type UnknownInstructionError struct {
 	Line        int
 	Instruction string
 }

-func (e *UnknownInstruction) Error() string {
+func (e *UnknownInstructionError) Error() string {
 	return fmt.Sprintf("unknown instruction: %s", e.Instruction)
 }
@@ -167,7 +167,6 @@ func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error)
 		default:
 			return nil, nil, parser.WithLocation(errors.Errorf("%T is not a command type", cmd), n.Location())
 		}
 	}
-
 	return stages, metaArgs, nil
 }
@@ -193,7 +192,6 @@
 }

 func parseEnv(req parseRequest) (*EnvCommand, error) {
-
 	if err := req.flags.Parse(); err != nil {
 		return nil, err
 	}
@@ -222,7 +220,6 @@ func parseMaintainer(req parseRequest) (*MaintainerCommand, error) {
 }

 func parseLabel(req parseRequest) (*LabelCommand, error) {
-
 	if err := req.flags.Parse(); err != nil {
 		return nil, err
 	}
@@ -346,7 +343,6 @@ func parseFrom(req parseRequest) (*Stage, error) {
 		Location:   req.location,
 		Comment:    getComment(req.comments, stageName),
 	}, nil
-
 }

 func parseBuildStageName(args []string) (string, error) {
@@ -404,7 +400,6 @@ func parseWorkdir(req parseRequest) (*WorkdirCommand, error) {
 		Path:            req.args[0],
 		withNameAndCode: newWithNameAndCode(req),
 	}, nil
-
 }

 func parseShellDependentCommand(req parseRequest, command string, emptyAsNil bool) (ShellDependantCmdLine, error) {
@@ -528,7 +523,6 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) {
 			Test: test,
 		}
 	} else {
-
 		healthcheck := container.HealthConfig{}

 		flInterval := req.flags.AddString("interval", "")
@@ -645,7 +639,6 @@ func parseVolume(req parseRequest) (*VolumeCommand, error) {
 		cmd.Volumes = append(cmd.Volumes, v)
 	}
 	return cmd, nil
-
 }

 func parseStopSignal(req parseRequest) (*StopSignalCommand, error) {
@@ -659,7 +652,6 @@ func parseStopSignal(req parseRequest) (*StopSignalCommand, error) {
 		withNameAndCode: newWithNameAndCode(req),
 	}
 	return cmd, nil
-
 }

 func parseArg(req parseRequest) (*ArgCommand, error) {

@@ -85,7 +85,6 @@ func NewContainer(ctx context.Context, w worker.Worker, sm *session.Manager, g s
 			cm = refs[m.Input].Worker.CacheManager()
 		}
 		return cm.New(ctx, ref, g)
-
 	})
 	if err != nil {
 		for i := len(p.Actives) - 1; i >= 0; i-- { // call in LIFO order

@@ -177,7 +177,6 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten
 	}()

 	if res.Ref == nil {
 		return nil, errors.Errorf("gateway source didn't return default result")
-
 	}
 	frontendDef = res.Ref.Definition()
 	r, err := res.Ref.Result(ctx)
@@ -827,7 +826,6 @@ func (lbf *llbBridgeForwarder) StatFile(ctx context.Context, req *pb.StatFileReq
 }

 func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) {
-
 	workers := lbf.workers.WorkerInfos()
 	pbWorkers := make([]*apitypes.WorkerRecord, 0, len(workers))
 	for _, w := range workers {
@@ -930,7 +928,6 @@ func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewConta
 					return nil, errors.Errorf("invalid reference %T", res.Sys())
 				}
 			}
 		}
-
 		ctrReq.Mounts = append(ctrReq.Mounts, Mount{
 			WorkerRef: workerRef,

@@ -1,4 +1,4 @@
-package moby_buildkit_v1_frontend //nolint:golint
+package moby_buildkit_v1_frontend //nolint:revive

 import "github.com/moby/buildkit/util/apicaps"
@@ -57,7 +57,6 @@ const (
 )

 func init() {
-
 	Caps.Init(apicaps.Cap{
 		ID:      CapSolveBase,
 		Enabled: true,

@@ -1,4 +1,4 @@
-package moby_buildkit_v1_frontend //nolint:golint
+package moby_buildkit_v1_frontend //nolint:revive

 import (
 	"fmt"

@@ -1,3 +1,3 @@
-package moby_buildkit_v1_frontend //nolint:golint
+package moby_buildkit_v1_frontend //nolint:revive

 //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. gateway.proto

@@ -2,9 +2,10 @@
 FROM golang:1.17-alpine

 RUN apk add --no-cache gcc musl-dev yamllint
-RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.41.1
+RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.43.0
 WORKDIR /go/src/github.com/moby/buildkit
 RUN --mount=target=/go/src/github.com/moby/buildkit --mount=target=/root/.cache,type=cache \
-  golangci-lint run
+  GOARCH=amd64 golangci-lint run && \
+  GOARCH=arm64 golangci-lint run
 RUN --mount=target=/go/src/github.com/moby/buildkit --mount=target=/root/.cache,type=cache \
   yamllint -c .yamllint.yml --strict .

@@ -16,11 +16,8 @@ import (

 func Dialer(api controlapi.ControlClient) session.Dialer {
 	return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
-
 		meta = lowerHeaders(meta)
-
 		md := metadata.MD(meta)
-
 		ctx = metadata.NewOutgoingContext(ctx, md)

 		stream, err := api.Session(ctx)
@@ -126,7 +123,6 @@ func (c *conn) Close() (err error) {
 			c.lastBuf = append(c.lastBuf, c.buf...)
 		}
 		c.readMu.Unlock()
-
 	})

 	return nil
 }

@@ -365,7 +365,6 @@ func (e *edge) unpark(incoming []pipe.Sender, updates, allPipes []pipe.Receiver,
 			e.createInputRequests(desiredState, f, true)
 		}
 	}
-
 }

 func (e *edge) makeExportable(k *CacheKey, records []*CacheRecord) ExportableCacheKey {

@@ -14,7 +14,7 @@ func init() {
 	typeurl.Register((*Solve)(nil), "github.com/moby/buildkit", "errdefs.Solve+json")
 }

-//nolint:golint
+//nolint:revive
 type IsSolve_Subject isSolve_Subject

 // SolveError will be returned when an error is encountered during a solve that

@@ -77,8 +77,8 @@ func (m *staticEmulatorMount) Mount() ([]mount.Mount, func() error, error) {
 		}}, func() error {
 			return os.RemoveAll(tmpdir)
 		}, nil
-
 }

 func (m *staticEmulatorMount) IdentityMapping() *idtools.IdentityMapping {
 	return m.idmap
 }

@@ -73,7 +73,6 @@ const (
 )

 func init() {
-
 	Caps.Init(apicaps.Cap{
 		ID:      CapSourceImage,
 		Enabled: true,

@@ -310,7 +310,6 @@ func TestSingleLevelCache(t *testing.T) {
 	require.NoError(t, j2.Discard())
 	j2 = nil
-
 }

 func TestSingleLevelCacheParallel(t *testing.T) {
@@ -388,7 +387,6 @@ func TestSingleLevelCacheParallel(t *testing.T) {
 	require.Equal(t, int64(1), *g1.Vertex.(*vertex).cacheCallCount)
 	// only one execution ran
 	require.Equal(t, int64(1), *g0.Vertex.(*vertex).execCallCount+*g1.Vertex.(*vertex).execCallCount)
-
 }

 func TestMultiLevelCacheParallel(t *testing.T) {
@@ -525,7 +523,6 @@ func TestSingleCancelCache(t *testing.T) {
 	require.NoError(t, j0.Discard())
 	j0 = nil
-
 }

 func TestSingleCancelExec(t *testing.T) {
 	t.Parallel()
@@ -729,7 +726,6 @@ func TestMultiLevelCalculation(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, unwrapInt(res), 42)
 	require.Equal(t, len(bi), 0)
-
 }

 func TestHugeGraph(t *testing.T) {
@@ -1139,7 +1135,6 @@ func TestSlowCache(t *testing.T) {
 	require.NoError(t, j1.Discard())
 	j1 = nil
-
 }

 // TestParallelInputs validates that inputs are processed in parallel
@@ -1334,7 +1329,6 @@ func TestErrorReturns(t *testing.T) {
 	require.NoError(t, j2.Discard())
 	j1 = nil
-
 }

 func TestMultipleCacheSources(t *testing.T) {
@@ -1893,7 +1887,6 @@ func TestSubbuild(t *testing.T) {
 	require.NoError(t, j1.Discard())
 	j1 = nil
-
 }

 func TestCacheWithSelector(t *testing.T) {
@@ -3817,10 +3810,11 @@ func (t *testExporterTarget) Add(dgst digest.Digest) CacheExporterRecord {
 	t.records = append(t.records, r)
 	return r
 }
+
 func (t *testExporterTarget) Visit(v interface{}) {
 	t.visited[v] = struct{}{}
 }
 func (t *testExporterTarget) Visited(v interface{}) bool {
 	_, ok := t.visited[v]
 	return ok
@@ -217,7 +217,6 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach
 	p.descHandlers = cache.DescHandlers(make(map[digest.Digest]*cache.DescHandler))
 	for i, desc := range p.manifest.Descriptors {
-
 		// Hints for remote/stargz snapshotter for searching for remote snapshots
 		labels := snapshots.FilterInheritedLabels(desc.Annotations)
 		if labels == nil {

@@ -30,5 +30,4 @@ func TestNewGitIdentifier(t *testing.T) {
 	require.Equal(t, "https://github.com/moby/buildkit.git", gi.Remote)
 	require.Equal(t, "main", gi.Ref)
 	require.Equal(t, "", gi.Subdir)
-
 }

@@ -312,7 +312,6 @@ func TestHTTPChecksum(t *testing.T) {
 	ref.Release(context.TODO())
 	ref = nil
-
 }

 func readFile(ctx context.Context, ref cache.ImmutableRef, fp string) ([]byte, error) {

@@ -1,3 +1,3 @@
-package moby_buildkit_v1_apicaps //nolint:golint
+package moby_buildkit_v1_apicaps //nolint:revive

 //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. caps.proto

@@ -120,7 +120,7 @@ func getFreeLoopID() (int, error) {
 	}
 	defer fd.Close()

-	const _LOOP_CTL_GET_FREE = 0x4C82 //nolint:golint
+	const _LOOP_CTL_GET_FREE = 0x4C82 //nolint:revive
 	r1, _, uerr := unix.Syscall(unix.SYS_IOCTL, fd.Fd(), _LOOP_CTL_GET_FREE, 0)
 	if uerr == 0 {
 		return int(r1), nil
@@ -129,17 +129,17 @@ func getFreeLoopID() (int, error) {
 }

 var (
-	currentCaps     []string
-	currentCapsErr  error
-	currentCapsOnce sync.Once
+	currentCaps      []string
+	currentCapsError error //nolint:errname
+	currentCapsOnce  sync.Once
 )

 func getCurrentCaps() ([]string, error) {
 	currentCapsOnce.Do(func() {
-		currentCaps, currentCapsErr = cap.Current()
+		currentCaps, currentCapsError = cap.Current()
 	})
-	return currentCaps, currentCapsErr
+	return currentCaps, currentCapsError
 }

 func getAllCaps() ([]string, error) {

@@ -169,7 +169,7 @@ func FromGRPC(err error) error {
 		}
 	}

-	err = &grpcStatusErr{st: status.FromProto(n)}
+	err = &grpcStatusError{st: status.FromProto(n)}

 	for _, s := range stacks {
 		if s != nil {
@@ -188,18 +188,18 @@ func FromGRPC(err error) error {
 	return stack.Enable(err)
 }

-type grpcStatusErr struct {
+type grpcStatusError struct {
 	st *status.Status
 }

-func (e *grpcStatusErr) Error() string {
+func (e *grpcStatusError) Error() string {
 	if e.st.Code() == codes.OK || e.st.Code() == codes.Unknown {
 		return e.st.Message()
 	}
 	return e.st.Code().String() + ": " + e.st.Message()
 }

-func (e *grpcStatusErr) GRPCStatus() *status.Status {
+func (e *grpcStatusError) GRPCStatus() *status.Status {
 	return e.st
 }
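
The rename to grpcStatusError satisfies errname's type-suffix convention; behavior is unchanged because the GRPCStatus method is what matters. A simplified, self-contained sketch (adapted from this hunk, not a verbatim copy) of how google.golang.org/grpc/status recovers a *status.Status from any error exposing that method:

```go
// Sketch: status.FromError inspects errors for a GRPCStatus() method.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type grpcStatusError struct {
	st *status.Status
}

func (e *grpcStatusError) Error() string {
	if e.st.Code() == codes.OK || e.st.Code() == codes.Unknown {
		return e.st.Message()
	}
	return e.st.Code().String() + ": " + e.st.Message()
}

// GRPCStatus is what lets the grpc status package unwrap this error.
func (e *grpcStatusError) GRPCStatus() *status.Status {
	return e.st
}

func main() {
	var err error = &grpcStatusError{st: status.New(codes.NotFound, "blob not found")}
	if s, ok := status.FromError(err); ok {
		fmt.Println(s.Code(), s.Message()) // NotFound blob not found
	}
}
```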

@@ -456,7 +456,6 @@ func collectAndCheckChanges(base, upperdir string, expected []TestChange) error

 func diffString(c1, c2 []TestChange) string {
 	return fmt.Sprintf("got(%d):\n%s\nexpected(%d):\n%s", len(c1), changesString(c1), len(c2), changesString(c2))
-
 }

 func changesString(c []TestChange) string {

@@ -65,7 +65,6 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
 		} else {
 			fmt.Fprintf(p.w, "#%d %s\n", v.index, v.Name)
 		}
-
 	}

 	if len(v.events) != 0 {
@@ -180,7 +179,6 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
 		}
 		fmt.Fprintf(p.w, "#%d DONE%s\n", v.index, tm)
-
 	}

 	delete(t.updates, dgst)

@@ -86,7 +86,6 @@ func (r readerWithCancel) Close() error {
 }

 func trackProgress(ctx context.Context, desc ocispecs.Descriptor, manager PullManager, doneCh chan<- struct{}) {
-
 	defer close(doneCh)

 	ticker := time.NewTicker(150 * time.Millisecond)
@@ -133,6 +132,5 @@ func trackProgress(ctx context.Context, desc ocispecs.Descriptor, manager PullMa
 			})
 			return
-
 		}
 	}
 }

@@ -197,7 +197,7 @@ func (r *Resolver) Resolve(ctx context.Context, ref string) (string, ocispecs.De
 	n, desc, err := r.Resolver.Resolve(ctx, ref)
 	if err == nil {
 		atomic.AddInt64(&r.handler.counter, 1)
-		return n, desc, err
+		return n, desc, nil
 	}

 	if r.mode == source.ResolveModeDefault && r.is != nil {

@@ -116,19 +116,19 @@ func (mc *multiCloser) append(f func() error) {
 	mc.fns = append(mc.fns, f)
 }

-var ErrorRequirements = errors.Errorf("missing requirements")
+var ErrRequirements = errors.Errorf("missing requirements")

 func lookupBinary(name string) error {
 	_, err := exec.LookPath(name)
 	if err != nil {
-		return errors.Wrapf(ErrorRequirements, "failed to lookup %s binary", name)
+		return errors.Wrapf(ErrRequirements, "failed to lookup %s binary", name)
 	}
 	return nil
 }

 func requireRoot() error {
 	if os.Getuid() != 0 {
-		return errors.Wrap(ErrorRequirements, "requires root")
+		return errors.Wrap(ErrRequirements, "requires root")
 	}
 	return nil
 }
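
errname also covers sentinel values, which is what drives the ErrorRequirements → ErrRequirements rename here: sentinel error variables are expected to be named Err... (err... if unexported), while error types end in ...Error. A minimal illustration, assuming the github.com/pkg/errors wrapping used in this file:

```go
// Illustrative sketch of the errname sentinel convention; the uid
// parameter is hypothetical, added so the example is self-contained.
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var ErrRequirements = errors.Errorf("missing requirements") // errname-friendly name

func requireRoot(uid int) error {
	if uid != 0 {
		// Wrapping keeps errors.Is(err, ErrRequirements) true for callers,
		// which is how the test helpers above decide to t.Skip.
		return errors.Wrap(ErrRequirements, "requires root")
	}
	return nil
}

func main() {
	err := requireRoot(1000)
	fmt.Println(errors.Is(err, ErrRequirements)) // true
}
```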

@@ -52,7 +52,6 @@ func TestThrottle(t *testing.T) {
 		}
 		retries++
 	}
-
 }

 func TestAfter(t *testing.T) {

@@ -57,7 +57,7 @@ func ContextWithSpanFromContext(ctx, ctx2 context.Context) context.Context {
 	return ctx
 }

-var DefaultTransport http.RoundTripper = NewTransport(http.DefaultTransport)
+var DefaultTransport = NewTransport(http.DefaultTransport)

 var DefaultClient = &http.Client{
 	Transport: DefaultTransport,
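
This hunk presumably clears revive's golint-inherited check that flags an explicit type the right-hand side already implies; that reading assumes NewTransport returns http.RoundTripper. A small sketch of the pattern:

```go
// Sketch only: NewTransport's signature is an assumption here.
package main

import "net/http"

func NewTransport(rt http.RoundTripper) http.RoundTripper { return rt }

// Before: golint/revive reports "should omit type http.RoundTripper from
// declaration; it will be inferred from the right-hand side".
var DefaultTransport http.RoundTripper = NewTransport(http.DefaultTransport)

// After: same static type, now inferred.
var DefaultTransportFixed = NewTransport(http.DefaultTransport)

func main() { _, _ = DefaultTransport, DefaultTransportFixed }
```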

@@ -98,7 +98,6 @@ func (s *winApplier) Apply(ctx context.Context, desc ocispecs.Descriptor, mounts
 			Digest:    digester.Digest(),
 		}
 		return nil
-
 	}); err != nil {
 		return ocispecs.Descriptor{}, err
 	}

@@ -109,8 +109,7 @@ func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opt
 			if err != nil {
 				return errors.Wrap(err, "failed to get compressed stream")
 			}
-			var w io.Writer = io.MultiWriter(compressed, dgstr.Hash())
-			w, discard, done := makeWindowsLayer(w)
+			w, discard, done := makeWindowsLayer(io.MultiWriter(compressed, dgstr.Hash()))
 			err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot)
 			if err != nil {
 				discard(err)
@@ -213,7 +212,6 @@ func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) {
 	tarWriter := tar.NewWriter(w)

 	err := func() error {
-
 		h := &tar.Header{
 			Name:     "Hives",
 			Typeflag: tar.TypeDir,

@@ -228,7 +228,7 @@ func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.Imm
 	}

 	ref, err := w.CacheMgr.Get(ctx, id, opts...)
-	var needsRemoteProviders cache.NeedsRemoteProvidersError
+	var needsRemoteProviders cache.NeedsRemoteProviderError
 	if errors.As(err, &needsRemoteProviders) {
 		if optGetter := solver.CacheOptGetterOf(ctx); optGetter != nil {
 			var keys []interface{}