update fsutil and docker

update fsutil to include this patch: d952e50eae

docker also had to be updated due to 2d121ce88f

Signed-off-by: Alex Couture-Beil <alex@earthly.dev>
Branch: master
Author: Alex Couture-Beil
Date: 2021-11-25 09:50:28 -08:00
Parent: 90690ca0c9
Commit: 197f619956
100 changed files with 1116 additions and 767 deletions

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package api // import "github.com/docker/docker/api"

View File

@ -382,11 +382,13 @@ definitions:
type: "string"
description: |
- Empty string means not to restart
- `no` Do not automatically restart
- `always` Always restart
- `unless-stopped` Restart always except when the user has manually stopped the container
- `on-failure` Restart only when the container exit code is non-zero
enum:
- ""
- "no"
- "always"
- "unless-stopped"
- "on-failure"
@ -744,6 +746,7 @@ definitions:
description: |
Health stores information about the container's healthcheck results.
type: "object"
x-nullable: true
properties:
Status:
description: |
@ -769,13 +772,13 @@ definitions:
description: |
Log contains the last few results (oldest first)
items:
x-nullable: true
$ref: "#/definitions/HealthcheckResult"
HealthcheckResult:
description: |
HealthcheckResult stores information about a single run of a healthcheck probe
type: "object"
x-nullable: true
properties:
Start:
description: |
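Because Health (and, further down, ContainerState) is now marked x-nullable, the corresponding Go fields are pointers and may be nil. A minimal sketch of inspecting health defensively with the standard docker client; the container name is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	info, err := cli.ContainerInspect(context.Background(), "my-container")
	if err != nil {
		panic(err)
	}
	// Both State and State.Health are nullable in the API, so check both.
	if info.State != nil && info.State.Health != nil {
		fmt.Println("health status:", info.State.Health.Status)
	} else {
		fmt.Println("no healthcheck configured or state unavailable")
	}
}
```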
@ -2188,6 +2191,25 @@ definitions:
type: "string"
x-nullable: false
PluginPrivilege:
description: |
Describes a permission the user has to accept upon installing
the plugin.
type: "object"
x-go-name: "PluginPrivilege"
properties:
Name:
type: "string"
example: "network"
Description:
type: "string"
Value:
type: "array"
items:
type: "string"
example:
- "host"
Plugin:
description: "A plugin for the Engine API"
type: "object"
@ -2970,19 +2992,7 @@ definitions:
PluginPrivilege:
type: "array"
items:
description: |
Describes a permission accepted by the user upon installing the
plugin.
type: "object"
properties:
Name:
type: "string"
Description:
type: "string"
Value:
type: "array"
items:
type: "string"
$ref: "#/definitions/PluginPrivilege"
ContainerSpec:
type: "object"
description: |
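With the inline privilege schema replaced by the shared PluginPrivilege definition, the Go API types carry the same shape. A small sketch showing the JSON wire format; the privilege values below are illustrative, mirroring the example above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	privileges := types.PluginPrivileges{
		{
			Name:        "network",
			Description: "", // illustrative; often empty in real plugin manifests
			Value:       []string{"host"},
		},
	}
	out, err := json.MarshalIndent(privileges, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```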
@ -4022,73 +4032,71 @@ definitions:
Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
ContainerSummary:
type: "array"
items:
type: "object"
properties:
Id:
description: "The ID of this container"
type: "object"
properties:
Id:
description: "The ID of this container"
type: "string"
x-go-name: "ID"
Names:
description: "The names that this container has been given"
type: "array"
items:
type: "string"
x-go-name: "ID"
Names:
description: "The names that this container has been given"
type: "array"
items:
Image:
description: "The name of the image used when creating this container"
type: "string"
ImageID:
description: "The ID of the image that this container was created from"
type: "string"
Command:
description: "Command to run when starting the container"
type: "string"
Created:
description: "When the container was created"
type: "integer"
format: "int64"
Ports:
description: "The ports exposed by this container"
type: "array"
items:
$ref: "#/definitions/Port"
SizeRw:
description: "The size of files that have been created or changed by this container"
type: "integer"
format: "int64"
SizeRootFs:
description: "The total size of all the files in this container"
type: "integer"
format: "int64"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
State:
description: "The state of this container (e.g. `Exited`)"
type: "string"
Status:
description: "Additional human-readable status of this container (e.g. `Exit 0`)"
type: "string"
HostConfig:
type: "object"
properties:
NetworkMode:
type: "string"
Image:
description: "The name of the image used when creating this container"
type: "string"
ImageID:
description: "The ID of the image that this container was created from"
type: "string"
Command:
description: "Command to run when starting the container"
type: "string"
Created:
description: "When the container was created"
type: "integer"
format: "int64"
Ports:
description: "The ports exposed by this container"
type: "array"
items:
$ref: "#/definitions/Port"
SizeRw:
description: "The size of files that have been created or changed by this container"
type: "integer"
format: "int64"
SizeRootFs:
description: "The total size of all the files in this container"
type: "integer"
format: "int64"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
State:
description: "The state of this container (e.g. `Exited`)"
type: "string"
Status:
description: "Additional human-readable status of this container (e.g. `Exit 0`)"
type: "string"
HostConfig:
type: "object"
properties:
NetworkMode:
type: "string"
NetworkSettings:
description: "A summary of the container's network settings"
type: "object"
properties:
Networks:
type: "object"
additionalProperties:
$ref: "#/definitions/EndpointSettings"
Mounts:
type: "array"
items:
$ref: "#/definitions/Mount"
NetworkSettings:
description: "A summary of the container's network settings"
type: "object"
properties:
Networks:
type: "object"
additionalProperties:
$ref: "#/definitions/EndpointSettings"
Mounts:
type: "array"
items:
$ref: "#/definitions/Mount"
Driver:
description: "Driver represents a driver (network, logging, secrets)."
@ -4210,6 +4218,7 @@ definitions:
ContainerState stores container's running state. It's part of ContainerJSONBase
and will be returned by the "inspect" command.
type: "object"
x-nullable: true
properties:
Status:
description: |
@ -4267,7 +4276,6 @@ definitions:
type: "string"
example: "2020-01-06T09:07:59.461876391Z"
Health:
x-nullable: true
$ref: "#/definitions/Health"
SystemVersion:
@ -4366,7 +4374,6 @@ definitions:
type: "string"
example: "2020-06-22T15:49:27.000000000+00:00"
SystemInfo:
type: "object"
properties:
@ -5199,6 +5206,158 @@ definitions:
additionalProperties:
type: "string"
EventActor:
description: |
Actor describes something that generates events, like a container, network,
or a volume.
type: "object"
properties:
ID:
description: "The ID of the object emitting the event"
type: "string"
example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
Attributes:
description: |
Various key/value attributes of the object, depending on its type.
type: "object"
additionalProperties:
type: "string"
example:
com.example.some-label: "some-label-value"
image: "alpine:latest"
name: "my-container"
EventMessage:
description: |
EventMessage represents the information an event contains.
type: "object"
title: "SystemEventsResponse"
properties:
Type:
description: "The type of object emitting the event"
type: "string"
enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"]
example: "container"
Action:
description: "The type of event"
type: "string"
example: "create"
Actor:
$ref: "#/definitions/EventActor"
scope:
description: |
Scope of the event. Engine events are `local` scope. Cluster (Swarm)
events are `swarm` scope.
type: "string"
enum: ["local", "swarm"]
time:
description: "Timestamp of event"
type: "integer"
format: "int64"
example: 1629574695
timeNano:
description: "Timestamp of event, with nanosecond accuracy"
type: "integer"
format: "int64"
example: 1629574695515050031
OCIDescriptor:
type: "object"
x-go-name: Descriptor
description: |
A descriptor struct containing digest, media type, and size, as defined in
the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md).
properties:
mediaType:
description: |
The media type of the object this schema refers to.
type: "string"
example: "application/vnd.docker.distribution.manifest.v2+json"
digest:
description: |
The digest of the targeted content.
type: "string"
example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
size:
description: |
The size in bytes of the blob.
type: "integer"
format: "int64"
example: 3987495
# TODO Not yet including these fields for now, as they are nil / omitted in our response.
# urls:
# description: |
# List of URLs from which this object MAY be downloaded.
# type: "array"
# items:
# type: "string"
# format: "uri"
# annotations:
# description: |
# Arbitrary metadata relating to the targeted content.
# type: "object"
# additionalProperties:
# type: "string"
# platform:
# $ref: "#/definitions/OCIPlatform"
OCIPlatform:
type: "object"
x-go-name: Platform
description: |
Describes the platform which the image in the manifest runs on, as defined
in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md).
properties:
architecture:
description: |
The CPU architecture, for example `amd64` or `ppc64`.
type: "string"
example: "arm"
os:
description: |
The operating system, for example `linux` or `windows`.
type: "string"
example: "windows"
os.version:
description: |
Optional field specifying the operating system version, for example on
Windows `10.0.19041.1165`.
type: "string"
example: "10.0.19041.1165"
os.features:
description: |
Optional field specifying an array of strings, each listing a required
OS feature (for example on Windows `win32k`).
type: "array"
items:
type: "string"
example:
- "win32k"
variant:
description: |
Optional field specifying a variant of the CPU, for example `v7` to
specify ARMv7 when architecture is `arm`.
type: "string"
example: "v7"
DistributionInspect:
type: "object"
x-go-name: DistributionInspect
title: "DistributionInspectResponse"
required: [Descriptor, Platforms]
description: |
Describes the result obtained from contacting the registry to retrieve
image metadata.
properties:
Descriptor:
$ref: "#/definitions/OCIDescriptor"
Platforms:
type: "array"
description: |
An array containing all platforms supported by the image.
items:
$ref: "#/definitions/OCIPlatform"
paths:
/containers/json:
get:
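The new OCIDescriptor/OCIPlatform/DistributionInspect definitions back the distribution-inspect endpoint. A sketch of calling it from the Go client; the image reference is a placeholder and registry auth is left empty (sufficient for public images):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Second argument is the base64-encoded registry auth header; empty here.
	inspect, err := cli.DistributionInspect(context.Background(), "alpine:latest", "")
	if err != nil {
		panic(err)
	}
	fmt.Println("digest:", inspect.Descriptor.Digest)
	for _, p := range inspect.Platforms {
		fmt.Printf("platform: %s/%s %s\n", p.OS, p.Architecture, p.Variant)
	}
}
```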
@ -5261,7 +5420,9 @@ paths:
200:
description: "no error"
schema:
$ref: "#/definitions/ContainerSummary"
type: "array"
items:
$ref: "#/definitions/ContainerSummary"
examples:
application/json:
- Id: "8dfafdbc3a40"
@ -5627,7 +5788,6 @@ paths:
items:
type: "string"
State:
x-nullable: true
$ref: "#/definitions/ContainerState"
Image:
description: "The container's image ID"
@ -7505,6 +7665,18 @@ paths:
Refer to the [authentication section](#section/Authentication) for
details.
type: "string"
- name: "changes"
in: "query"
description: |
Apply `Dockerfile` instructions to the image that is created,
for example: `changes=ENV DEBUG=true`.
Note that `ENV DEBUG=true` should be URI component encoded.
Supported `Dockerfile` instructions:
`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
type: "array"
items:
type: "string"
- name: "platform"
in: "query"
description: "Platform in the format os[/arch[/variant]]"
@ -8179,44 +8351,7 @@ paths:
200:
description: "no error"
schema:
type: "object"
title: "SystemEventsResponse"
properties:
Type:
description: "The type of object emitting the event"
type: "string"
Action:
description: "The type of event"
type: "string"
Actor:
type: "object"
properties:
ID:
description: "The ID of the object emitting the event"
type: "string"
Attributes:
description: "Various key/value attributes of the object, depending on its type"
type: "object"
additionalProperties:
type: "string"
time:
description: "Timestamp of event"
type: "integer"
timeNano:
description: "Timestamp of event, with nanosecond accuracy"
type: "integer"
format: "int64"
examples:
application/json:
Type: "container"
Action: "create"
Actor:
ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
Attributes:
com.example.some-label: "some-label-value"
image: "alpine"
name: "my-container"
time: 1461943101
$ref: "#/definitions/EventMessage"
400:
description: "bad parameter"
schema:
@ -8531,6 +8666,7 @@ paths:
description: "Exec configuration"
schema:
type: "object"
title: "ExecConfig"
properties:
AttachStdin:
type: "boolean"
@ -8621,6 +8757,7 @@ paths:
in: "body"
schema:
type: "object"
title: "ExecStartConfig"
properties:
Detach:
type: "boolean"
@ -9155,6 +9292,7 @@ paths:
required: true
schema:
type: "object"
title: "NetworkCreateRequest"
required: ["Name"]
properties:
Name:
@ -9265,6 +9403,7 @@ paths:
required: true
schema:
type: "object"
title: "NetworkConnectRequest"
properties:
Container:
type: "string"
@ -9311,6 +9450,7 @@ paths:
required: true
schema:
type: "object"
title: "NetworkDisconnectRequest"
properties:
Container:
type: "string"
@ -9395,20 +9535,7 @@ paths:
schema:
type: "array"
items:
description: |
Describes a permission the user has to accept upon installing
the plugin.
type: "object"
title: "PluginPrivilegeItem"
properties:
Name:
type: "string"
Description:
type: "string"
Value:
type: "array"
items:
type: "string"
$ref: "#/definitions/PluginPrivilege"
example:
- Name: "network"
Description: ""
@ -9484,19 +9611,7 @@ paths:
schema:
type: "array"
items:
description: |
Describes a permission accepted by the user upon installing the
plugin.
type: "object"
properties:
Name:
type: "string"
Description:
type: "string"
Value:
type: "array"
items:
type: "string"
$ref: "#/definitions/PluginPrivilege"
example:
- Name: "network"
Description: ""
@ -9668,19 +9783,7 @@ paths:
schema:
type: "array"
items:
description: |
Describes a permission accepted by the user upon installing the
plugin.
type: "object"
properties:
Name:
type: "string"
Description:
type: "string"
Value:
type: "array"
items:
type: "string"
$ref: "#/definitions/PluginPrivilege"
example:
- Name: "network"
Description: ""
@ -9970,6 +10073,7 @@ paths:
required: true
schema:
type: "object"
title: "SwarmInitRequest"
properties:
ListenAddr:
description: |
@ -10068,6 +10172,7 @@ paths:
required: true
schema:
type: "object"
title: "SwarmJoinRequest"
properties:
ListenAddr:
description: |
@ -10228,6 +10333,7 @@ paths:
required: true
schema:
type: "object"
title: "SwarmUnlockRequest"
properties:
UnlockKey:
description: "The swarm's unlock key."
@ -11339,67 +11445,7 @@ paths:
200:
description: "descriptor and platform information"
schema:
type: "object"
x-go-name: DistributionInspect
title: "DistributionInspectResponse"
required: [Descriptor, Platforms]
properties:
Descriptor:
type: "object"
description: |
A descriptor struct containing digest, media type, and size.
properties:
MediaType:
type: "string"
Size:
type: "integer"
format: "int64"
Digest:
type: "string"
URLs:
type: "array"
items:
type: "string"
Platforms:
type: "array"
description: |
An array containing all platforms supported by the image.
items:
type: "object"
properties:
Architecture:
type: "string"
OS:
type: "string"
OSVersion:
type: "string"
OSFeatures:
type: "array"
items:
type: "string"
Variant:
type: "string"
Features:
type: "array"
items:
type: "string"
examples:
application/json:
Descriptor:
MediaType: "application/vnd.docker.distribution.manifest.v2+json"
Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
Size: 3987495
URLs:
- ""
Platforms:
- Architecture: "amd64"
OS: "linux"
OSVersion: ""
OSFeatures:
- ""
Variant: ""
Features:
- ""
$ref: "#/definitions/DistributionInspect"
401:
description: "Failed authentication or no image found"
schema:

View File

@ -13,19 +13,26 @@ import (
// CgroupnsMode represents the cgroup namespace mode of the container
type CgroupnsMode string
// cgroup namespace modes for containers
const (
CgroupnsModeEmpty CgroupnsMode = ""
CgroupnsModePrivate CgroupnsMode = "private"
CgroupnsModeHost CgroupnsMode = "host"
)
// IsPrivate indicates whether the container uses its own private cgroup namespace
func (c CgroupnsMode) IsPrivate() bool {
return c == "private"
return c == CgroupnsModePrivate
}
// IsHost indicates whether the container shares the host's cgroup namespace
func (c CgroupnsMode) IsHost() bool {
return c == "host"
return c == CgroupnsModeHost
}
// IsEmpty indicates whether the container cgroup namespace mode is unset
func (c CgroupnsMode) IsEmpty() bool {
return c == ""
return c == CgroupnsModeEmpty
}
// Valid indicates whether the cgroup namespace mode is valid
@ -37,60 +44,69 @@ func (c CgroupnsMode) Valid() bool {
// values are platform specific
type Isolation string
// Isolation modes for containers
const (
IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default)
IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon
IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode
IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode
)
// IsDefault indicates the default isolation technology of a container. On Linux this
// is the native driver. On Windows, this is a Windows Server Container.
func (i Isolation) IsDefault() bool {
return strings.ToLower(string(i)) == "default" || string(i) == ""
// TODO consider making isolation-mode strict (case-sensitive)
v := Isolation(strings.ToLower(string(i)))
return v == IsolationDefault || v == IsolationEmpty
}
// IsHyperV indicates the use of a Hyper-V partition for isolation
func (i Isolation) IsHyperV() bool {
return strings.ToLower(string(i)) == "hyperv"
// TODO consider making isolation-mode strict (case-sensitive)
return Isolation(strings.ToLower(string(i))) == IsolationHyperV
}
// IsProcess indicates the use of process isolation
func (i Isolation) IsProcess() bool {
return strings.ToLower(string(i)) == "process"
// TODO consider making isolation-mode strict (case-sensitive)
return Isolation(strings.ToLower(string(i))) == IsolationProcess
}
const (
// IsolationEmpty is unspecified (same behavior as default)
IsolationEmpty = Isolation("")
// IsolationDefault is the default isolation mode on current daemon
IsolationDefault = Isolation("default")
// IsolationProcess is process isolation mode
IsolationProcess = Isolation("process")
// IsolationHyperV is HyperV isolation mode
IsolationHyperV = Isolation("hyperv")
)
// IpcMode represents the container ipc stack.
type IpcMode string
// IpcMode constants
const (
IPCModeNone IpcMode = "none"
IPCModeHost IpcMode = "host"
IPCModeContainer IpcMode = "container"
IPCModePrivate IpcMode = "private"
IPCModeShareable IpcMode = "shareable"
)
// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared.
func (n IpcMode) IsPrivate() bool {
return n == "private"
return n == IPCModePrivate
}
// IsHost indicates whether the container shares the host's ipc namespace.
func (n IpcMode) IsHost() bool {
return n == "host"
return n == IPCModeHost
}
// IsShareable indicates whether the container's ipc namespace can be shared with another container.
func (n IpcMode) IsShareable() bool {
return n == "shareable"
return n == IPCModeShareable
}
// IsContainer indicates whether the container uses another container's ipc namespace.
func (n IpcMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
return strings.HasPrefix(string(n), string(IPCModeContainer)+":")
}
// IsNone indicates whether container IpcMode is set to "none".
func (n IpcMode) IsNone() bool {
return n == "none"
return n == IPCModeNone
}
// IsEmpty indicates whether container IpcMode is empty
@ -105,9 +121,8 @@ func (n IpcMode) Valid() bool {
// Container returns the name of the container ipc stack is going to be used.
func (n IpcMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 && parts[0] == "container" {
return parts[1]
if n.IsContainer() {
return strings.TrimPrefix(string(n), string(IPCModeContainer)+":")
}
return ""
}
@ -326,7 +341,7 @@ type LogMode string
// Available logging modes
const (
LogModeUnset = ""
LogModeUnset LogMode = ""
LogModeBlocking LogMode = "blocking"
LogModeNonBlock LogMode = "non-blocking"
)
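The raw string comparisons above are replaced with typed constants. A small sketch of how a caller uses them when building a HostConfig; the process-isolation value is Windows-only in practice and is shown just to exercise the type:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	hostConfig := &container.HostConfig{
		// Typed constants introduced in this change replace raw string literals.
		Isolation:    container.IsolationProcess,
		IpcMode:      container.IPCModeShareable,
		CgroupnsMode: container.CgroupnsModePrivate,
	}

	fmt.Println(hostConfig.Isolation.IsProcess())    // true
	fmt.Println(hostConfig.IpcMode.IsShareable())    // true
	fmt.Println(hostConfig.CgroupnsMode.IsPrivate()) // true
}
```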

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package container // import "github.com/docker/docker/api/types/container"

View File

@ -1,33 +1,26 @@
package events // import "github.com/docker/docker/api/types/events"
// Type is used for event-types.
type Type = string
// List of known event types.
const (
// BuilderEventType is the event type that the builder generates
BuilderEventType = "builder"
// ContainerEventType is the event type that containers generate
ContainerEventType = "container"
// DaemonEventType is the event type that daemon generate
DaemonEventType = "daemon"
// ImageEventType is the event type that images generate
ImageEventType = "image"
// NetworkEventType is the event type that networks generate
NetworkEventType = "network"
// PluginEventType is the event type that plugins generate
PluginEventType = "plugin"
// VolumeEventType is the event type that volumes generate
VolumeEventType = "volume"
// ServiceEventType is the event type that services generate
ServiceEventType = "service"
// NodeEventType is the event type that nodes generate
NodeEventType = "node"
// SecretEventType is the event type that secrets generate
SecretEventType = "secret"
// ConfigEventType is the event type that configs generate
ConfigEventType = "config"
BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates.
ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate.
ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate.
DaemonEventType Type = "daemon" // DaemonEventType is the event type that daemon generate.
ImageEventType Type = "image" // ImageEventType is the event type that images generate.
NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate.
NodeEventType Type = "node" // NodeEventType is the event type that nodes generate.
PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate.
SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate.
ServiceEventType Type = "service" // ServiceEventType is the event type that services generate.
VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate.
)
// Actor describes something that generates events,
// like a container, or a network, or a volume.
// It has a defined name and a set or attributes.
// It has a defined name and a set of attributes.
// The container attributes are its labels, other actors
// can generate these attributes from other properties.
type Actor struct {
@ -39,11 +32,11 @@ type Actor struct {
type Message struct {
// Deprecated information from JSONMessage.
// With data only in container events.
Status string `json:"status,omitempty"`
ID string `json:"id,omitempty"`
From string `json:"from,omitempty"`
Status string `json:"status,omitempty"` // Deprecated: use Action instead.
ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead.
From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead.
Type string
Type Type
Action string
Actor Actor
// Engine events are local scope. Cluster events are swarm scope.
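With Type now a string alias and Message.Type typed, consumers still range over the same channel. A minimal sketch of streaming container events via the Go client, filtering on the typed constant instead of the raw string:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Filter on events.ContainerEventType rather than the literal "container".
	f := filters.NewArgs(filters.Arg("type", events.ContainerEventType))
	msgs, errs := cli.Events(context.Background(), types.EventsOptions{Filters: f})

	for {
		select {
		case m := <-msgs:
			fmt.Printf("%s %s %s\n", m.Type, m.Action, m.Actor.ID)
		case err := <-errs:
			panic(err)
		}
	}
}
```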

View File

@ -8,6 +8,9 @@ import (
// compare compares two version strings
// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
func compare(v1, v2 string) int {
if v1 == v2 {
return 0
}
var (
currTab = strings.Split(v1, ".")
otherTab = strings.Split(v2, ".")
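The added early return handles the common equal case before the segment-by-segment comparison that follows. Callers typically don't use compare directly; assuming this is the api/types/versions helper, they go through the exported wrappers:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	// The exported helpers wrap the internal compare shown in the diff.
	fmt.Println(versions.LessThan("1.40", "1.41"))             // true
	fmt.Println(versions.Equal("1.41", "1.41"))                // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.41", "1.25")) // true
}
```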

View File

@ -281,21 +281,6 @@ func ParseHostURL(host string) (*url.URL, error) {
}, nil
}
// CustomHTTPHeaders returns the custom http headers stored by the client.
func (cli *Client) CustomHTTPHeaders() map[string]string {
m := make(map[string]string)
for k, v := range cli.customHTTPHeaders {
m[k] = v
}
return m
}
// SetCustomHTTPHeaders that will be set on every HTTP request made by the client.
// Deprecated: use WithHTTPHeaders when creating the client.
func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
cli.customHTTPHeaders = headers
}
// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection.
// Used by `docker dial-stdio` (docker/cli#889).
func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
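With CustomHTTPHeaders and SetCustomHTTPHeaders removed, custom headers are supplied at construction time via the functional option the deprecation note pointed to. A minimal sketch; the header value is a placeholder:

```go
package main

import (
	"github.com/docker/docker/client"
)

func main() {
	// Headers are now fixed at construction, replacing the removed setter.
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
		client.WithHTTPHeaders(map[string]string{"User-Agent": "example-tool/0.1"}),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	_ = cli
}
```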

View File

@ -1,3 +1,4 @@
//go:build linux || freebsd || openbsd || netbsd || darwin || solaris || illumos || dragonfly
// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly
package client // import "github.com/docker/docker/client"

View File

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"io"
"github.com/docker/docker/api/types/swarm"
)
@ -23,7 +23,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C
return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id)
}
body, err := ioutil.ReadAll(resp.body)
body, err := io.ReadAll(resp.body)
if err != nil {
return swarm.Config{}, nil, err
}

View File

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"io"
"net/url"
"github.com/docker/docker/api/types"
@ -41,7 +41,7 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri
return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID)
}
body, err := ioutil.ReadAll(serverResp.body)
body, err := io.ReadAll(serverResp.body)
if err != nil {
return types.ContainerJSON{}, nil, err
}

View File

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"io"
"github.com/docker/docker/api/types"
)
@ -20,7 +20,7 @@ func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (typ
return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID)
}
body, err := ioutil.ReadAll(serverResp.body)
body, err := io.ReadAll(serverResp.body)
if err != nil {
return types.ImageInspect{}, nil, err
}

View File

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"io"
"net/url"
"github.com/docker/docker/api/types"
@ -39,7 +39,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string,
return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
}
body, err := ioutil.ReadAll(resp.body)
body, err := io.ReadAll(resp.body)
if err != nil {
return networkResource, nil, err
}

View File

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"io"
"github.com/docker/docker/api/types/swarm"
)
@ -20,7 +20,7 @@ func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm
return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
}
body, err := ioutil.ReadAll(serverResp.body)
body, err := io.ReadAll(serverResp.body)
if err != nil {
return swarm.Node{}, nil, err
}

View File

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"io"
"github.com/docker/docker/api/types"
)
@ -20,7 +20,7 @@ func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*type
return nil, nil, wrapResponseError(err, resp, "plugin", name)
}
body, err := ioutil.ReadAll(resp.body)
body, err := io.ReadAll(resp.body)
if err != nil {
return nil, nil, err
}

View File

@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
@ -206,7 +205,7 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error {
R: serverResp.body,
N: int64(bodyMax),
}
body, err = ioutil.ReadAll(bodyR)
body, err = io.ReadAll(bodyR)
if err != nil {
return err
}
@ -266,7 +265,7 @@ func encodeData(data interface{}) (*bytes.Buffer, error) {
func ensureReaderClosed(response serverResponse) {
if response.body != nil {
// Drain up to 512 bytes and close the body to let the Transport reuse the connection
io.CopyN(ioutil.Discard, response.body, 512)
io.CopyN(io.Discard, response.body, 512)
response.body.Close()
}
}

View File

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"io"
"github.com/docker/docker/api/types/swarm"
)
@ -23,7 +23,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S
return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
}
body, err := ioutil.ReadAll(resp.body)
body, err := io.ReadAll(resp.body)
if err != nil {
return swarm.Secret{}, nil, err
}

View File

@ -5,7 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/url"
"github.com/docker/docker/api/types"
@ -25,7 +25,7 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string,
return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
}
body, err := ioutil.ReadAll(serverResp.body)
body, err := io.ReadAll(serverResp.body)
if err != nil {
return swarm.Service{}, nil, err
}

View File

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"io"
"github.com/docker/docker/api/types/swarm"
)
@ -20,7 +20,7 @@ func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm
return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
}
body, err := ioutil.ReadAll(serverResp.body)
body, err := io.ReadAll(serverResp.body)
if err != nil {
return swarm.Task{}, nil, err
}

View File

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"io"
"github.com/docker/docker/api/types"
)
@ -28,7 +28,7 @@ func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (t
return volume, nil, wrapResponseError(err, resp, "volume", volumeID)
}
body, err := ioutil.ReadAll(resp.body)
body, err := io.ReadAll(resp.body)
if err != nil {
return volume, nil, err
}
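The same mechanical io/ioutil replacement repeats across these client files, since Go 1.16 moved the helpers into io and os. The pattern in isolation, with stand-in data:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.ReadAll -> io.ReadAll
	body, err := io.ReadAll(strings.NewReader(`{"ok":true}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))

	// ioutil.TempDir -> os.MkdirTemp, ioutil.WriteFile -> os.WriteFile,
	// ioutil.ReadFile -> os.ReadFile, ioutil.Discard -> io.Discard.
	dir, err := os.MkdirTemp("", "ioutil-migration")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	if err := os.WriteFile(dir+"/example.txt", []byte("hello"), 0644); err != nil {
		panic(err)
	}
	data, err := os.ReadFile(dir + "/example.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```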

View File

@ -100,10 +100,10 @@ func FromStatusCode(err error, statusCode int) error {
err = System(err)
}
default:
logrus.WithFields(logrus.Fields{
logrus.WithError(err).WithFields(logrus.Fields{
"module": "api",
"status_code": fmt.Sprintf("%d", statusCode),
}).Debugf("FIXME: Got an status-code for which error does not match any expected type!!!: %d", statusCode)
"status_code": statusCode,
}).Debug("FIXME: Got an status-code for which error does not match any expected type!!!")
switch {
case statusCode >= 200 && statusCode < 400:
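The rewritten log call attaches the error with WithError and moves the status code into a structured field instead of formatting it into the message. The same logrus pattern in isolation, with placeholder values:

```go
package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetLevel(logrus.DebugLevel)

	err := errors.New("unexpected response")
	statusCode := 418 // placeholder

	// Structured fields instead of fmt.Sprintf-ing the code into the message.
	logrus.WithError(err).WithFields(logrus.Fields{
		"module":      "api",
		"status_code": statusCode,
	}).Debug("unmatched status code")
}
```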

View File

@ -1 +0,0 @@
Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf

View File

@ -1,26 +0,0 @@
package dns
import (
"regexp"
)
// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range.
const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`
// IPv4Localhost is a regex pattern for IPv4 localhost address range.
const IPv4Localhost = `(127\.([0-9]{1,3}\.){2}[0-9]{1,3})`
var localhostIPRegexp = regexp.MustCompile(IPLocalhost)
var localhostIPv4Regexp = regexp.MustCompile(IPv4Localhost)
// IsLocalhost returns true if ip matches the localhost IP regular expression.
// Used for determining if nameserver settings are being passed which are
// localhost addresses
func IsLocalhost(ip string) bool {
return localhostIPRegexp.MatchString(ip)
}
// IsIPv4Localhost returns true if ip matches the IPv4 localhost regular expression.
func IsIPv4Localhost(ip string) bool {
return localhostIPv4Regexp.MatchString(ip)
}

View File

@ -3,14 +3,11 @@ package resolvconf
import (
"bytes"
"io/ioutil"
"os"
"regexp"
"strings"
"sync"
"github.com/docker/docker/libnetwork/resolvconf/dns"
"github.com/docker/docker/libnetwork/types"
"github.com/docker/docker/pkg/ioutils"
"github.com/sirupsen/logrus"
)
@ -21,6 +18,13 @@ const (
alternatePath = "/run/systemd/resolve/resolv.conf"
)
// constants for the IP address type
const (
IP = iota // IPv4 and IPv6
IPv4
IPv6
)
var (
detectSystemdResolvConfOnce sync.Once
pathAfterSystemdDetection = defaultPath
@ -39,12 +43,12 @@ var (
// More information at https://www.freedesktop.org/software/systemd/man/systemd-resolved.service.html#/etc/resolv.conf
func Path() string {
detectSystemdResolvConfOnce.Do(func() {
candidateResolvConf, err := ioutil.ReadFile(defaultPath)
candidateResolvConf, err := os.ReadFile(defaultPath)
if err != nil {
// silencing error as it will resurface at next calls trying to read defaultPath
return
}
ns := GetNameservers(candidateResolvConf, types.IP)
ns := GetNameservers(candidateResolvConf, IP)
if len(ns) == 1 && ns[0] == "127.0.0.53" {
pathAfterSystemdDetection = alternatePath
logrus.Infof("detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: %s", alternatePath)
@ -53,20 +57,26 @@ func Path() string {
return pathAfterSystemdDetection
}
var (
// Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS
defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"}
defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"}
ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`
ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock
const (
// ipLocalhost is a regex pattern for IPv4 or IPv6 loopback range.
ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`
ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`
ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock
// This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also
// will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants
// -- e.g. other link-local types -- either won't work in containers or are unnecessary.
// For readability and sufficiency for Docker purposes this seemed more reasonable than a
// 1000+ character regexp with exact and complete IPv6 validation
ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?`
)
localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`)
var (
// Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS
defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"}
defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"}
localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`)
nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`)
nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`)
nsIPv6Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv6Address + `))\s*$`)
@ -94,11 +104,11 @@ func Get() (*File, error) {
// GetSpecific returns the contents of the user specified resolv.conf file and its hash
func GetSpecific(path string) (*File, error) {
resolv, err := ioutil.ReadFile(path)
resolv, err := os.ReadFile(path)
if err != nil {
return nil, err
}
hash, err := ioutils.HashData(bytes.NewReader(resolv))
hash, err := hashData(bytes.NewReader(resolv))
if err != nil {
return nil, err
}
@ -112,11 +122,11 @@ func GetIfChanged() (*File, error) {
lastModified.Lock()
defer lastModified.Unlock()
resolv, err := ioutil.ReadFile(Path())
resolv, err := os.ReadFile(Path())
if err != nil {
return nil, err
}
newHash, err := ioutils.HashData(bytes.NewReader(resolv))
newHash, err := hashData(bytes.NewReader(resolv))
if err != nil {
return nil, err
}
@ -153,7 +163,7 @@ func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) {
}
// if the resulting resolvConf has no more nameservers defined, add appropriate
// default DNS servers for IPv4 and (optionally) IPv6
if len(GetNameservers(cleanedResolvConf, types.IP)) == 0 {
if len(GetNameservers(cleanedResolvConf, IP)) == 0 {
logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns)
dns := defaultIPv4Dns
if ipv6Enabled {
@ -162,7 +172,7 @@ func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) {
}
cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...)
}
hash, err := ioutils.HashData(bytes.NewReader(cleanedResolvConf))
hash, err := hashData(bytes.NewReader(cleanedResolvConf))
if err != nil {
return nil, err
}
@ -189,11 +199,11 @@ func GetNameservers(resolvConf []byte, kind int) []string {
nameservers := []string{}
for _, line := range getLines(resolvConf, []byte("#")) {
var ns [][]byte
if kind == types.IP {
if kind == IP {
ns = nsRegexp.FindSubmatch(line)
} else if kind == types.IPv4 {
} else if kind == IPv4 {
ns = nsIPv4Regexpmatch.FindSubmatch(line)
} else if kind == types.IPv6 {
} else if kind == IPv6 {
ns = nsIPv6Regexpmatch.FindSubmatch(line)
}
if len(ns) > 0 {
@ -208,7 +218,7 @@ func GetNameservers(resolvConf []byte, kind int) []string {
// This function's output is intended for net.ParseCIDR
func GetNameserversAsCIDR(resolvConf []byte) []string {
nameservers := []string{}
for _, nameserver := range GetNameservers(resolvConf, types.IP) {
for _, nameserver := range GetNameservers(resolvConf, IP) {
var address string
// If IPv6, strip zone if present
if strings.Contains(nameserver, ":") {
@ -276,10 +286,10 @@ func Build(path string, dns, dnsSearch, dnsOptions []string) (*File, error) {
}
}
hash, err := ioutils.HashData(bytes.NewReader(content.Bytes()))
hash, err := hashData(bytes.NewReader(content.Bytes()))
if err != nil {
return nil, err
}
return &File{Content: content.Bytes(), Hash: hash}, ioutil.WriteFile(path, content.Bytes(), 0644)
return &File{Content: content.Bytes(), Hash: hash}, os.WriteFile(path, content.Bytes(), 0644)
}
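After this change the address-kind constants and the hashing helper live in the resolvconf package itself rather than libnetwork/types and pkg/ioutils. A minimal sketch of reading the effective resolv.conf and extracting nameservers, assuming a Linux host:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/libnetwork/resolvconf"
)

func main() {
	// Get reads the file chosen by Path(), which handles the systemd-resolved case.
	f, err := resolvconf.Get()
	if err != nil {
		panic(err)
	}

	// The IP/IPv4/IPv6 kind constants now come from resolvconf, not libnetwork/types.
	all := resolvconf.GetNameservers(f.Content, resolvconf.IP)
	v4 := resolvconf.GetNameservers(f.Content, resolvconf.IPv4)
	fmt.Println("all nameservers:", all)
	fmt.Println("ipv4 nameservers:", v4)
}
```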

View File

@ -0,0 +1,16 @@
package resolvconf
import (
"crypto/sha256"
"encoding/hex"
"io"
)
// hashData returns the sha256 sum of src.
func hashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

View File

@ -5,13 +5,13 @@ import (
"bytes"
"fmt"
"net"
"strconv"
"strings"
"github.com/ishidawataru/sctp"
)
// constants for the IP address type
// Deprecated: use the consts defined in github.com/docker/docker/libnetwork/resolvconf
const (
IP = iota // IPv4 and IPv6
IPv4
@ -68,19 +68,6 @@ func (t *TransportPort) String() string {
return fmt.Sprintf("%s/%d", t.Proto.String(), t.Port)
}
// FromString reads the TransportPort structure from string
func (t *TransportPort) FromString(s string) error {
ps := strings.Split(s, "/")
if len(ps) == 2 {
t.Proto = ParseProtocol(ps[0])
if p, err := strconv.ParseUint(ps[1], 10, 16); err == nil {
t.Port = uint16(p)
return nil
}
}
return BadRequestErrorf("invalid format for transport port: %s", s)
}
// PortBinding represents a port binding between the container and the host
type PortBinding struct {
Proto Protocol
@ -145,51 +132,6 @@ func (p *PortBinding) String() string {
return ret
}
// FromString reads the PortBinding structure from string s.
// String s is a triple of "protocol/containerIP:port/hostIP:port"
// containerIP and hostIP can be in dotted decimal ("192.0.2.1") or IPv6 ("2001:db8::68") form.
// Zoned addresses ("169.254.0.23%eth0" or "fe80::1ff:fe23:4567:890a%eth0") are not supported.
// If string s is incorrectly formatted or the IP addresses or ports cannot be parsed, FromString
// returns an error.
func (p *PortBinding) FromString(s string) error {
ps := strings.Split(s, "/")
if len(ps) != 3 {
return BadRequestErrorf("invalid format for port binding: %s", s)
}
p.Proto = ParseProtocol(ps[0])
var err error
if p.IP, p.Port, err = parseIPPort(ps[1]); err != nil {
return BadRequestErrorf("failed to parse Container IP/Port in port binding: %s", err.Error())
}
if p.HostIP, p.HostPort, err = parseIPPort(ps[2]); err != nil {
return BadRequestErrorf("failed to parse Host IP/Port in port binding: %s", err.Error())
}
return nil
}
func parseIPPort(s string) (net.IP, uint16, error) {
hoststr, portstr, err := net.SplitHostPort(s)
if err != nil {
return nil, 0, err
}
ip := net.ParseIP(hoststr)
if ip == nil {
return nil, 0, BadRequestErrorf("invalid ip: %s", hoststr)
}
port, err := strconv.ParseUint(portstr, 10, 16)
if err != nil {
return nil, 0, BadRequestErrorf("invalid port: %s", portstr)
}
return ip, uint16(port), nil
}
// Equal checks if this instance of PortBinding is equal to the passed one
func (p *PortBinding) Equal(o *PortBinding) bool {
if p == o {
@ -341,21 +283,6 @@ func GetMinimalIP(ip net.IP) net.IP {
return ip
}
// GetMinimalIPNet returns a copy of the passed IP Network with congruent ip and mask notation
func GetMinimalIPNet(nw *net.IPNet) *net.IPNet {
if nw == nil {
return nil
}
if len(nw.IP) == 16 && nw.IP.To4() != nil {
m := nw.Mask
if len(m) == 16 {
m = m[12:16]
}
return &net.IPNet{IP: nw.IP.To4(), Mask: m}
}
return nw
}
// IsIPNetValid returns true if the ipnet is a valid network/mask
// combination. Otherwise returns false.
func IsIPNetValid(nw *net.IPNet) bool {

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package opts // import "github.com/docker/docker/opts"

View File

@ -7,9 +7,9 @@ import (
"compress/bzip2"
"compress/gzip"
"context"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
@ -23,6 +23,7 @@ import (
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/klauspost/compress/zstd"
"github.com/sirupsen/logrus"
exec "golang.org/x/sys/execabs"
)
@ -84,6 +85,8 @@ const (
Gzip
// Xz is xz compression algorithm.
Xz
// Zstd is zstd compression algorithm.
Zstd
)
const (
@ -122,14 +125,59 @@ func IsArchivePath(path string) bool {
return err == nil
}
const (
zstdMagicSkippableStart = 0x184D2A50
zstdMagicSkippableMask = 0xFFFFFFF0
)
var (
bzip2Magic = []byte{0x42, 0x5A, 0x68}
gzipMagic = []byte{0x1F, 0x8B, 0x08}
xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}
zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
)
type matcher = func([]byte) bool
func magicNumberMatcher(m []byte) matcher {
return func(source []byte) bool {
return bytes.HasPrefix(source, m)
}
}
// zstdMatcher detects zstd compression algorithm.
// Zstandard compressed data is made of one or more frames.
// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.
func zstdMatcher() matcher {
return func(source []byte) bool {
if bytes.HasPrefix(source, zstdMagic) {
// Zstandard frame
return true
}
// skippable frame
if len(source) < 8 {
return false
}
// magic number from 0x184D2A50 to 0x184D2A5F.
if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
return true
}
return false
}
}
// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
for compression, m := range map[Compression][]byte{
Bzip2: {0x42, 0x5A, 0x68},
Gzip: {0x1F, 0x8B, 0x08},
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} {
if bytes.HasPrefix(source, m) {
compressionMap := map[Compression]matcher{
Bzip2: magicNumberMatcher(bzip2Magic),
Gzip: magicNumberMatcher(gzipMagic),
Xz: magicNumberMatcher(xzMagic),
Zstd: zstdMatcher(),
}
for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} {
fn := compressionMap[compression]
if fn(source) {
return compression
}
}
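With the matcher table in place, a Zstandard frame is recognized by its magic bytes. A tiny sketch exercising DetectCompression directly; the prefix below is the frame magic listed in the diff and the trailing bytes are filler:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// 0x28 0xB5 0x2F 0xFD is the Zstandard frame magic.
	sample := []byte{0x28, 0xb5, 0x2f, 0xfd, 0x00, 0x00, 0x00, 0x00}
	if archive.DetectCompression(sample) == archive.Zstd {
		fmt.Println("detected zstd")
	}

	// A gzip header is still detected as before.
	gz := []byte{0x1F, 0x8B, 0x08, 0x00}
	fmt.Println(archive.DetectCompression(gz) == archive.Gzip) // true
}
```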
@ -216,6 +264,13 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
}
readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
return wrapReadCloser(readBufWrapper, cancel), nil
case Zstd:
zstdReader, err := zstd.NewReader(buf)
if err != nil {
return nil, err
}
readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader)
return readBufWrapper, nil
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
@ -342,6 +397,8 @@ func (compression *Compression) Extension() string {
return "tar.gz"
case Xz:
return "tar.xz"
case Zstd:
return "tar.zst"
}
return ""
}
@ -809,8 +866,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
rebaseName := options.RebaseNames[include]
var (
parentMatched []bool
parentDirs []string
parentMatchInfo []fileutils.MatchInfo
parentDirs []string
)
walkRoot := getWalkRoot(srcPath, include)
@ -845,13 +902,14 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
break
}
parentDirs = parentDirs[:len(parentDirs)-1]
parentMatched = parentMatched[:len(parentMatched)-1]
parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1]
}
if len(parentMatched) != 0 {
skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1])
var matchInfo fileutils.MatchInfo
if len(parentMatchInfo) != 0 {
skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1])
} else {
skip, err = pm.MatchesOrParentMatches(relFilePath)
skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, fileutils.MatchInfo{})
}
if err != nil {
logrus.Errorf("Error matching %s: %v", relFilePath, err)
@ -860,7 +918,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if f.IsDir() {
parentDirs = append(parentDirs, relFilePath)
parentMatched = append(parentMatched, skip)
parentMatchInfo = append(parentMatchInfo, matchInfo)
}
}
@ -1284,7 +1342,7 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
f, err := ioutil.TempFile(dir, "")
f, err := os.CreateTemp(dir, "")
if err != nil {
return nil, err
}

View File

@ -1,3 +1,4 @@
//go:build !linux
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"

View File

@ -5,7 +5,6 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@ -348,7 +347,7 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) {
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
emptyDir, err := ioutil.TempDir("", "empty")
emptyDir, err := os.MkdirTemp("", "empty")
if err != nil {
return nil, err
}

View File

@ -1,3 +1,4 @@
//go:build !linux
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"

View File

@ -4,7 +4,6 @@ import (
"archive/tar"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -261,7 +260,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
return dstInfo.Path, io.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"

View File

@ -4,7 +4,6 @@ import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
@ -100,7 +99,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
if aufsTempdir, err = os.MkdirTemp("", "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)

View File

@ -1,3 +1,4 @@
//go:build !linux
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"

View File

@ -3,7 +3,6 @@ package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/user"
@ -74,17 +73,21 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
options.ExcludePatterns = []string{}
}
idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
rootIDs := idMapping.RootPair()
// If dest is inside a root then directory is created within chroot by extractor.
// This case is only currently used by cp.
if dest == root {
idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
rootIDs := idMapping.RootPair()
dest = filepath.Clean(dest)
if _, err := os.Stat(dest); os.IsNotExist(err) {
if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
return err
dest = filepath.Clean(dest)
if _, err := os.Stat(dest); os.IsNotExist(err) {
if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
return err
}
}
}
r := ioutil.NopCloser(tarArchive)
r := io.NopCloser(tarArchive)
if decompress {
decompressedArchive, err := archive.DecompressStream(tarArchive)
if err != nil {

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
@ -8,7 +9,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
@ -111,7 +111,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
// when `xz -d -c -q | docker-untar ...` failed on docker-untar side,
// we need to exhaust `xz`'s output, otherwise the `xz` side will be
// pending on write pipe forever
io.Copy(ioutil.Discard, decompressedArchive)
io.Copy(io.Discard, decompressedArchive)
return fmt.Errorf("Error processing tar file(%v): %s", err, output)
}

View File

@ -2,7 +2,6 @@ package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
@ -44,7 +43,7 @@ func chroot(path string) (err error) {
}
// setup oldRoot for pivot_root
pivotDir, err := ioutil.TempDir(path, ".pivot_root")
pivotDir, err := os.MkdirTemp(path, ".pivot_root")
if err != nil {
return fmt.Errorf("Error setting up pivot dir: %v", err)
}

View File

@ -1,3 +1,4 @@
//go:build !windows && !linux
// +build !windows,!linux
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"

View File

@ -1,4 +1,5 @@
//+build !windows
//go:build !windows
// +build !windows
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
@ -8,7 +9,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
@ -56,7 +56,7 @@ func applyLayer() {
options.InUserNS = true
}
if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
if tmpDir, err = os.MkdirTemp("/", "temp-docker-extract"); err != nil {
fatal(err)
}

View File

@ -3,7 +3,6 @@ package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
@ -30,7 +29,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
layer = decompressed
}
tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-docker-extract")
if err != nil {
return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
}

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
@ -5,7 +6,6 @@ package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"fmt"
"io"
"io/ioutil"
"os"
"github.com/docker/docker/pkg/reexec"
@ -25,5 +25,5 @@ func fatal(err error) {
// flush consumes all the bytes from the reader discarding
// any errors
func flush(r io.Reader) (bytes int64, err error) {
return io.Copy(ioutil.Discard, r)
return io.Copy(io.Discard, r)
}

View File

@ -9,8 +9,30 @@ import (
"regexp"
"strings"
"text/scanner"
"unicode/utf8"
)
// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex.
var escapeBytes [8]byte
// shouldEscape reports whether a rune should be escaped as part of the regex.
//
// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters.
// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator
// on Windows.
//
// Adapted from regexp::QuoteMeta in go stdlib.
// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2
func shouldEscape(b rune) bool {
return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0
}
func init() {
for _, b := range []byte(`.+()|{}$`) {
escapeBytes[b%8] |= 1 << (b / 8)
}
}
// PatternMatcher allows checking paths against a list of patterns
type PatternMatcher struct {
patterns []*Pattern
@ -62,9 +84,9 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
//
// Matches is not safe to call concurrently.
//
// This implementation is buggy (it only checks a single parent dir against the
// pattern) and will be removed soon. Use either MatchesOrParentMatches or
// MatchesUsingParentResult instead.
// Deprecated: This implementation is buggy (it only checks a single parent dir
// against the pattern) and will be removed soon. Use either
// MatchesOrParentMatches or MatchesUsingParentResults instead.
func (pm *PatternMatcher) Matches(file string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
@ -150,6 +172,11 @@ func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) {
// The "file" argument should be a slash-delimited path.
//
// MatchesUsingParentResult is not safe to call concurrently.
//
// Deprecated: this function does behave correctly in some cases (see
// https://github.com/docker/buildx/issues/850).
//
// Use MatchesUsingParentResults instead.
func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) {
matched := parentMatched
file = filepath.FromSlash(file)
@ -174,6 +201,78 @@ func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bo
return matched, nil
}
// MatchInfo tracks information about parent dir matches while traversing a
// filesystem.
type MatchInfo struct {
parentMatched []bool
}
// MatchesUsingParentResults returns true if "file" matches any of the patterns
// and isn't excluded by any of the subsequent patterns. The functionality is
// the same as Matches, but as an optimization, the caller passes in
// intermediate results from matching the parent directory.
//
// The "file" argument should be a slash-delimited path.
//
// MatchesUsingParentResults is not safe to call concurrently.
func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) {
parentMatched := parentMatchInfo.parentMatched
if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) {
return false, MatchInfo{}, errors.New("wrong number of values in parentMatched")
}
file = filepath.FromSlash(file)
matched := false
matchInfo := MatchInfo{
parentMatched: make([]bool, len(pm.patterns)),
}
for i, pattern := range pm.patterns {
match := false
// If the parent matched this pattern, we don't need to recheck.
if len(parentMatched) != 0 {
match = parentMatched[i]
}
if !match {
// Skip evaluation if this is an inclusion and the filename
// already matched the pattern, or it's an exclusion and it has
// not matched the pattern yet.
if pattern.exclusion != matched {
continue
}
var err error
match, err = pattern.match(file)
if err != nil {
return false, matchInfo, err
}
// If the zero value of MatchInfo was passed in, we don't have
// any information about the parent dir's match results, and we
// apply the same logic as MatchesOrParentMatches.
if len(parentMatched) == 0 {
if parentPath := filepath.Dir(file); parentPath != "." {
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
// Check to see if the pattern matches one of our parent dirs.
for i := range parentPathDirs {
match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator)))
if match {
break
}
}
}
}
}
matchInfo.parentMatched[i] = match
if match {
matched = !pattern.exclusion
}
}
return matched, matchInfo, nil
}
// Exclusions returns true if any of the patterns define exclusions
func (pm *PatternMatcher) Exclusions() bool {
return pm.exclusions
@ -256,7 +355,7 @@ func (p *Pattern) compile() error {
} else if ch == '?' {
// "?" is any char except "/"
regStr += "[^" + escSL + "]"
} else if ch == '.' || ch == '$' {
} else if shouldEscape(ch) {
// Escape some regexp special chars that have no meaning
// in golang's filepath.Match
regStr += `\` + string(ch)
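MatchesUsingParentResults carries per-pattern parent state through a walk instead of the single boolean the deprecated function used. A hedged sketch of how a caller threads MatchInfo while walking a tree, mirroring what TarWithOptions does above; the pattern list and directory are illustrative:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	pm, err := fileutils.NewPatternMatcher([]string{"**/*.log", "!keep/*.log"})
	if err != nil {
		panic(err)
	}

	// Remember the MatchInfo of each visited directory so children can reuse it.
	dirInfo := map[string]fileutils.MatchInfo{}

	walkErr := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil || path == "." {
			return err
		}
		parent := filepath.Dir(path)
		// The zero MatchInfo (unknown parent) is valid and falls back to
		// MatchesOrParentMatches-style evaluation.
		skip, info, err := pm.MatchesUsingParentResults(filepath.ToSlash(path), dirInfo[parent])
		if err != nil {
			return err
		}
		if d.IsDir() {
			dirInfo[path] = info
		}
		if skip {
			fmt.Println("excluded:", path)
		}
		return nil
	})
	if walkErr != nil {
		panic(walkErr)
	}
}
```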

View File

@ -1,10 +1,10 @@
//go:build linux || freebsd
// +build linux freebsd
package fileutils // import "github.com/docker/docker/pkg/fileutils"
import (
"fmt"
"io/ioutil"
"os"
"github.com/sirupsen/logrus"
@ -13,7 +13,7 @@ import (
// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)

View File

@ -1,3 +1,4 @@
//go:build !linux
// +build !linux
package homedir // import "github.com/docker/docker/pkg/homedir"

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package homedir // import "github.com/docker/docker/pkg/homedir"

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package idtools // import "github.com/docker/docker/pkg/idtools"

View File

@ -1,3 +1,4 @@
//go:build !linux
// +build !linux
package idtools // import "github.com/docker/docker/pkg/idtools"

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package idtools // import "github.com/docker/docker/pkg/idtools"

View File

@ -50,12 +50,12 @@ func NewBytesPipe() *BytesPipe {
// It can allocate new []byte slices in the process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
bp.mu.Lock()
defer bp.mu.Unlock()
written := 0
loop0:
for {
if bp.closeErr != nil {
bp.mu.Unlock()
return written, ErrClosed
}
@ -72,7 +72,6 @@ loop0:
// errBufferFull is an error we expect to get if the buffer is full
if err != nil && err != errBufferFull {
bp.wait.Broadcast()
bp.mu.Unlock()
return written, err
}
@ -100,7 +99,6 @@ loop0:
bp.buf = append(bp.buf, getBuffer(nextCap))
}
bp.wait.Broadcast()
bp.mu.Unlock()
return written, nil
}
@ -126,17 +124,14 @@ func (bp *BytesPipe) Close() error {
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
bp.mu.Lock()
defer bp.mu.Unlock()
if bp.bufLen == 0 {
if bp.closeErr != nil {
err := bp.closeErr
bp.mu.Unlock()
return 0, err
return 0, bp.closeErr
}
bp.wait.Wait()
if bp.bufLen == 0 && bp.closeErr != nil {
err := bp.closeErr
bp.mu.Unlock()
return 0, err
return 0, bp.closeErr
}
}
@ -161,7 +156,6 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) {
}
bp.wait.Broadcast()
bp.mu.Unlock()
return
}
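The hunks above replace per-branch Unlock calls with a single deferred Unlock; a small sketch of that pattern in isolation (the counter type is invented for illustration, not part of this patch):
package main

import (
    "fmt"
    "sync"
)

// counter shows the defer-based locking pattern adopted in BytesPipe above:
// one deferred Unlock covers every return path, including early returns.
type counter struct {
    mu sync.Mutex
    v  int
}

func (c *counter) Add(n int) int {
    c.mu.Lock()
    defer c.mu.Unlock() // released on every return path
    if n < 0 {
        return c.v // no explicit Unlock needed before early returns
    }
    c.v += n
    return c.v
}

func main() {
    c := &counter{}
    fmt.Println(c.Add(2), c.Add(-1))
}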

View File

@ -2,7 +2,6 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils"
import (
"io"
"io/ioutil"
"os"
"path/filepath"
)
@ -11,7 +10,7 @@ import (
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return nil, err
}
@ -94,7 +93,7 @@ type AtomicWriteSet struct {
// commit. If no temporary directory is given the system
// default is used.
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
td, err := ioutil.TempDir(tmpDir, "write-set-")
td, err := os.MkdirTemp(tmpDir, "write-set-")
if err != nil {
return nil, err
}
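These changes are part of the migration away from the deprecated io/ioutil package (Go 1.16+); a short sketch of the standard-library equivalents used throughout the patch, with an illustrative temp-file name pattern:
package main

import (
    "log"
    "os"
)

// Go 1.16+ equivalents applied throughout this patch:
//   ioutil.TempFile  -> os.CreateTemp
//   ioutil.TempDir   -> os.MkdirTemp
//   ioutil.ReadFile  -> os.ReadFile
//   ioutil.ReadDir   -> os.ReadDir (returns []fs.DirEntry rather than []fs.FileInfo)
//   ioutil.NopCloser -> io.NopCloser
//   ioutil.ReadAll   -> io.ReadAll
func main() {
    f, err := os.CreateTemp("", ".tmp-example-*")
    if err != nil {
        log.Fatal(err)
    }
    defer os.Remove(f.Name())
    defer f.Close()
    log.Println("created", f.Name())
}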

View File

@ -2,9 +2,12 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils"
import (
"context"
"crypto/sha256"
"encoding/hex"
"io"
// make sure crypto.SHA256, crypto.SHA512 and crypto.SHA384 are registered
// TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged.
_ "crypto/sha256"
_ "crypto/sha512"
)
// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser
@ -49,15 +52,6 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
}
}
// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
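The removed HashData helper returned a "sha256:<hex>" string; a roughly equivalent sketch using the opencontainers/go-digest package referenced in the TODO above (an assumption about a suitable replacement, not necessarily what callers were migrated to):
package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/opencontainers/go-digest"
)

func main() {
    // digest.FromReader hashes with the canonical algorithm (sha256) and
    // renders as "sha256:<hex>", matching the removed HashData output format.
    dgst, err := digest.FromReader(strings.NewReader("hello world"))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(dgst.String())
}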
// OnEOFReader wraps an io.ReadCloser and a function
// the function will run at the end of file or close the file.
type OnEOFReader struct {

View File

@ -1,10 +1,11 @@
//go:build !windows
// +build !windows
package ioutils // import "github.com/docker/docker/pkg/ioutils"
import "io/ioutil"
import "os"
// TempDir on Unix systems is equivalent to ioutil.TempDir.
// TempDir on Unix systems is equivalent to os.MkdirTemp.
func TempDir(dir, prefix string) (string, error) {
return ioutil.TempDir(dir, prefix)
return os.MkdirTemp(dir, prefix)
}

View File

@ -1,14 +1,14 @@
package ioutils // import "github.com/docker/docker/pkg/ioutils"
import (
"io/ioutil"
"os"
"github.com/docker/docker/pkg/longpath"
)
// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
tempDir, err := ioutil.TempDir(dir, prefix)
tempDir, err := os.MkdirTemp(dir, prefix)
if err != nil {
return "", err
}

View File

@ -1,3 +1,4 @@
//go:build freebsd || darwin
// +build freebsd darwin
package reexec // import "github.com/docker/docker/pkg/reexec"

View File

@ -1,3 +1,4 @@
//go:build !linux && !windows && !freebsd && !darwin
// +build !linux,!windows,!freebsd,!darwin
package reexec // import "github.com/docker/docker/pkg/reexec"

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,9 +1,9 @@
//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"
import (
"io/ioutil"
"os"
"path/filepath"
)
@ -63,5 +63,5 @@ func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, erro
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
return ioutil.TempFile(dir, prefix)
return os.CreateTemp(dir, prefix)
}

View File

@ -258,7 +258,7 @@ func nextSuffix() string {
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
// TempFileSequential is a copy of os.CreateTemp, modified to use sequential
// file access. Below is the original comment from golang:
// TempFile creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading

View File

@ -1,29 +1,18 @@
package system // import "github.com/docker/docker/pkg/system"
import (
"os"
"github.com/sirupsen/logrus"
)
var (
// containerdRuntimeSupported determines if ContainerD should be the runtime.
// As of March 2019, this is an experimental feature.
// containerdRuntimeSupported determines if containerd should be the runtime.
containerdRuntimeSupported = false
)
// InitContainerdRuntime sets whether to use ContainerD for runtime
// on Windows. This is an experimental feature still in development, and
// also requires an environment variable to be set (so as not to turn the
// feature on from simply experimental which would also mean LCOW.
func InitContainerdRuntime(experimental bool, cdPath string) {
if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 {
logrus.Warnf("Using ContainerD runtime. This feature is experimental")
// InitContainerdRuntime sets whether to use containerd for runtime on Windows.
func InitContainerdRuntime(cdPath string) {
if len(cdPath) > 0 {
containerdRuntimeSupported = true
}
}
// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
// ContainerdRuntimeSupported returns true if the use of containerd runtime is supported.
func ContainerdRuntimeSupported() bool {
return containerdRuntimeSupported
}

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,3 +1,4 @@
//go:build !linux && !windows
// +build !linux,!windows
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"
@ -6,12 +7,6 @@ import (
"golang.org/x/sys/unix"
)
// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev.
func Mknod(path string, mode uint32, dev int) error {
return unix.Mknod(path, mode, dev)
}
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
// and minor number of the newly created device special file.
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.

View File

@ -0,0 +1,14 @@
//go:build freebsd
// +build freebsd
package system // import "github.com/docker/docker/pkg/system"
import (
"golang.org/x/sys/unix"
)
// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev.
func Mknod(path string, mode uint32, dev int) error {
return unix.Mknod(path, mode, uint64(dev))
}

View File

@ -0,0 +1,11 @@
package system // import "github.com/docker/docker/pkg/system"
import (
"golang.org/x/sys/unix"
)
// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev.
func Mknod(path string, mode uint32, dev int) error {
return unix.Mknod(path, mode, dev)
}
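For reference, a hedged sketch of how callers typically build the dev argument for these per-platform Mknod helpers (Linux shown; the path and device numbers are illustrative, and creating device nodes requires elevated privileges):
//go:build linux
// +build linux

package main

import (
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    // unix.Mkdev packs major/minor numbers; on Linux unix.Mknod takes the
    // result as an int, while the FreeBSD variant above passes a uint64.
    dev := unix.Mkdev(1, 3) // illustrative: the numbers used by /dev/null
    if err := unix.Mknod("/tmp/example-null", unix.S_IFCHR|0o666, int(dev)); err != nil {
        log.Fatal(err)
    }
}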

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,10 +1,11 @@
//go:build linux || freebsd || darwin
// +build linux freebsd darwin
package system // import "github.com/docker/docker/pkg/system"
import (
"fmt"
"io/ioutil"
"os"
"strings"
"syscall"
@ -30,7 +31,7 @@ func KillProcess(pid int) {
// http://man7.org/linux/man-pages/man5/proc.5.html
func IsProcessZombie(pid int) (bool, error) {
statPath := fmt.Sprintf("/proc/%d/stat", pid)
dataBytes, err := ioutil.ReadFile(statPath)
dataBytes, err := os.ReadFile(statPath)
if err != nil {
return false, err
}

View File

@ -1,3 +1,4 @@
//go:build !darwin && !windows
// +build !darwin,!windows
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,3 +1,4 @@
//go:build freebsd || netbsd
// +build freebsd netbsd
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,3 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,3 +1,4 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,3 +1,4 @@
//go:build !linux
// +build !linux
package system // import "github.com/docker/docker/pkg/system"

View File

@ -1,3 +1,4 @@
//go:build seccomp
// +build seccomp
package seccomp // import "github.com/docker/docker/profiles/seccomp"

View File

@ -1,3 +1,4 @@
//go:build linux && !seccomp
// +build linux,!seccomp
package seccomp // import "github.com/docker/docker/profiles/seccomp"

View File

@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
@ -18,7 +17,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/client"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/testutil/request"
@ -43,6 +41,8 @@ const (
defaultDockerdBinary = "dockerd"
defaultContainerdSocket = "/var/run/docker/containerd/containerd.sock"
defaultDockerdRootlessBinary = "dockerd-rootless.sh"
defaultUnixSocket = "/var/run/docker.sock"
defaultTLSHost = "localhost:2376"
)
var errDaemonNotStarted = errors.New("daemon not started")
@ -262,7 +262,8 @@ func (d *Daemon) LogFileName() string {
// ReadLogFile returns the content of the daemon log file
func (d *Daemon) ReadLogFile() ([]byte, error) {
return ioutil.ReadFile(d.logFile.Name())
_ = d.logFile.Sync()
return os.ReadFile(d.logFile.Name())
}
// NewClientT creates new client based on daemon's socket path
@ -290,6 +291,7 @@ func (d *Daemon) Cleanup(t testing.TB) {
t.Helper()
cleanupMount(t, d)
cleanupRaftDir(t, d)
cleanupDaemonStorage(t, d)
cleanupNetworkNamespace(t, d)
}
@ -739,11 +741,11 @@ func (d *Daemon) getClientConfig() (*clientConfig, error) {
transport = &http.Transport{
TLSClientConfig: tlsConfig,
}
addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
addr = defaultTLSHost
scheme = "https"
proto = "tcp"
} else if d.UseDefaultHost {
addr = opts.DefaultUnixSocket
addr = defaultUnixSocket
proto = "unix"
scheme = "http"
transport = &http.Transport{}
@ -831,3 +833,36 @@ func cleanupRaftDir(t testing.TB, d *Daemon) {
}
}
}
// cleanupDaemonStorage removes known subdirectories of the daemon's storage directory.
//
// Note that we don't delete the whole directory, as some files (e.g. daemon
// logs) are collected for inclusion in the "bundles" that are stored as Jenkins
// artifacts.
//
// We currently do not include container logs in the bundles, so this also
// removes the "containers" sub-directory.
func cleanupDaemonStorage(t testing.TB, d *Daemon) {
t.Helper()
dirs := []string{
"builder",
"buildkit",
"containers",
"image",
"network",
"plugins",
"tmp",
"trust",
"volumes",
// note: this assumes storage-driver name matches the subdirectory,
// which is currently true, but not guaranteed.
d.storageDriver,
}
for _, p := range dirs {
dir := filepath.Join(d.Root, p)
if err := os.RemoveAll(dir); err != nil {
t.Logf("[%s] error removing %v: %v", d.id, dir, err)
}
}
}

View File

@ -0,0 +1,18 @@
//go:build freebsd
// +build freebsd
package daemon // import "github.com/docker/docker/testutil/daemon"
import (
"testing"
"gotest.tools/v3/assert"
)
func cleanupNetworkNamespace(_ testing.TB, _ *Daemon) {}
// CgroupNamespace returns the cgroup namespace the daemon is running in
func (d *Daemon) CgroupNamespace(t testing.TB) string {
assert.Assert(t, false, "cgroup namespaces are not supported on FreeBSD")
return ""
}

View File

@ -0,0 +1,37 @@
package daemon // import "github.com/docker/docker/testutil/daemon"
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"golang.org/x/sys/unix"
"gotest.tools/v3/assert"
)
func cleanupNetworkNamespace(t testing.TB, d *Daemon) {
t.Helper()
// Cleanup network namespaces in the exec root of this
// daemon because this exec root is specific to this
// daemon instance and has no chance of getting
// cleaned up when a new daemon is instantiated with a
// new exec root.
netnsPath := filepath.Join(d.execRoot, "netns")
filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error {
if err := unix.Unmount(path, unix.MNT_DETACH); err != nil && err != unix.EINVAL && err != unix.ENOENT {
t.Logf("[%s] unmount of %s failed: %v", d.id, path, err)
}
os.Remove(path)
return nil
})
}
// CgroupNamespace returns the cgroup namespace the daemon is running in
func (d *Daemon) CgroupNamespace(t testing.TB) string {
link, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/cgroup", d.Pid()))
assert.NilError(t, err)
return strings.TrimSpace(link)
}

View File

@ -1,19 +1,15 @@
//go:build !windows
// +build !windows
package daemon // import "github.com/docker/docker/testutil/daemon"
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"testing"
"github.com/moby/sys/mount"
"golang.org/x/sys/unix"
"gotest.tools/v3/assert"
)
// cleanupMount unmounts the daemon root directory, or logs a message if
@ -25,31 +21,6 @@ func cleanupMount(t testing.TB, d *Daemon) {
}
}
func cleanupNetworkNamespace(t testing.TB, d *Daemon) {
t.Helper()
// Cleanup network namespaces in the exec root of this
// daemon because this exec root is specific to this
// daemon instance and has no chance of getting
// cleaned up when a new daemon is instantiated with a
// new exec root.
netnsPath := filepath.Join(d.execRoot, "netns")
filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error {
if err := unix.Unmount(path, unix.MNT_DETACH); err != nil && err != unix.EINVAL && err != unix.ENOENT {
t.Logf("[%s] unmount of %s failed: %v", d.id, path, err)
}
os.Remove(path)
return nil
})
}
// CgroupNamespace returns the cgroup namespace the daemon is running in
func (d *Daemon) CgroupNamespace(t testing.TB) string {
link, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/cgroup", d.Pid()))
assert.NilError(t, err)
return strings.TrimSpace(link)
}
// SignalDaemonDump sends a signal to the daemon to write a dump file
func SignalDaemonDump(pid int) {
unix.Kill(pid, unix.SIGQUIT)

View File

@ -162,6 +162,11 @@ func (e *Execution) IsUserNamespace() bool {
return root != ""
}
// RuntimeIsWindowsContainerd returns whether the containerd runtime is used on Windows
func (e *Execution) RuntimeIsWindowsContainerd() bool {
return os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME") == "1"
}
// IsRootless returns whether the rootless mode is enabled
func (e *Execution) IsRootless() bool {
return os.Getenv("DOCKER_ROOTLESS") != ""

View File

@ -10,7 +10,7 @@ import (
"gotest.tools/v3/assert"
)
var frozenImages = []string{"busybox:latest", "busybox:glibc", "hello-world:frozen", "debian:bullseye"}
var frozenImages = []string{"busybox:latest", "busybox:glibc", "hello-world:frozen", "debian:bullseye-slim"}
type protectedElements struct {
containers map[string]struct{}

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package request

View File

@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"strings"
)
@ -39,7 +38,7 @@ func Method(method string) func(*Options) {
// RawString sets the specified string as body for the request
func RawString(content string) func(*Options) {
return RawContent(ioutil.NopCloser(strings.NewReader(content)))
return RawContent(io.NopCloser(strings.NewReader(content)))
}
// RawContent sets the specified reader as body for the request
@ -71,7 +70,7 @@ func JSONBody(data interface{}) func(*Options) {
if err := json.NewEncoder(jsonData).Encode(data); err != nil {
return err
}
req.Body = ioutil.NopCloser(jsonData)
req.Body = io.NopCloser(jsonData)
req.Header.Set("Content-Type", "application/json")
return nil
})

View File

@ -5,7 +5,6 @@ import (
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
@ -15,7 +14,6 @@ import (
"time"
"github.com/docker/docker/client"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/testutil/environment"
"github.com/docker/go-connections/sockets"
@ -106,7 +104,7 @@ func Do(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCl
// ReadBody read the specified ReadCloser content and returns it
func ReadBody(b io.ReadCloser) ([]byte, error) {
defer b.Close()
return ioutil.ReadAll(b)
return io.ReadAll(b)
}
// newRequest creates a new http Request to the specified host and endpoint, with the specified request modifiers
@ -180,7 +178,7 @@ func getTLSConfig() (*tls.Config, error) {
// DaemonHost return the daemon host string for this test execution
func DaemonHost() string {
daemonURLStr := "unix://" + opts.DefaultUnixSocket
daemonURLStr := client.DefaultDockerHost
if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" {
daemonURLStr = daemonHostVar
}

View File

@ -4,6 +4,7 @@ import (
"context"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strings"
@ -13,6 +14,7 @@ import (
"github.com/containerd/continuity/fs"
"github.com/docker/docker/pkg/fileutils"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
)
var bufferPool = &sync.Pool{
@ -87,7 +89,7 @@ func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) e
return err
}
c, err := newCopier(ci.Chown, ci.Utime, ci.Mode, ci.XAttrErrorHandler, ci.IncludePatterns, ci.ExcludePatterns)
c, err := newCopier(dstRoot, ci.Chown, ci.Utime, ci.Mode, ci.XAttrErrorHandler, ci.IncludePatterns, ci.ExcludePatterns, ci.ChangeFunc)
if err != nil {
return err
}
@ -113,7 +115,7 @@ func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) e
if err != nil {
return err
}
if err := c.copy(ctx, srcFollowed, "", dst, false, false, false); err != nil {
if err := c.copy(ctx, srcFollowed, "", dst, false, fileutils.MatchInfo{}, fileutils.MatchInfo{}); err != nil {
return err
}
}
@ -170,6 +172,7 @@ type CopyInfo struct {
IncludePatterns []string
// Exclude files/dir matching any of these patterns (even if they match an include pattern)
ExcludePatterns []string
ChangeFunc fsutil.ChangeFunc
}
type Opt func(*CopyInfo)
@ -217,6 +220,12 @@ func WithExcludePattern(excludePattern string) Opt {
}
}
func WithChangeNotifier(fn fsutil.ChangeFunc) Opt {
return func(ci *CopyInfo) {
ci.ChangeFunc = fn
}
}
type copier struct {
chown Chowner
utime *time.Time
@ -226,6 +235,8 @@ type copier struct {
includePatternMatcher *fileutils.PatternMatcher
excludePatternMatcher *fileutils.PatternMatcher
parentDirs []parentDir
changefn fsutil.ChangeFunc
root string
}
type parentDir struct {
@ -234,7 +245,7 @@ type parentDir struct {
copied bool
}
func newCopier(chown Chowner, tm *time.Time, mode *int, xeh XAttrErrorHandler, includePatterns, excludePatterns []string) (*copier, error) {
func newCopier(root string, chown Chowner, tm *time.Time, mode *int, xeh XAttrErrorHandler, includePatterns, excludePatterns []string, changeFunc fsutil.ChangeFunc) (*copier, error) {
if xeh == nil {
xeh = func(dst, src, key string, err error) error {
return err
@ -260,6 +271,7 @@ func newCopier(chown Chowner, tm *time.Time, mode *int, xeh XAttrErrorHandler, i
}
return &copier{
root: root,
inodes: map[uint64]string{},
chown: chown,
utime: tm,
@ -267,11 +279,12 @@ func newCopier(chown Chowner, tm *time.Time, mode *int, xeh XAttrErrorHandler, i
mode: mode,
includePatternMatcher: includePatternMatcher,
excludePatternMatcher: excludePatternMatcher,
changefn: changeFunc,
}, nil
}
// dest is always clean
func (c *copier) copy(ctx context.Context, src, srcComponents, target string, overwriteTargetMetadata, parentMatchedInclude, parentMatchedExclude bool) error {
func (c *copier) copy(ctx context.Context, src, srcComponents, target string, overwriteTargetMetadata bool, parentIncludeMatchInfo, parentExcludeMatchInfo fileutils.MatchInfo) error {
select {
case <-ctx.Done():
return ctx.Err()
@ -284,16 +297,20 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov
}
include := true
matchesIncludePattern := false
matchesExcludePattern := false
var (
includeMatchInfo fileutils.MatchInfo
excludeMatchInfo fileutils.MatchInfo
)
if srcComponents != "" {
matchesIncludePattern, err = c.include(srcComponents, fi, parentMatchedInclude)
matchesIncludePattern := false
matchesExcludePattern := false
matchesIncludePattern, includeMatchInfo, err = c.include(srcComponents, fi, parentIncludeMatchInfo)
if err != nil {
return err
}
include = matchesIncludePattern
matchesExcludePattern, err = c.exclude(srcComponents, fi, parentMatchedExclude)
matchesExcludePattern, excludeMatchInfo, err = c.exclude(srcComponents, fi, parentExcludeMatchInfo)
if err != nil {
return err
}
@ -319,17 +336,19 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov
}
copyFileInfo := true
notify := true
switch {
case fi.IsDir():
if created, err := c.copyDirectory(
ctx, src, srcComponents, target, fi, overwriteTargetMetadata,
include, matchesIncludePattern, matchesExcludePattern,
include, includeMatchInfo, excludeMatchInfo,
); err != nil {
return err
} else if !overwriteTargetMetadata || c.includePatternMatcher != nil {
copyFileInfo = created
}
notify = false
case (fi.Mode() & os.ModeType) == 0:
link, err := getLinkSource(target, fi, c.inodes)
if err != nil {
@ -368,31 +387,45 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov
return errors.Wrap(err, "failed to copy xattrs")
}
}
if notify {
if err := c.notifyChange(target, fi); err != nil {
return err
}
}
return nil
}
func (c *copier) include(path string, fi os.FileInfo, parentMatchedInclude bool) (bool, error) {
if c.includePatternMatcher == nil {
return true, nil
func (c *copier) notifyChange(target string, fi os.FileInfo) error {
if c.changefn != nil {
if err := c.changefn(fsutil.ChangeKindAdd, path.Clean(strings.TrimPrefix(target, c.root)), fi, nil); err != nil {
return errors.Wrap(err, "failed to notify file change")
}
}
m, err := c.includePatternMatcher.MatchesUsingParentResult(path, parentMatchedInclude)
if err != nil {
return false, errors.Wrap(err, "failed to match includepatterns")
}
return m, nil
return nil
}
func (c *copier) exclude(path string, fi os.FileInfo, parentMatchedExclude bool) (bool, error) {
if c.excludePatternMatcher == nil {
return false, nil
func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo fileutils.MatchInfo) (bool, fileutils.MatchInfo, error) {
if c.includePatternMatcher == nil {
return true, fileutils.MatchInfo{}, nil
}
m, err := c.excludePatternMatcher.MatchesUsingParentResult(path, parentMatchedExclude)
m, matchInfo, err := c.includePatternMatcher.MatchesUsingParentResults(path, parentIncludeMatchInfo)
if err != nil {
return false, errors.Wrap(err, "failed to match excludepatterns")
return false, matchInfo, errors.Wrap(err, "failed to match includepatterns")
}
return m, nil
return m, matchInfo, nil
}
func (c *copier) exclude(path string, fi os.FileInfo, parentExcludeMatchInfo fileutils.MatchInfo) (bool, fileutils.MatchInfo, error) {
if c.excludePatternMatcher == nil {
return false, fileutils.MatchInfo{}, nil
}
m, matchInfo, err := c.excludePatternMatcher.MatchesUsingParentResults(path, parentExcludeMatchInfo)
if err != nil {
return false, matchInfo, errors.Wrap(err, "failed to match excludepatterns")
}
return m, matchInfo, nil
}
// Delayed creation of parent directories when a file or dir matches an include
@ -438,8 +471,8 @@ func (c *copier) copyDirectory(
stat os.FileInfo,
overwriteTargetMetadata bool,
include bool,
matchesIncludePattern bool,
matchesExcludePattern bool,
includeMatchInfo fileutils.MatchInfo,
excludeMatchInfo fileutils.MatchInfo,
) (bool, error) {
if !stat.IsDir() {
return false, errors.Errorf("source is not directory")
@ -462,6 +495,11 @@ func (c *copier) copyDirectory(
if err != nil {
return created, err
}
if created || overwriteTargetMetadata {
if err := c.notifyChange(dst, stat); err != nil {
return created, err
}
}
parentDir.copied = true
}
@ -481,7 +519,7 @@ func (c *copier) copyDirectory(
ctx,
filepath.Join(src, fi.Name()), filepath.Join(srcComponents, fi.Name()),
filepath.Join(dst, fi.Name()),
true, matchesIncludePattern, matchesExcludePattern,
true, includeMatchInfo, excludeMatchInfo,
); err != nil {
return false, err
}
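A minimal sketch of the new change-notification plumbing from the caller's side, using the WithChangeNotifier option and ChangeKind's String method added in this patch; the /src and /dst paths are placeholders:
package main

import (
    "context"
    "log"
    "os"

    "github.com/tonistiigi/fsutil"
    copyfs "github.com/tonistiigi/fsutil/copy"
)

func main() {
    // Log every path the copier creates; ChangeKind now implements String(),
    // so the %s verb prints "add", "modify", etc.
    notify := func(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        log.Printf("%s %s (%d bytes)", kind, p, fi.Size())
        return nil
    }
    err := copyfs.Copy(context.TODO(), "/src", ".", "/dst", ".", copyfs.WithChangeNotifier(notify))
    if err != nil {
        log.Fatal(err)
    }
}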

View File

@ -0,0 +1,32 @@
// +build freebsd
package fs
import (
"io"
"os"
"github.com/pkg/errors"
)
func copyFile(source, target string) error {
src, err := os.Open(source)
if err != nil {
return errors.Wrapf(err, "failed to open source %s", source)
}
defer src.Close()
tgt, err := os.Create(target)
if err != nil {
return errors.Wrapf(err, "failed to open target %s", target)
}
defer tgt.Close()
return copyFileContent(tgt, src)
}
func copyFileContent(dst, src *os.File) error {
buf := bufferPool.Get().(*[]byte)
_, err := io.CopyBuffer(dst, src, *buf)
bufferPool.Put(buf)
return err
}

View File

@ -97,7 +97,10 @@ func copyFileContent(dst, src *os.File) error {
buf := bufferPool.Get().(*[]byte)
_, err = io.CopyBuffer(dst, src, *buf)
bufferPool.Put(buf)
return errors.Wrap(err, "userspace copy failed")
if err != nil {
return errors.Wrap(err, "userspace copy failed")
}
return nil
}
first = false

View File

@ -1,3 +1,4 @@
//go:build solaris || darwin || freebsd
// +build solaris darwin freebsd
package fs
@ -51,11 +52,3 @@ func (c *copier) copyFileInfo(fi os.FileInfo, name string) error {
}
return nil
}
func copyDevice(dst string, fi os.FileInfo) error {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return errors.New("unsupported stat type")
}
return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
}

View File

@ -0,0 +1,20 @@
//go:build darwin
// +build darwin
package fs
import (
"os"
"syscall"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func copyDevice(dst string, fi os.FileInfo) error {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return errors.New("unsupported stat type")
}
return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
}

View File

@ -0,0 +1,20 @@
//go:build freebsd || solaris
// +build freebsd solaris
package fs
import (
"os"
"syscall"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func copyDevice(dst string, fi os.FileInfo) error {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return errors.New("unsupported stat type")
}
return unix.Mknod(dst, uint32(fi.Mode()), st.Rdev)
}

View File

@ -33,6 +33,19 @@ const (
ChangeKindDelete
)
func (k ChangeKind) String() string {
switch k {
case ChangeKindAdd:
return "add"
case ChangeKindModify:
return "modify"
case ChangeKindDelete:
return "delete"
default:
return "unknown"
}
}
// ChangeFunc is the type of function called for each change
// computed during a directory changes calculation.
type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error

View File

@ -0,0 +1,14 @@
// +build freebsd
package fsutil
import (
"github.com/tonistiigi/fsutil/types"
"golang.org/x/sys/unix"
)
func createSpecialFile(path string, mode uint32, stat *types.Stat) error {
dev := unix.Mkdev(uint32(stat.Devmajor), uint32(stat.Devminor))
return unix.Mknod(path, mode, dev)
}

View File

@ -45,7 +45,7 @@ func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error {
mode |= syscall.S_IFBLK
}
if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil {
if err := createSpecialFile(path, mode, stat); err != nil {
return errors.WithStack(err)
}
return nil

View File

@ -0,0 +1,13 @@
// +build !windows,!freebsd
package fsutil
import (
"syscall"
"github.com/tonistiigi/fsutil/types"
)
func createSpecialFile(path string, mode uint32, stat *types.Stat) error {
return syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor)))
}

View File

@ -63,5 +63,5 @@ target "shfmt" {
target "cross" {
inherits = ["build"]
platforms = ["linux/amd64", "linux/386", "linux/arm64", "linux/arm", "linux/ppc64le", "linux/s390x"]
platforms = ["linux/amd64", "linux/386", "linux/arm64", "linux/arm", "linux/ppc64le", "linux/s390x", "darwin/amd64", "darwin/arm64", "windows/amd64", "freebsd/amd64", "freebsd/arm64"]
}

View File

@ -1,45 +0,0 @@
package prefix
import (
"path"
"path/filepath"
"strings"
)
// Match matches a path against a pattern. It returns m = true if the path
// matches the pattern, and partial = true if the pattern has more separators
// than the path and the common components match (for example, name = foo and
// pattern = foo/bar/*). slashSeparator determines whether the path and pattern
// are '/' delimited (true) or use the native path separator (false).
func Match(pattern, name string, slashSeparator bool) (m bool, partial bool) {
separator := filepath.Separator
if slashSeparator {
separator = '/'
}
count := strings.Count(name, string(separator))
if strings.Count(pattern, string(separator)) > count {
pattern = trimUntilIndex(pattern, string(separator), count)
partial = true
}
if slashSeparator {
m, _ = path.Match(pattern, name)
} else {
m, _ = filepath.Match(pattern, name)
}
return m, partial
}
func trimUntilIndex(str, sep string, count int) string {
s := str
i := 0
c := 0
for {
idx := strings.Index(s, sep)
s = s[idx+len(sep):]
i += idx + len(sep)
c++
if c > count {
return str[:i-len(sep)]
}
}
}

View File

@ -10,7 +10,6 @@ import (
"github.com/docker/docker/pkg/fileutils"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil/prefix"
"github.com/tonistiigi/fsutil/types"
)
@ -36,20 +35,15 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
return errors.WithStack(&os.PathError{Op: "walk", Path: root, Err: syscall.ENOTDIR})
}
var pm *fileutils.PatternMatcher
if opt != nil && opt.ExcludePatterns != nil {
pm, err = fileutils.NewPatternMatcher(opt.ExcludePatterns)
if err != nil {
return errors.Wrapf(err, "invalid excludepatterns: %s", opt.ExcludePatterns)
}
}
var (
includePatterns []string
includeMatcher *fileutils.PatternMatcher
excludeMatcher *fileutils.PatternMatcher
)
var includePatterns []string
if opt != nil && opt.IncludePatterns != nil {
includePatterns = make([]string, len(opt.IncludePatterns))
for k := range opt.IncludePatterns {
includePatterns[k] = filepath.Clean(opt.IncludePatterns[k])
}
copy(includePatterns, opt.IncludePatterns)
}
if opt != nil && opt.FollowPaths != nil {
targets, err := FollowLinks(p, opt.FollowPaths)
@ -61,13 +55,32 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
includePatterns = dedupePaths(includePatterns)
}
}
if len(includePatterns) != 0 {
includeMatcher, err = fileutils.NewPatternMatcher(includePatterns)
if err != nil {
return errors.Wrapf(err, "invalid includepatterns: %s", opt.IncludePatterns)
}
}
var (
lastIncludedDir string
if opt != nil && opt.ExcludePatterns != nil {
excludeMatcher, err = fileutils.NewPatternMatcher(opt.ExcludePatterns)
if err != nil {
return errors.Wrapf(err, "invalid excludepatterns: %s", opt.ExcludePatterns)
}
}
parentDirs []string // used only for exclude handling
parentMatchedExclude []bool
)
type visitedDir struct {
fi os.FileInfo
path string
origpath string
pathWithSep string
includeMatchInfo fileutils.MatchInfo
excludeMatchInfo fileutils.MatchInfo
calledFn bool
}
// used only for include/exclude handling
var parentDirs []visitedDir
seenFiles := make(map[uint64]string)
return filepath.Walk(root, func(path string, fi os.FileInfo, err error) (retErr error) {
@ -90,87 +103,84 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
return nil
}
if opt != nil {
if includePatterns != nil {
skip := false
if lastIncludedDir != "" {
if strings.HasPrefix(path, lastIncludedDir+string(filepath.Separator)) {
skip = true
}
}
var dir visitedDir
if !skip {
matched := false
partial := true
for _, pattern := range includePatterns {
if ok, p := prefix.Match(pattern, path, false); ok {
matched = true
if !p {
partial = false
break
}
}
}
if !matched {
if fi.IsDir() {
return filepath.SkipDir
}
return nil
}
if !partial && fi.IsDir() {
lastIncludedDir = path
}
if includeMatcher != nil || excludeMatcher != nil {
for len(parentDirs) != 0 {
lastParentDir := parentDirs[len(parentDirs)-1].pathWithSep
if strings.HasPrefix(path, lastParentDir) {
break
}
parentDirs = parentDirs[:len(parentDirs)-1]
}
if pm != nil {
for len(parentMatchedExclude) != 0 {
lastParentDir := parentDirs[len(parentDirs)-1]
if strings.HasPrefix(path, lastParentDir) {
break
}
parentDirs = parentDirs[:len(parentDirs)-1]
parentMatchedExclude = parentMatchedExclude[:len(parentMatchedExclude)-1]
}
var m bool
if len(parentMatchedExclude) != 0 {
m, err = pm.MatchesUsingParentResult(path, parentMatchedExclude[len(parentMatchedExclude)-1])
} else {
m, err = pm.MatchesOrParentMatches(path)
}
if err != nil {
return errors.Wrap(err, "failed to match excludepatterns")
}
var dirSlash string
if fi.IsDir() {
dirSlash = path + string(filepath.Separator)
parentDirs = append(parentDirs, dirSlash)
parentMatchedExclude = append(parentMatchedExclude, m)
}
if m {
if fi.IsDir() {
if !pm.Exclusions() {
return filepath.SkipDir
}
for _, pat := range pm.Patterns() {
if !pat.Exclusion() {
continue
}
patStr := pat.String() + string(filepath.Separator)
if strings.HasPrefix(patStr, dirSlash) {
goto passedFilter
}
}
return filepath.SkipDir
}
return nil
if fi.IsDir() {
dir = visitedDir{
fi: fi,
path: path,
origpath: origpath,
pathWithSep: path + string(filepath.Separator),
}
}
}
passedFilter:
skip := false
if includeMatcher != nil {
var parentIncludeMatchInfo fileutils.MatchInfo
if len(parentDirs) != 0 {
parentIncludeMatchInfo = parentDirs[len(parentDirs)-1].includeMatchInfo
}
m, matchInfo, err := includeMatcher.MatchesUsingParentResults(path, parentIncludeMatchInfo)
if err != nil {
return errors.Wrap(err, "failed to match includepatterns")
}
if fi.IsDir() {
dir.includeMatchInfo = matchInfo
}
if !m {
skip = true
}
}
if excludeMatcher != nil {
var parentExcludeMatchInfo fileutils.MatchInfo
if len(parentDirs) != 0 {
parentExcludeMatchInfo = parentDirs[len(parentDirs)-1].excludeMatchInfo
}
m, matchInfo, err := excludeMatcher.MatchesUsingParentResults(path, parentExcludeMatchInfo)
if err != nil {
return errors.Wrap(err, "failed to match excludepatterns")
}
if fi.IsDir() {
dir.excludeMatchInfo = matchInfo
}
if m {
if fi.IsDir() && !excludeMatcher.Exclusions() {
return filepath.SkipDir
}
skip = true
}
}
if includeMatcher != nil || excludeMatcher != nil {
defer func() {
if fi.IsDir() {
parentDirs = append(parentDirs, dir)
}
}()
}
if skip {
return nil
}
dir.calledFn = true
stat, err := mkstat(origpath, path, fi, seenFiles)
if err != nil {
return err
@ -185,6 +195,31 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
return nil
}
}
for i, parentDir := range parentDirs {
if parentDir.calledFn {
continue
}
parentStat, err := mkstat(parentDir.origpath, parentDir.path, parentDir.fi, seenFiles)
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if opt != nil && opt.Map != nil {
if allowed := opt.Map(parentStat.Path, parentStat); !allowed {
continue
}
}
if err := fn(parentStat.Path, &StatInfo{parentStat}, nil); err != nil {
return err
}
parentDirs[i].calledFn = true
}
if err := fn(stat.Path, &StatInfo{stat}, nil); err != nil {
return err
}
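A short sketch of the Walk entry point these changes feed into; the include and exclude patterns are illustrative, and both sides now share the MatchesUsingParentResults bookkeeping shown above:
package main

import (
    "context"
    "fmt"
    "os"

    "github.com/tonistiigi/fsutil"
)

func main() {
    // Walk the current directory, applying include and exclude patterns;
    // the callback receives slash-separated paths relative to the walk root.
    opt := &fsutil.WalkOpt{
        IncludePatterns: []string{"src/**"},
        ExcludePatterns: []string{"**/*_test.go"},
    }
    err := fsutil.Walk(context.TODO(), ".", opt, func(path string, fi os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        fmt.Println(path, fi.IsDir())
        return nil
    })
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}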

vendor/modules.txt (vendored, 8 changed lines)
View File

@ -218,7 +218,7 @@ github.com/docker/cli/cli/connhelper/commandconn
github.com/docker/distribution/digestset
github.com/docker/distribution/reference
github.com/docker/distribution/registry/api/errcode
# github.com/docker/docker v20.10.7+incompatible => github.com/docker/docker v20.10.3-0.20210817025855-ba2adeebdb8d+incompatible
# github.com/docker/docker v20.10.7+incompatible => github.com/docker/docker v20.10.3-0.20211124173851-93d560d5b3b0+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@ -240,7 +240,6 @@ github.com/docker/docker/client
github.com/docker/docker/errdefs
github.com/docker/docker/libnetwork/ipamutils
github.com/docker/docker/libnetwork/resolvconf
github.com/docker/docker/libnetwork/resolvconf/dns
github.com/docker/docker/libnetwork/types
github.com/docker/docker/opts
github.com/docker/docker/pkg/archive
@ -459,11 +458,10 @@ github.com/sirupsen/logrus
## explicit; go 1.13
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/tonistiigi/fsutil v0.0.0-20210818161904-4442383b5028
# github.com/tonistiigi/fsutil v0.0.0-20211125000220-d952e50eae30
## explicit; go 1.13
github.com/tonistiigi/fsutil
github.com/tonistiigi/fsutil/copy
github.com/tonistiigi/fsutil/prefix
github.com/tonistiigi/fsutil/types
# github.com/tonistiigi/go-actions-cache v0.0.0-20211202175116-9642704158ff
## explicit; go 1.16
@ -710,6 +708,6 @@ gotest.tools/v3/internal/difflib
gotest.tools/v3/internal/format
gotest.tools/v3/internal/source
gotest.tools/v3/poll
# github.com/docker/docker => github.com/docker/docker v20.10.3-0.20210817025855-ba2adeebdb8d+incompatible
# github.com/docker/docker => github.com/docker/docker v20.10.3-0.20211124173851-93d560d5b3b0+incompatible
# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/httptrace/otelhttptrace v0.0.0-20211026174723-2f82a1e0c997
# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/otelhttp v0.0.0-20211026174723-2f82a1e0c997