worker: runc worker execution

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
docker-18.09
Tonis Tiigi 2017-06-01 15:24:23 -07:00
parent 25d9c1db0e
commit 08134677cd
122 changed files with 35656 additions and 49 deletions

cache/manager.go vendored

@ -128,7 +128,7 @@ func (cm *cacheManager) get(id string) (ImmutableRef, error) {
rec = &cacheRecord{
id: id,
cm: cm,
refs: make(map[*cacheRef]struct{}),
refs: make(map[Mountable]struct{}),
parent: parent,
size: sizeUnknown,
}
@ -173,7 +173,7 @@ func (cm *cacheManager) New(s ImmutableRef) (MutableRef, error) {
mutable: true,
id: id,
cm: cm,
refs: make(map[*cacheRef]struct{}),
refs: make(map[Mountable]struct{}),
parent: parent,
size: sizeUnknown,
}
@ -183,7 +183,7 @@ func (cm *cacheManager) New(s ImmutableRef) (MutableRef, error) {
cm.records[id] = rec // TODO: save to db
return rec.ref(), nil
return rec.mref(), nil
}
func (cm *cacheManager) GetMutable(id string) (MutableRef, error) { // Rebase?
cm.mu.Lock()
@ -204,7 +204,7 @@ func (cm *cacheManager) GetMutable(id string) (MutableRef, error) { // Rebase?
return nil, errors.Wrapf(errLocked, "%s is locked", id)
}
return rec.ref(), nil
return rec.mref(), nil
}
func (cm *cacheManager) DiskUsage(ctx context.Context) ([]*UsageInfo, error) {

cache/refs.go vendored

@ -38,7 +38,7 @@ type cacheRecord struct {
mutable bool
frozen bool
// meta SnapMeta
refs map[*cacheRef]struct{}
refs map[Mountable]struct{}
id string
cm *cacheManager
parent ImmutableRef
@ -50,8 +50,15 @@ type cacheRecord struct {
}
// hold manager lock before calling
func (cr *cacheRecord) ref() *cacheRef {
ref := &cacheRef{cacheRecord: cr}
func (cr *cacheRecord) ref() *immutableRef {
ref := &immutableRef{cacheRecord: cr}
cr.refs[ref] = struct{}{}
return ref
}
// hold manager lock before calling
func (cr *cacheRecord) mref() *mutableRef {
ref := &mutableRef{cacheRecord: cr}
cr.refs[ref] = struct{}{}
return ref
}
@ -77,37 +84,45 @@ func (cr *cacheRecord) Size(ctx context.Context) (int64, error) {
return s.(int64), err
}
type cacheRef struct {
*cacheRecord
}
func (cr *cacheRecord) Mount() ([]mount.Mount, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
func (sr *cacheRef) Mount() ([]mount.Mount, error) {
sr.mu.Lock()
defer sr.mu.Unlock()
if sr.mutable {
m, err := sr.cm.Snapshotter.Mounts(context.TODO(), sr.id)
if cr.mutable {
m, err := cr.cm.Snapshotter.Mounts(context.TODO(), cr.id)
if err != nil {
return nil, errors.Wrapf(err, "failed to mount %s", sr.id)
return nil, errors.Wrapf(err, "failed to mount %s", cr.id)
}
return m, nil
} else {
if sr.viewMount == nil { // TODO: handle this better
sr.view = generateID()
m, err := sr.cm.Snapshotter.View(context.TODO(), sr.view, sr.id)
if cr.viewMount == nil { // TODO: handle this better
cr.view = generateID()
m, err := cr.cm.Snapshotter.View(context.TODO(), cr.view, cr.id)
if err != nil {
sr.view = ""
return nil, errors.Wrapf(err, "failed to mount %s", sr.id)
cr.view = ""
return nil, errors.Wrapf(err, "failed to mount %s", cr.id)
}
sr.viewMount = m
cr.viewMount = m
}
return sr.viewMount, nil
return cr.viewMount, nil
}
return nil, errors.New("snapshot mount not implemented")
}
func (sr *cacheRef) Release() error {
func (cr *cacheRecord) ID() string {
return cr.id
}
type immutableRef struct {
*cacheRecord
}
type mutableRef struct {
*cacheRecord
}
func (sr *immutableRef) Release() error {
sr.cm.mu.Lock()
defer sr.cm.mu.Unlock()
@ -117,9 +132,9 @@ func (sr *cacheRef) Release() error {
return sr.release()
}
func (sr *cacheRef) release() error {
func (sr *immutableRef) release() error {
if sr.parent != nil {
if err := sr.parent.(*cacheRef).release(); err != nil {
if err := sr.parent.(*immutableRef).release(); err != nil {
return err
}
}
@ -141,7 +156,7 @@ func (sr *cacheRef) release() error {
return nil
}
func (sr *cacheRef) Freeze() (ImmutableRef, error) {
func (sr *mutableRef) Freeze() (ImmutableRef, error) {
sr.cm.mu.Lock()
defer sr.cm.mu.Unlock()
@ -156,13 +171,17 @@ func (sr *cacheRef) Freeze() (ImmutableRef, error) {
return nil, errors.Wrapf(errInvalid, "invalid mutable")
}
sr.frozen = true
sr.size = sizeUnknown
delete(sr.refs, sr)
return sr, nil
sri := sr.ref()
sri.frozen = true
sri.size = sizeUnknown
return sri, nil
}
func (sr *cacheRef) ReleaseAndCommit(ctx context.Context) (ImmutableRef, error) {
func (sr *mutableRef) ReleaseAndCommit(ctx context.Context) (ImmutableRef, error) {
sr.cm.mu.Lock()
defer sr.cm.mu.Unlock()
@ -191,7 +210,7 @@ func (sr *cacheRef) ReleaseAndCommit(ctx context.Context) (ImmutableRef, error)
rec := &cacheRecord{
id: id,
cm: sr.cm,
refs: make(map[*cacheRef]struct{}),
refs: make(map[Mountable]struct{}),
size: sizeUnknown,
}
sr.cm.records[id] = rec // TODO: save to db
@ -199,10 +218,6 @@ func (sr *cacheRef) ReleaseAndCommit(ctx context.Context) (ImmutableRef, error)
return rec.ref(), nil
}
func (sr *cacheRef) ID() string {
return sr.id
}
func generateID() string {
b := make([]byte, 32)
if _, err := rand.Read(b); err != nil {
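
The refactor above replaces the single cacheRef type with separate immutableRef and mutableRef types, both embedding *cacheRecord, and widens the record's reference set to map[Mountable]struct{} so either kind of ref can be tracked in one place. A minimal standalone sketch of that pattern, with simplified names (the real Mountable interface presumably also exposes Mount(); this is not the package's actual code):

package main

import "fmt"

// Mountable is the shared behaviour both ref kinds expose (simplified sketch).
type Mountable interface {
	ID() string
}

// record stands in for cacheRecord: one snapshot, many outstanding refs.
type record struct {
	id   string
	refs map[Mountable]struct{} // immutable and mutable refs share one set
}

func (r *record) ID() string { return r.id }

type immutableRef struct{ *record }
type mutableRef struct{ *record }

// ref and mref mirror the diff: distinct ref types, same tracking map.
func (r *record) ref() *immutableRef {
	ir := &immutableRef{record: r}
	r.refs[ir] = struct{}{}
	return ir
}

func (r *record) mref() *mutableRef {
	mr := &mutableRef{record: r}
	r.refs[mr] = struct{}{}
	return mr
}

func main() {
	rec := &record{id: "example", refs: map[Mountable]struct{}{}}
	_, _ = rec.ref(), rec.mref()
	fmt.Println(len(rec.refs)) // 2: one reference of each kind
}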


@ -26,6 +26,8 @@ import (
"github.com/tonistiigi/buildkit_poc/snapshot/blobmapping"
"github.com/tonistiigi/buildkit_poc/source"
"github.com/tonistiigi/buildkit_poc/source/containerimage"
"github.com/tonistiigi/buildkit_poc/worker"
"github.com/tonistiigi/buildkit_poc/worker/runcworker"
)
func TestControl(t *testing.T) {
@ -62,7 +64,7 @@ func TestControl(t *testing.T) {
sm.Register(is)
img, err := source.NewImageIdentifier("docker.io/library/redis:latest")
img, err := source.NewImageIdentifier("docker.io/library/busybox:latest")
assert.NoError(t, err)
snap, err := sm.Pull(context.TODO(), img)
@ -81,7 +83,7 @@ func TestControl(t *testing.T) {
names, err := f.Readdirnames(-1)
assert.NoError(t, err)
assert.True(t, len(names) > 10)
assert.True(t, len(names) > 5)
err = f.Close()
assert.NoError(t, err)
@ -96,15 +98,60 @@ func TestControl(t *testing.T) {
// fmt.Printf("du: %+v\n", d)
// }
err = snap.Release()
assert.NoError(t, err)
du, err = cm.DiskUsage(context.TODO())
assert.NoError(t, err)
for _, d := range du {
assert.True(t, d.Size >= 8192)
}
w, err := runcworker.New(tmpdir)
assert.NoError(t, err)
meta := worker.Meta{
Args: []string{"/bin/sh", "-c", "echo \"foo\" > /bar"},
// Args: []string{"/bin/sleep", "3"},
Cwd: "/",
}
m := make(map[string]cache.Mountable)
m["/"] = snap
err = w.Exec(context.TODO(), meta, m)
assert.Error(t, err) // Read-only root
root, err := cm.New(snap)
assert.NoError(t, err)
m["/"] = root
err = w.Exec(context.TODO(), meta, m)
assert.NoError(t, err)
rf, err := root.Freeze()
assert.NoError(t, err)
mounts, err = rf.Mount()
assert.NoError(t, err)
lm = snapshot.LocalMounter(mounts)
target, err = lm.Mount()
assert.NoError(t, err)
dt, err := ioutil.ReadFile(filepath.Join(target, "bar"))
assert.NoError(t, err)
assert.Equal(t, string(dt), "foo\n")
lm.Unmount()
assert.NoError(t, err)
err = rf.Release()
assert.NoError(t, err)
err = snap.Release()
assert.NoError(t, err)
du2, err := cm.DiskUsage(context.TODO())
assert.NoError(t, err)
assert.Equal(t, 1, len(du2)-len(du))
}
type containerd struct {
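
The new test above exercises the runc worker end to end: Exec against the pulled snapshot mounted read-only at "/" is expected to fail, and the same command then succeeds against a mutable ref created with cm.New. The worker package itself is not included in the excerpted hunks, so the shape below is inferred from the calls in the test and is an assumption, not a copy of the real definition:

// Hypothetical shape of the worker package, inferred from the test above.
package worker

import (
	"context"

	"github.com/tonistiigi/buildkit_poc/cache"
)

// Meta describes the process to run; Args and Cwd are the fields the test sets.
type Meta struct {
	Args []string
	Cwd  string
}

// Worker runs a process against mounts keyed by target path ("/" is the rootfs).
// runcworker.New(root) presumably returns an implementation of this.
type Worker interface {
	Exec(ctx context.Context, meta Meta, mounts map[string]cache.Mountable) error
}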


@ -1,4 +1,17 @@
FROM golang:1.8-alpine AS vndr
FROM golang:1.8-alpine AS runc
RUN apk add --no-cache g++ linux-headers
RUN apk add --no-cache git make
ARG RUNC_VERSION=v1.0.0-rc3
RUN git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
&& cd "$GOPATH/src/github.com/opencontainers/runc" \
&& git checkout -q "$RUNC_VERSION" \
&& go build -o /usr/bin/runc ./
FROM golang:1.8-alpine
RUN apk add --no-cache g++ linux-headers
COPY --from=runc /usr/bin/runc /usr/bin/runc
WORKDIR /go/src/github.com/tonistiigi/buildkit_poc
COPY . .
COPY . .
# EXPORT COPY --from=% /go/src/github.com/opencontainers/runc/runc /usr/local/bin/docker-runc


@ -4,4 +4,4 @@ set -eu -o pipefail -x
# update this to iidfile after 17.06
docker build -t buildkit_poc:test -f ./hack/dockerfiles/test.Dockerfile --force-rm .
docker run -v /tmp --cap-add=SYS_ADMIN buildkit_poc:test go test ${TESTFLAGS:--v} ${TESTPKGS:-./...}
docker run -v /tmp --privileged buildkit_poc:test go test ${TESTFLAGS:--v} ${TESTPKGS:-./...}


@ -55,7 +55,9 @@ func (lm *localMounter) Unmount() error {
if err := mount.Unmount(lm.target, 0); err != nil {
return err
}
os.RemoveAll(lm.target)
lm.target = ""
}
return nil
}


@ -18,4 +18,9 @@ github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/containerd/continuity f4ad4294c92f596c9241947c416d1297f9faf3ea
github.com/opencontainers/image-spec v1.0.0-rc5
github.com/opencontainers/runc 639454475cb9c8b861cc599f8bcd5c8c790ae402
github.com/Microsoft/go-winio v0.4.1
github.com/Microsoft/go-winio v0.4.1
github.com/containerd/fifo 69b99525e472735860a5269b75af1970142b3062
github.com/opencontainers/runtime-spec v1.0.0-rc5
github.com/containerd/go-runc 60e87b3b047d4c93faa996699f6fdcfa34685e65
github.com/containerd/console e0a2cdcf03d4d99c3bc061635a66cf92336c6c82
github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e

vendor/github.com/Azure/go-ansiterm/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/Azure/go-ansiterm/README.md generated vendored Normal file

@ -0,0 +1,12 @@
# go-ansiterm
This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent.
For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position.
The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go).
See parser_test.go for examples exercising the state machine and generating appropriate function calls.
-----
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
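
To make the flow the README describes concrete (bytes arrive, the state machine walks Ground, Escape, CsiEntry, and the handler's CUU fires for ESC [ A), here is a tiny standalone toy. It is not the library's implementation; a real handler must satisfy the full AnsiEventHandler interface in event_handler.go further down. The toy only restates the dispatch idea:

package main

import "fmt"

// toyHandler stands in for an event handler; only CUU is implemented here.
type toyHandler struct{}

func (toyHandler) CUU(n int) { fmt.Printf("cursor up %d\n", n) }

// parse walks a byte stream and fires CUU for the ESC [ A sequence,
// roughly mirroring the Ground -> Escape -> CsiEntry path in parser.go.
func parse(h toyHandler, input []byte) {
	const (
		ground = iota
		escape
		csiEntry
	)
	state := ground
	for _, b := range input {
		switch state {
		case ground:
			if b == 0x1B { // ESC
				state = escape
			}
		case escape:
			if b == '[' {
				state = csiEntry
			} else {
				state = ground
			}
		case csiEntry:
			if b == 'A' { // final byte for Cursor Up (CUU)
				h.CUU(1) // no parameters collected, so the default of 1
			}
			state = ground
		}
	}
}

func main() {
	parse(toyHandler{}, []byte("\x1b[A"))
}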

vendor/github.com/Azure/go-ansiterm/constants.go generated vendored Normal file

@ -0,0 +1,188 @@
package ansiterm
const LogEnv = "DEBUG_TERMINAL"
// ANSI constants
// References:
// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
// -- http://en.wikipedia.org/wiki/ANSI_escape_code
// -- http://vt100.net/emu/dec_ansi_parser
// -- http://vt100.net/emu/vt500_parser.svg
// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
// -- http://www.inwap.com/pdp10/ansicode.txt
const (
// ECMA-48 Set Graphics Rendition
// Note:
// -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
// -- Fonts could possibly be supported via SetCurrentConsoleFontEx
// -- Windows does not expose the per-window cursor (i.e., caret) blink times
ANSI_SGR_RESET = 0
ANSI_SGR_BOLD = 1
ANSI_SGR_DIM = 2
_ANSI_SGR_ITALIC = 3
ANSI_SGR_UNDERLINE = 4
_ANSI_SGR_BLINKSLOW = 5
_ANSI_SGR_BLINKFAST = 6
ANSI_SGR_REVERSE = 7
_ANSI_SGR_INVISIBLE = 8
_ANSI_SGR_LINETHROUGH = 9
_ANSI_SGR_FONT_00 = 10
_ANSI_SGR_FONT_01 = 11
_ANSI_SGR_FONT_02 = 12
_ANSI_SGR_FONT_03 = 13
_ANSI_SGR_FONT_04 = 14
_ANSI_SGR_FONT_05 = 15
_ANSI_SGR_FONT_06 = 16
_ANSI_SGR_FONT_07 = 17
_ANSI_SGR_FONT_08 = 18
_ANSI_SGR_FONT_09 = 19
_ANSI_SGR_FONT_10 = 20
_ANSI_SGR_DOUBLEUNDERLINE = 21
ANSI_SGR_BOLD_DIM_OFF = 22
_ANSI_SGR_ITALIC_OFF = 23
ANSI_SGR_UNDERLINE_OFF = 24
_ANSI_SGR_BLINK_OFF = 25
_ANSI_SGR_RESERVED_00 = 26
ANSI_SGR_REVERSE_OFF = 27
_ANSI_SGR_INVISIBLE_OFF = 28
_ANSI_SGR_LINETHROUGH_OFF = 29
ANSI_SGR_FOREGROUND_BLACK = 30
ANSI_SGR_FOREGROUND_RED = 31
ANSI_SGR_FOREGROUND_GREEN = 32
ANSI_SGR_FOREGROUND_YELLOW = 33
ANSI_SGR_FOREGROUND_BLUE = 34
ANSI_SGR_FOREGROUND_MAGENTA = 35
ANSI_SGR_FOREGROUND_CYAN = 36
ANSI_SGR_FOREGROUND_WHITE = 37
_ANSI_SGR_RESERVED_01 = 38
ANSI_SGR_FOREGROUND_DEFAULT = 39
ANSI_SGR_BACKGROUND_BLACK = 40
ANSI_SGR_BACKGROUND_RED = 41
ANSI_SGR_BACKGROUND_GREEN = 42
ANSI_SGR_BACKGROUND_YELLOW = 43
ANSI_SGR_BACKGROUND_BLUE = 44
ANSI_SGR_BACKGROUND_MAGENTA = 45
ANSI_SGR_BACKGROUND_CYAN = 46
ANSI_SGR_BACKGROUND_WHITE = 47
_ANSI_SGR_RESERVED_02 = 48
ANSI_SGR_BACKGROUND_DEFAULT = 49
// 50 - 65: Unsupported
ANSI_MAX_CMD_LENGTH = 4096
MAX_INPUT_EVENTS = 128
DEFAULT_WIDTH = 80
DEFAULT_HEIGHT = 24
ANSI_BEL = 0x07
ANSI_BACKSPACE = 0x08
ANSI_TAB = 0x09
ANSI_LINE_FEED = 0x0A
ANSI_VERTICAL_TAB = 0x0B
ANSI_FORM_FEED = 0x0C
ANSI_CARRIAGE_RETURN = 0x0D
ANSI_ESCAPE_PRIMARY = 0x1B
ANSI_ESCAPE_SECONDARY = 0x5B
ANSI_OSC_STRING_ENTRY = 0x5D
ANSI_COMMAND_FIRST = 0x40
ANSI_COMMAND_LAST = 0x7E
DCS_ENTRY = 0x90
CSI_ENTRY = 0x9B
OSC_STRING = 0x9D
ANSI_PARAMETER_SEP = ";"
ANSI_CMD_G0 = '('
ANSI_CMD_G1 = ')'
ANSI_CMD_G2 = '*'
ANSI_CMD_G3 = '+'
ANSI_CMD_DECPNM = '>'
ANSI_CMD_DECPAM = '='
ANSI_CMD_OSC = ']'
ANSI_CMD_STR_TERM = '\\'
KEY_CONTROL_PARAM_2 = ";2"
KEY_CONTROL_PARAM_3 = ";3"
KEY_CONTROL_PARAM_4 = ";4"
KEY_CONTROL_PARAM_5 = ";5"
KEY_CONTROL_PARAM_6 = ";6"
KEY_CONTROL_PARAM_7 = ";7"
KEY_CONTROL_PARAM_8 = ";8"
KEY_ESC_CSI = "\x1B["
KEY_ESC_N = "\x1BN"
KEY_ESC_O = "\x1BO"
FILL_CHARACTER = ' '
)
func getByteRange(start byte, end byte) []byte {
bytes := make([]byte, 0, 32)
for i := start; i <= end; i++ {
bytes = append(bytes, byte(i))
}
return bytes
}
var toGroundBytes = getToGroundBytes()
var executors = getExecuteBytes()
// SPACE 20+A0 hex Always and everywhere a blank space
// Intermediate 20-2F hex !"#$%&'()*+,-./
var intermeds = getByteRange(0x20, 0x2F)
// Parameters 30-3F hex 0123456789:;<=>?
// CSI Parameters 30-39, 3B hex 0123456789;
var csiParams = getByteRange(0x30, 0x3F)
var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
var upperCase = getByteRange(0x40, 0x5F)
// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~
var lowerCase = getByteRange(0x60, 0x7E)
// Alphabetics 40-7E hex (all of upper and lower case)
var alphabetics = append(upperCase, lowerCase...)
var printables = getByteRange(0x20, 0x7F)
var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
var escapeToGroundBytes = getEscapeToGroundBytes()
// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
// byte ranges below
func getEscapeToGroundBytes() []byte {
escapeToGroundBytes := getByteRange(0x30, 0x4F)
escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
return escapeToGroundBytes
}
func getExecuteBytes() []byte {
executeBytes := getByteRange(0x00, 0x17)
executeBytes = append(executeBytes, 0x19)
executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
return executeBytes
}
func getToGroundBytes() []byte {
groundBytes := []byte{0x18}
groundBytes = append(groundBytes, 0x1A)
groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
groundBytes = append(groundBytes, 0x99)
groundBytes = append(groundBytes, 0x9A)
groundBytes = append(groundBytes, 0x9C)
return groundBytes
}
// Delete 7F hex Always and everywhere ignored
// C1 Control 80-9F hex 32 additional control characters
// G1 Displayable A1-FE hex 94 additional displayable characters
// Special A0+FF hex Same as SPACE and DELETE

vendor/github.com/Azure/go-ansiterm/context.go generated vendored Normal file

@ -0,0 +1,7 @@
package ansiterm
type ansiContext struct {
currentChar byte
paramBuffer []byte
interBuffer []byte
}

vendor/github.com/Azure/go-ansiterm/csi_entry_state.go generated vendored Normal file

@ -0,0 +1,49 @@
package ansiterm
type csiEntryState struct {
baseState
}
func (csiState csiEntryState) Handle(b byte) (s state, e error) {
logger.Infof("CsiEntry::Handle %#x", b)
nextState, err := csiState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case sliceContains(alphabetics, b):
return csiState.parser.ground, nil
case sliceContains(csiCollectables, b):
return csiState.parser.csiParam, nil
case sliceContains(executors, b):
return csiState, csiState.parser.execute()
}
return csiState, nil
}
func (csiState csiEntryState) Transition(s state) error {
logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
csiState.baseState.Transition(s)
switch s {
case csiState.parser.ground:
return csiState.parser.csiDispatch()
case csiState.parser.csiParam:
switch {
case sliceContains(csiParams, csiState.parser.context.currentChar):
csiState.parser.collectParam()
case sliceContains(intermeds, csiState.parser.context.currentChar):
csiState.parser.collectInter()
}
}
return nil
}
func (csiState csiEntryState) Enter() error {
csiState.parser.clear()
return nil
}

vendor/github.com/Azure/go-ansiterm/csi_param_state.go generated vendored Normal file

@ -0,0 +1,38 @@
package ansiterm
type csiParamState struct {
baseState
}
func (csiState csiParamState) Handle(b byte) (s state, e error) {
logger.Infof("CsiParam::Handle %#x", b)
nextState, err := csiState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case sliceContains(alphabetics, b):
return csiState.parser.ground, nil
case sliceContains(csiCollectables, b):
csiState.parser.collectParam()
return csiState, nil
case sliceContains(executors, b):
return csiState, csiState.parser.execute()
}
return csiState, nil
}
func (csiState csiParamState) Transition(s state) error {
logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
csiState.baseState.Transition(s)
switch s {
case csiState.parser.ground:
return csiState.parser.csiDispatch()
}
return nil
}


@ -0,0 +1,36 @@
package ansiterm
type escapeIntermediateState struct {
baseState
}
func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
logger.Infof("escapeIntermediateState::Handle %#x", b)
nextState, err := escState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case sliceContains(intermeds, b):
return escState, escState.parser.collectInter()
case sliceContains(executors, b):
return escState, escState.parser.execute()
case sliceContains(escapeIntermediateToGroundBytes, b):
return escState.parser.ground, nil
}
return escState, nil
}
func (escState escapeIntermediateState) Transition(s state) error {
logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
escState.baseState.Transition(s)
switch s {
case escState.parser.ground:
return escState.parser.escDispatch()
}
return nil
}

vendor/github.com/Azure/go-ansiterm/escape_state.go generated vendored Normal file

@ -0,0 +1,47 @@
package ansiterm
type escapeState struct {
baseState
}
func (escState escapeState) Handle(b byte) (s state, e error) {
logger.Infof("escapeState::Handle %#x", b)
nextState, err := escState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case b == ANSI_ESCAPE_SECONDARY:
return escState.parser.csiEntry, nil
case b == ANSI_OSC_STRING_ENTRY:
return escState.parser.oscString, nil
case sliceContains(executors, b):
return escState, escState.parser.execute()
case sliceContains(escapeToGroundBytes, b):
return escState.parser.ground, nil
case sliceContains(intermeds, b):
return escState.parser.escapeIntermediate, nil
}
return escState, nil
}
func (escState escapeState) Transition(s state) error {
logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name())
escState.baseState.Transition(s)
switch s {
case escState.parser.ground:
return escState.parser.escDispatch()
case escState.parser.escapeIntermediate:
return escState.parser.collectInter()
}
return nil
}
func (escState escapeState) Enter() error {
escState.parser.clear()
return nil
}

vendor/github.com/Azure/go-ansiterm/event_handler.go generated vendored Normal file

@ -0,0 +1,90 @@
package ansiterm
type AnsiEventHandler interface {
// Print
Print(b byte) error
// Execute C0 commands
Execute(b byte) error
// CUrsor Up
CUU(int) error
// CUrsor Down
CUD(int) error
// CUrsor Forward
CUF(int) error
// CUrsor Backward
CUB(int) error
// Cursor to Next Line
CNL(int) error
// Cursor to Previous Line
CPL(int) error
// Cursor Horizontal position Absolute
CHA(int) error
// Vertical line Position Absolute
VPA(int) error
// CUrsor Position
CUP(int, int) error
// Horizontal and Vertical Position (depends on PUM)
HVP(int, int) error
// Text Cursor Enable Mode
DECTCEM(bool) error
// Origin Mode
DECOM(bool) error
// 132 Column Mode
DECCOLM(bool) error
// Erase in Display
ED(int) error
// Erase in Line
EL(int) error
// Insert Line
IL(int) error
// Delete Line
DL(int) error
// Insert Character
ICH(int) error
// Delete Character
DCH(int) error
// Set Graphics Rendition
SGR([]int) error
// Pan Down
SU(int) error
// Pan Up
SD(int) error
// Device Attributes
DA([]string) error
// Set Top and Bottom Margins
DECSTBM(int, int) error
// Index
IND() error
// Reverse Index
RI() error
// Flush updates from previous commands
Flush() error
}

vendor/github.com/Azure/go-ansiterm/ground_state.go generated vendored Normal file

@ -0,0 +1,24 @@
package ansiterm
type groundState struct {
baseState
}
func (gs groundState) Handle(b byte) (s state, e error) {
gs.parser.context.currentChar = b
nextState, err := gs.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case sliceContains(printables, b):
return gs, gs.parser.print()
case sliceContains(executors, b):
return gs, gs.parser.execute()
}
return gs, nil
}


@ -0,0 +1,31 @@
package ansiterm
type oscStringState struct {
baseState
}
func (oscState oscStringState) Handle(b byte) (s state, e error) {
logger.Infof("OscString::Handle %#x", b)
nextState, err := oscState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case isOscStringTerminator(b):
return oscState.parser.ground, nil
}
return oscState, nil
}
// See below for OSC string terminators for linux
// http://man7.org/linux/man-pages/man4/console_codes.4.html
func isOscStringTerminator(b byte) bool {
if b == ANSI_BEL || b == 0x5C {
return true
}
return false
}

vendor/github.com/Azure/go-ansiterm/parser.go generated vendored Normal file

@ -0,0 +1,136 @@
package ansiterm
import (
"errors"
"io/ioutil"
"os"
"github.com/Sirupsen/logrus"
)
var logger *logrus.Logger
type AnsiParser struct {
currState state
eventHandler AnsiEventHandler
context *ansiContext
csiEntry state
csiParam state
dcsEntry state
escape state
escapeIntermediate state
error state
ground state
oscString state
stateMap []state
}
func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser {
logFile := ioutil.Discard
if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
logFile, _ = os.Create("ansiParser.log")
}
logger = &logrus.Logger{
Out: logFile,
Formatter: new(logrus.TextFormatter),
Level: logrus.InfoLevel,
}
parser := &AnsiParser{
eventHandler: evtHandler,
context: &ansiContext{},
}
parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}}
parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}}
parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}}
parser.escape = escapeState{baseState{name: "Escape", parser: parser}}
parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}}
parser.error = errorState{baseState{name: "Error", parser: parser}}
parser.ground = groundState{baseState{name: "Ground", parser: parser}}
parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}}
parser.stateMap = []state{
parser.csiEntry,
parser.csiParam,
parser.dcsEntry,
parser.escape,
parser.escapeIntermediate,
parser.error,
parser.ground,
parser.oscString,
}
parser.currState = getState(initialState, parser.stateMap)
logger.Infof("CreateParser: parser %p", parser)
return parser
}
func getState(name string, states []state) state {
for _, el := range states {
if el.Name() == name {
return el
}
}
return nil
}
func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
for i, b := range bytes {
if err := ap.handle(b); err != nil {
return i, err
}
}
return len(bytes), ap.eventHandler.Flush()
}
func (ap *AnsiParser) handle(b byte) error {
ap.context.currentChar = b
newState, err := ap.currState.Handle(b)
if err != nil {
return err
}
if newState == nil {
logger.Warning("newState is nil")
return errors.New("New state of 'nil' is invalid.")
}
if newState != ap.currState {
if err := ap.changeState(newState); err != nil {
return err
}
}
return nil
}
func (ap *AnsiParser) changeState(newState state) error {
logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
// Exit old state
if err := ap.currState.Exit(); err != nil {
logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
return err
}
// Perform transition action
if err := ap.currState.Transition(newState); err != nil {
logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
return err
}
// Enter new state
if err := newState.Enter(); err != nil {
logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err)
return err
}
ap.currState = newState
return nil
}
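
Putting CreateParser and the AnsiEventHandler interface together: a caller supplies a handler and feeds bytes to Parse. Below is a minimal sketch against the interface as vendored here, using a handler that ignores every event (the method list mirrors event_handler.go above):

package main

import "github.com/Azure/go-ansiterm"

// nopHandler ignores every event; it exists only to satisfy AnsiEventHandler.
type nopHandler struct{}

func (nopHandler) Print(byte) error       { return nil }
func (nopHandler) Execute(byte) error     { return nil }
func (nopHandler) CUU(int) error          { return nil }
func (nopHandler) CUD(int) error          { return nil }
func (nopHandler) CUF(int) error          { return nil }
func (nopHandler) CUB(int) error          { return nil }
func (nopHandler) CNL(int) error          { return nil }
func (nopHandler) CPL(int) error          { return nil }
func (nopHandler) CHA(int) error          { return nil }
func (nopHandler) VPA(int) error          { return nil }
func (nopHandler) CUP(int, int) error     { return nil }
func (nopHandler) HVP(int, int) error     { return nil }
func (nopHandler) DECTCEM(bool) error     { return nil }
func (nopHandler) DECOM(bool) error       { return nil }
func (nopHandler) DECCOLM(bool) error     { return nil }
func (nopHandler) ED(int) error           { return nil }
func (nopHandler) EL(int) error           { return nil }
func (nopHandler) IL(int) error           { return nil }
func (nopHandler) DL(int) error           { return nil }
func (nopHandler) ICH(int) error          { return nil }
func (nopHandler) DCH(int) error          { return nil }
func (nopHandler) SGR([]int) error        { return nil }
func (nopHandler) SU(int) error           { return nil }
func (nopHandler) SD(int) error           { return nil }
func (nopHandler) DA([]string) error      { return nil }
func (nopHandler) DECSTBM(int, int) error { return nil }
func (nopHandler) IND() error             { return nil }
func (nopHandler) RI() error              { return nil }
func (nopHandler) Flush() error           { return nil }

func main() {
	p := ansiterm.CreateParser("Ground", nopHandler{})
	// ESC [ 2 J is Erase in Display; here it just exercises the state machine.
	if _, err := p.Parse([]byte("\x1b[2J")); err != nil {
		panic(err)
	}
}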


@ -0,0 +1,103 @@
package ansiterm
import (
"strconv"
)
func parseParams(bytes []byte) ([]string, error) {
paramBuff := make([]byte, 0, 0)
params := []string{}
for _, v := range bytes {
if v == ';' {
if len(paramBuff) > 0 {
// Completed parameter, append it to the list
s := string(paramBuff)
params = append(params, s)
paramBuff = make([]byte, 0, 0)
}
} else {
paramBuff = append(paramBuff, v)
}
}
// Last parameter may not be terminated with ';'
if len(paramBuff) > 0 {
s := string(paramBuff)
params = append(params, s)
}
logger.Infof("Parsed params: %v with length: %d", params, len(params))
return params, nil
}
func parseCmd(context ansiContext) (string, error) {
return string(context.currentChar), nil
}
func getInt(params []string, dflt int) int {
i := getInts(params, 1, dflt)[0]
logger.Infof("getInt: %v", i)
return i
}
func getInts(params []string, minCount int, dflt int) []int {
ints := []int{}
for _, v := range params {
i, _ := strconv.Atoi(v)
// Zero is mapped to the default value in VT100.
if i == 0 {
i = dflt
}
ints = append(ints, i)
}
if len(ints) < minCount {
remaining := minCount - len(ints)
for i := 0; i < remaining; i++ {
ints = append(ints, dflt)
}
}
logger.Infof("getInts: %v", ints)
return ints
}
func (ap *AnsiParser) modeDispatch(param string, set bool) error {
switch param {
case "?3":
return ap.eventHandler.DECCOLM(set)
case "?6":
return ap.eventHandler.DECOM(set)
case "?25":
return ap.eventHandler.DECTCEM(set)
}
return nil
}
func (ap *AnsiParser) hDispatch(params []string) error {
if len(params) == 1 {
return ap.modeDispatch(params[0], true)
}
return nil
}
func (ap *AnsiParser) lDispatch(params []string) error {
if len(params) == 1 {
return ap.modeDispatch(params[0], false)
}
return nil
}
func getEraseParam(params []string) int {
param := getInt(params, 0)
if param < 0 || 3 < param {
param = 0
}
return param
}

vendor/github.com/Azure/go-ansiterm/parser_actions.go generated vendored Normal file

@ -0,0 +1,122 @@
package ansiterm
import (
"fmt"
)
func (ap *AnsiParser) collectParam() error {
currChar := ap.context.currentChar
logger.Infof("collectParam %#x", currChar)
ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
return nil
}
func (ap *AnsiParser) collectInter() error {
currChar := ap.context.currentChar
logger.Infof("collectInter %#x", currChar)
ap.context.paramBuffer = append(ap.context.interBuffer, currChar)
return nil
}
func (ap *AnsiParser) escDispatch() error {
cmd, _ := parseCmd(*ap.context)
intermeds := ap.context.interBuffer
logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar)
logger.Infof("escDispatch: %v(%v)", cmd, intermeds)
switch cmd {
case "D": // IND
return ap.eventHandler.IND()
case "E": // NEL, equivalent to CRLF
err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
if err == nil {
err = ap.eventHandler.Execute(ANSI_LINE_FEED)
}
return err
case "M": // RI
return ap.eventHandler.RI()
}
return nil
}
func (ap *AnsiParser) csiDispatch() error {
cmd, _ := parseCmd(*ap.context)
params, _ := parseParams(ap.context.paramBuffer)
logger.Infof("csiDispatch: %v(%v)", cmd, params)
switch cmd {
case "@":
return ap.eventHandler.ICH(getInt(params, 1))
case "A":
return ap.eventHandler.CUU(getInt(params, 1))
case "B":
return ap.eventHandler.CUD(getInt(params, 1))
case "C":
return ap.eventHandler.CUF(getInt(params, 1))
case "D":
return ap.eventHandler.CUB(getInt(params, 1))
case "E":
return ap.eventHandler.CNL(getInt(params, 1))
case "F":
return ap.eventHandler.CPL(getInt(params, 1))
case "G":
return ap.eventHandler.CHA(getInt(params, 1))
case "H":
ints := getInts(params, 2, 1)
x, y := ints[0], ints[1]
return ap.eventHandler.CUP(x, y)
case "J":
param := getEraseParam(params)
return ap.eventHandler.ED(param)
case "K":
param := getEraseParam(params)
return ap.eventHandler.EL(param)
case "L":
return ap.eventHandler.IL(getInt(params, 1))
case "M":
return ap.eventHandler.DL(getInt(params, 1))
case "P":
return ap.eventHandler.DCH(getInt(params, 1))
case "S":
return ap.eventHandler.SU(getInt(params, 1))
case "T":
return ap.eventHandler.SD(getInt(params, 1))
case "c":
return ap.eventHandler.DA(params)
case "d":
return ap.eventHandler.VPA(getInt(params, 1))
case "f":
ints := getInts(params, 2, 1)
x, y := ints[0], ints[1]
return ap.eventHandler.HVP(x, y)
case "h":
return ap.hDispatch(params)
case "l":
return ap.lDispatch(params)
case "m":
return ap.eventHandler.SGR(getInts(params, 1, 0))
case "r":
ints := getInts(params, 2, 1)
top, bottom := ints[0], ints[1]
return ap.eventHandler.DECSTBM(top, bottom)
default:
logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context))
return nil
}
}
func (ap *AnsiParser) print() error {
return ap.eventHandler.Print(ap.context.currentChar)
}
func (ap *AnsiParser) clear() error {
ap.context = &ansiContext{}
return nil
}
func (ap *AnsiParser) execute() error {
return ap.eventHandler.Execute(ap.context.currentChar)
}
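
As a concrete trace of csiDispatch above: for ESC [ 2 ; 1 0 H the parameter buffer holds "2;10" when the final byte arrives, parseParams splits it into ["2" "10"], getInts(params, 2, 1) yields [2 10], and the "H" case calls CUP(2, 10). A tiny standalone restatement of that arithmetic (not the vendored functions themselves):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Mirrors, in miniature, what csiDispatch does with the buffered parameters
// for the sequence ESC [ 2 ; 1 0 H (cursor position).
func main() {
	paramBuffer := "2;10"                     // what collectParam accumulated before the final 'H'
	params := strings.Split(paramBuffer, ";") // parseParams: ["2", "10"]

	ints := make([]int, 0, 2)
	for _, p := range params {
		n, _ := strconv.Atoi(p)
		if n == 0 { // getInts maps zero to the default value (1 here)
			n = 1
		}
		ints = append(ints, n)
	}
	for len(ints) < 2 { // getInts pads to minCount with the default
		ints = append(ints, 1)
	}

	fmt.Printf("CUP(%d, %d)\n", ints[0], ints[1]) // the 'H' case: CUP(2, 10)
}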

vendor/github.com/Azure/go-ansiterm/states.go generated vendored Normal file

@ -0,0 +1,71 @@
package ansiterm
type stateID int
type state interface {
Enter() error
Exit() error
Handle(byte) (state, error)
Name() string
Transition(state) error
}
type baseState struct {
name string
parser *AnsiParser
}
func (base baseState) Enter() error {
return nil
}
func (base baseState) Exit() error {
return nil
}
func (base baseState) Handle(b byte) (s state, e error) {
switch {
case b == CSI_ENTRY:
return base.parser.csiEntry, nil
case b == DCS_ENTRY:
return base.parser.dcsEntry, nil
case b == ANSI_ESCAPE_PRIMARY:
return base.parser.escape, nil
case b == OSC_STRING:
return base.parser.oscString, nil
case sliceContains(toGroundBytes, b):
return base.parser.ground, nil
}
return nil, nil
}
func (base baseState) Name() string {
return base.name
}
func (base baseState) Transition(s state) error {
if s == base.parser.ground {
execBytes := []byte{0x18}
execBytes = append(execBytes, 0x1A)
execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
execBytes = append(execBytes, 0x99)
execBytes = append(execBytes, 0x9A)
if sliceContains(execBytes, base.parser.context.currentChar) {
return base.parser.execute()
}
}
return nil
}
type dcsEntryState struct {
baseState
}
type errorState struct {
baseState
}

vendor/github.com/Azure/go-ansiterm/utilities.go generated vendored Normal file

@ -0,0 +1,21 @@
package ansiterm
import (
"strconv"
)
func sliceContains(bytes []byte, b byte) bool {
for _, v := range bytes {
if v == b {
return true
}
}
return false
}
func convertBytesToInteger(bytes []byte) int {
s := string(bytes)
i, _ := strconv.Atoi(s)
return i
}

vendor/github.com/Azure/go-ansiterm/winterm/ansi.go generated vendored Normal file

@ -0,0 +1,182 @@
// +build windows
package winterm
import (
"fmt"
"os"
"strconv"
"strings"
"syscall"
"github.com/Azure/go-ansiterm"
)
// Windows keyboard constants
// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
const (
VK_PRIOR = 0x21 // PAGE UP key
VK_NEXT = 0x22 // PAGE DOWN key
VK_END = 0x23 // END key
VK_HOME = 0x24 // HOME key
VK_LEFT = 0x25 // LEFT ARROW key
VK_UP = 0x26 // UP ARROW key
VK_RIGHT = 0x27 // RIGHT ARROW key
VK_DOWN = 0x28 // DOWN ARROW key
VK_SELECT = 0x29 // SELECT key
VK_PRINT = 0x2A // PRINT key
VK_EXECUTE = 0x2B // EXECUTE key
VK_SNAPSHOT = 0x2C // PRINT SCREEN key
VK_INSERT = 0x2D // INS key
VK_DELETE = 0x2E // DEL key
VK_HELP = 0x2F // HELP key
VK_F1 = 0x70 // F1 key
VK_F2 = 0x71 // F2 key
VK_F3 = 0x72 // F3 key
VK_F4 = 0x73 // F4 key
VK_F5 = 0x74 // F5 key
VK_F6 = 0x75 // F6 key
VK_F7 = 0x76 // F7 key
VK_F8 = 0x77 // F8 key
VK_F9 = 0x78 // F9 key
VK_F10 = 0x79 // F10 key
VK_F11 = 0x7A // F11 key
VK_F12 = 0x7B // F12 key
RIGHT_ALT_PRESSED = 0x0001
LEFT_ALT_PRESSED = 0x0002
RIGHT_CTRL_PRESSED = 0x0004
LEFT_CTRL_PRESSED = 0x0008
SHIFT_PRESSED = 0x0010
NUMLOCK_ON = 0x0020
SCROLLLOCK_ON = 0x0040
CAPSLOCK_ON = 0x0080
ENHANCED_KEY = 0x0100
)
type ansiCommand struct {
CommandBytes []byte
Command string
Parameters []string
IsSpecial bool
}
func newAnsiCommand(command []byte) *ansiCommand {
if isCharacterSelectionCmdChar(command[1]) {
// Is Character Set Selection commands
return &ansiCommand{
CommandBytes: command,
Command: string(command),
IsSpecial: true,
}
}
// last char is command character
lastCharIndex := len(command) - 1
ac := &ansiCommand{
CommandBytes: command,
Command: string(command[lastCharIndex]),
IsSpecial: false,
}
// more than a single escape
if lastCharIndex != 0 {
start := 1
// skip if double char escape sequence
if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
start++
}
// convert this to GetNextParam method
ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
}
return ac
}
func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
if index < 0 || index >= len(ac.Parameters) {
return defaultValue
}
param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
if err != nil {
return defaultValue
}
return int16(param)
}
func (ac *ansiCommand) String() string {
return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
bytesToHex(ac.CommandBytes),
ac.Command,
strings.Join(ac.Parameters, "\",\""))
}
// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
func isAnsiCommandChar(b byte) bool {
switch {
case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
return true
case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
// non-CSI escape sequence terminator
return true
case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
// String escape sequence terminator
return true
}
return false
}
func isXtermOscSequence(command []byte, current byte) bool {
return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
}
func isCharacterSelectionCmdChar(b byte) bool {
return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
}
// bytesToHex converts a slice of bytes to a human-readable string.
func bytesToHex(b []byte) string {
hex := make([]string, len(b))
for i, ch := range b {
hex[i] = fmt.Sprintf("%X", ch)
}
return strings.Join(hex, "")
}
// ensureInRange adjusts the passed value, if necessary, to ensure it is within
// the passed min / max range.
func ensureInRange(n int16, min int16, max int16) int16 {
if n < min {
return min
} else if n > max {
return max
} else {
return n
}
}
func GetStdFile(nFile int) (*os.File, uintptr) {
var file *os.File
switch nFile {
case syscall.STD_INPUT_HANDLE:
file = os.Stdin
case syscall.STD_OUTPUT_HANDLE:
file = os.Stdout
case syscall.STD_ERROR_HANDLE:
file = os.Stderr
default:
panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
}
fd, err := syscall.GetStdHandle(nFile)
if err != nil {
panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err))
}
return file, uintptr(fd)
}

vendor/github.com/Azure/go-ansiterm/winterm/api.go generated vendored Normal file

@ -0,0 +1,322 @@
// +build windows
package winterm
import (
"fmt"
"syscall"
"unsafe"
)
//===========================================================================================================
// IMPORTANT NOTE:
//
// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
// variables) the pointers reference *before* the API completes.
//
// As a result, in those cases, the code must hint that the variables remain in active by invoking the
// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
// require unsafe pointers.
//
// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
// the garbage collector the variables remain in use if:
//
// -- The value is not a pointer (e.g., int32, struct)
// -- The value is not referenced by the method after passing the pointer to Windows
//
// See http://golang.org/doc/go1.3.
//===========================================================================================================
var (
kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo")
setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo")
setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition")
setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode")
getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute")
setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo")
writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW")
readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW")
waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject")
)
// Windows Console constants
const (
// Console modes
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
ENABLE_PROCESSED_INPUT = 0x0001
ENABLE_LINE_INPUT = 0x0002
ENABLE_ECHO_INPUT = 0x0004
ENABLE_WINDOW_INPUT = 0x0008
ENABLE_MOUSE_INPUT = 0x0010
ENABLE_INSERT_MODE = 0x0020
ENABLE_QUICK_EDIT_MODE = 0x0040
ENABLE_EXTENDED_FLAGS = 0x0080
ENABLE_PROCESSED_OUTPUT = 0x0001
ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
// Character attributes
// Note:
// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
// Clearing all foreground or background colors results in black; setting all creates white.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
FOREGROUND_BLUE uint16 = 0x0001
FOREGROUND_GREEN uint16 = 0x0002
FOREGROUND_RED uint16 = 0x0004
FOREGROUND_INTENSITY uint16 = 0x0008
FOREGROUND_MASK uint16 = 0x000F
BACKGROUND_BLUE uint16 = 0x0010
BACKGROUND_GREEN uint16 = 0x0020
BACKGROUND_RED uint16 = 0x0040
BACKGROUND_INTENSITY uint16 = 0x0080
BACKGROUND_MASK uint16 = 0x00F0
COMMON_LVB_MASK uint16 = 0xFF00
COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
COMMON_LVB_UNDERSCORE uint16 = 0x8000
// Input event types
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
KEY_EVENT = 0x0001
MOUSE_EVENT = 0x0002
WINDOW_BUFFER_SIZE_EVENT = 0x0004
MENU_EVENT = 0x0008
FOCUS_EVENT = 0x0010
// WaitForSingleObject return codes
WAIT_ABANDONED = 0x00000080
WAIT_FAILED = 0xFFFFFFFF
WAIT_SIGNALED = 0x0000000
WAIT_TIMEOUT = 0x00000102
// WaitForSingleObject wait duration
WAIT_INFINITE = 0xFFFFFFFF
WAIT_ONE_SECOND = 1000
WAIT_HALF_SECOND = 500
WAIT_QUARTER_SECOND = 250
)
// Windows API Console types
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
type (
CHAR_INFO struct {
UnicodeChar uint16
Attributes uint16
}
CONSOLE_CURSOR_INFO struct {
Size uint32
Visible int32
}
CONSOLE_SCREEN_BUFFER_INFO struct {
Size COORD
CursorPosition COORD
Attributes uint16
Window SMALL_RECT
MaximumWindowSize COORD
}
COORD struct {
X int16
Y int16
}
SMALL_RECT struct {
Left int16
Top int16
Right int16
Bottom int16
}
// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
INPUT_RECORD struct {
EventType uint16
KeyEvent KEY_EVENT_RECORD
}
KEY_EVENT_RECORD struct {
KeyDown int32
RepeatCount uint16
VirtualKeyCode uint16
VirtualScanCode uint16
UnicodeChar uint16
ControlKeyState uint32
}
WINDOW_BUFFER_SIZE struct {
Size COORD
}
)
// boolToBOOL converts a Go bool into a Windows int32.
func boolToBOOL(f bool) int32 {
if f {
return int32(1)
} else {
return int32(0)
}
}
// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
return checkError(r1, r2, err)
}
// SetConsoleCursorInfo sets the size and visiblity of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
return checkError(r1, r2, err)
}
// SetConsoleCursorPosition location of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
use(coord)
return checkError(r1, r2, err)
}
// GetConsoleMode gets the console mode for given file descriptor
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
func GetConsoleMode(handle uintptr) (mode uint32, err error) {
err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
return mode, err
}
// SetConsoleMode sets the console mode for given file descriptor
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
func SetConsoleMode(handle uintptr, mode uint32) error {
r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
use(mode)
return checkError(r1, r2, err)
}
// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
info := CONSOLE_SCREEN_BUFFER_INFO{}
err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
if err != nil {
return nil, err
}
return &info, nil
}
func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
use(scrollRect)
use(clipRect)
use(destOrigin)
use(char)
return checkError(r1, r2, err)
}
// SetConsoleScreenBufferSize sets the size of the console screen buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
use(coord)
return checkError(r1, r2, err)
}
// SetConsoleTextAttribute sets the attributes of characters written to the
// console screen buffer by the WriteFile or WriteConsole function.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
use(attribute)
return checkError(r1, r2, err)
}
// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
// Note that the size and location must be within and no larger than the backing console screen buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
use(isAbsolute)
use(rect)
return checkError(r1, r2, err)
}
// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
use(buffer)
use(bufferSize)
use(bufferCoord)
return checkError(r1, r2, err)
}
// ReadConsoleInput reads (and removes) data from the console input buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
use(buffer)
return checkError(r1, r2, err)
}
// WaitForSingleObject waits for the passed handle to be signaled.
// It returns true if the handle was signaled; false otherwise.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
switch r1 {
case WAIT_ABANDONED, WAIT_TIMEOUT:
return false, nil
case WAIT_SIGNALED:
return true, nil
}
use(msWait)
return false, err
}
// String helpers
func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
}
func (coord COORD) String() string {
return fmt.Sprintf("%v,%v", coord.X, coord.Y)
}
func (rect SMALL_RECT) String() string {
return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
}
// checkError evaluates the results of a Windows API call and returns the error if it failed.
func checkError(r1, r2 uintptr, err error) error {
// Windows APIs return non-zero to indicate success
if r1 != 0 {
return nil
}
// Return the error if provided, otherwise default to EINVAL
if err != nil {
return err
}
return syscall.EINVAL
}
// coordToPointer converts a COORD into a uintptr (by fooling the type system).
func coordToPointer(c COORD) uintptr {
// Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
return uintptr(*((*uint32)(unsafe.Pointer(&c))))
}
// use is a no-op, but the compiler cannot see that it is.
// Calling use(p) ensures that p is kept live until that point.
func use(p interface{}) {}


@ -0,0 +1,100 @@
// +build windows
package winterm
import "github.com/Azure/go-ansiterm"
const (
FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
)
// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
// request represented by the passed ANSI mode.
func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
switch ansiMode {
// Mode styles
case ansiterm.ANSI_SGR_BOLD:
windowsMode = windowsMode | FOREGROUND_INTENSITY
case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
windowsMode &^= FOREGROUND_INTENSITY
case ansiterm.ANSI_SGR_UNDERLINE:
windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
case ansiterm.ANSI_SGR_REVERSE:
inverted = true
case ansiterm.ANSI_SGR_REVERSE_OFF:
inverted = false
case ansiterm.ANSI_SGR_UNDERLINE_OFF:
windowsMode &^= COMMON_LVB_UNDERSCORE
// Foreground colors
case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
case ansiterm.ANSI_SGR_FOREGROUND_RED:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
// Background colors
case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
// Black with no intensity
windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
case ansiterm.ANSI_SGR_BACKGROUND_RED:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
}
return windowsMode, inverted
}
// invertAttributes inverts the foreground and background colors of a Windows attributes value
func invertAttributes(windowsMode uint16) uint16 {
return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
}


@ -0,0 +1,101 @@
// +build windows
package winterm
const (
horizontal = iota
vertical
)
func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
if h.originMode {
sr := h.effectiveSr(info.Window)
return SMALL_RECT{
Top: sr.top,
Bottom: sr.bottom,
Left: 0,
Right: info.Size.X - 1,
}
} else {
return SMALL_RECT{
Top: info.Window.Top,
Bottom: info.Window.Bottom,
Left: 0,
Right: info.Size.X - 1,
}
}
}
// setCursorPosition sets the cursor to the specified position, bounded to the screen size
func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
position.X = ensureInRange(position.X, window.Left, window.Right)
position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
err := SetConsoleCursorPosition(h.fd, position)
if err != nil {
return err
}
logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y)
return err
}
func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
return h.moveCursor(vertical, param)
}
func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
return h.moveCursor(horizontal, param)
}
func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
position := info.CursorPosition
switch moveMode {
case horizontal:
position.X += int16(param)
case vertical:
position.Y += int16(param)
}
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
position := info.CursorPosition
position.X = 0
position.Y += int16(param)
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
position := info.CursorPosition
position.X = int16(param) - 1
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
return err
}
return nil
}


@ -0,0 +1,84 @@
// +build windows
package winterm
import "github.com/Azure/go-ansiterm"
func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
// Ignore an invalid (negative area) request
if toCoord.Y < fromCoord.Y {
return nil
}
var err error
var coordStart = COORD{}
var coordEnd = COORD{}
xCurrent, yCurrent := fromCoord.X, fromCoord.Y
xEnd, yEnd := toCoord.X, toCoord.Y
// Clear any partial initial line
if xCurrent > 0 {
coordStart.X, coordStart.Y = xCurrent, yCurrent
coordEnd.X, coordEnd.Y = xEnd, yCurrent
err = h.clearRect(attributes, coordStart, coordEnd)
if err != nil {
return err
}
xCurrent = 0
yCurrent += 1
}
// Clear intervening rectangular section
if yCurrent < yEnd {
coordStart.X, coordStart.Y = xCurrent, yCurrent
coordEnd.X, coordEnd.Y = xEnd, yEnd-1
err = h.clearRect(attributes, coordStart, coordEnd)
if err != nil {
return err
}
xCurrent = 0
yCurrent = yEnd
}
// Clear remaining partial ending line
coordStart.X, coordStart.Y = xCurrent, yCurrent
coordEnd.X, coordEnd.Y = xEnd, yEnd
err = h.clearRect(attributes, coordStart, coordEnd)
if err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
width := toCoord.X - fromCoord.X + 1
height := toCoord.Y - fromCoord.Y + 1
size := uint32(width) * uint32(height)
if size <= 0 {
return nil
}
buffer := make([]CHAR_INFO, size)
char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
for i := 0; i < int(size); i++ {
buffer[i] = char
}
err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
if err != nil {
return err
}
return nil
}
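
clearRange above splits a (from, to) span into at most three clearRect calls: the partial first line, the block of intermediate lines, and the remaining final line. A standalone sketch of just that splitting logic (hypothetical coordXY/decompose names, no console calls) may make the geometry easier to follow:

```go
package main

import "fmt"

// coordXY stands in for the winterm COORD type; purely illustrative.
type coordXY struct{ X, Y int16 }

// decompose mirrors clearRange's splitting logic without touching the console.
func decompose(from, to coordXY) (rects [][2]coordXY) {
	x, y := from.X, from.Y
	if x > 0 { // partial first line
		rects = append(rects, [2]coordXY{{x, y}, {to.X, y}})
		x, y = 0, y+1
	}
	if y < to.Y { // block of intermediate lines
		rects = append(rects, [2]coordXY{{x, y}, {to.X, to.Y - 1}})
		x, y = 0, to.Y
	}
	rects = append(rects, [2]coordXY{{x, y}, {to.X, to.Y}}) // remaining final line
	return rects
}

func main() {
	// Clearing from (5,2) to (79,7) yields (5,2)-(79,2), (0,3)-(79,6), (0,7)-(79,7).
	fmt.Println(decompose(coordXY{5, 2}, coordXY{79, 7}))
}
```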

View File

@ -0,0 +1,118 @@
// +build windows
package winterm
// effectiveSr gets the current effective scroll region in buffer coordinates
func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
if top >= bottom {
top = window.Top
bottom = window.Bottom
}
return scrollRegion{top: top, bottom: bottom}
}
func (h *windowsAnsiEventHandler) scrollUp(param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
sr := h.effectiveSr(info.Window)
return h.scroll(param, sr, info)
}
func (h *windowsAnsiEventHandler) scrollDown(param int) error {
return h.scrollUp(-param)
}
func (h *windowsAnsiEventHandler) deleteLines(param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
start := info.CursorPosition.Y
sr := h.effectiveSr(info.Window)
// Lines cannot be inserted or deleted outside the scrolling region.
if start >= sr.top && start <= sr.bottom {
sr.top = start
return h.scroll(param, sr, info)
} else {
return nil
}
}
func (h *windowsAnsiEventHandler) insertLines(param int) error {
return h.deleteLines(-param)
}
// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
// Copy from and clip to the scroll region (full buffer width)
scrollRect := SMALL_RECT{
Top: sr.top,
Bottom: sr.bottom,
Left: 0,
Right: info.Size.X - 1,
}
// Origin to which area should be copied
destOrigin := COORD{
X: 0,
Y: sr.top - int16(param),
}
char := CHAR_INFO{
UnicodeChar: ' ',
Attributes: h.attributes,
}
if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
return h.scrollLine(param, info.CursorPosition, info)
}
func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
return h.deleteCharacters(-param)
}
// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
// Copy from and clip to the scroll region (full buffer width)
scrollRect := SMALL_RECT{
Top: position.Y,
Bottom: position.Y,
Left: position.X,
Right: info.Size.X - 1,
}
// Origin to which area should be copied
destOrigin := COORD{
X: position.X - int16(columns),
Y: position.Y,
}
char := CHAR_INFO{
UnicodeChar: ' ',
Attributes: h.attributes,
}
if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
return err
}
return nil
}
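
scrollDown and insertLines are simply negative-parameter calls into scrollUp and deleteLines; the sign only changes the destination row that ScrollConsoleScreenBuffer copies the region to. A tiny runnable sketch of that arithmetic (hypothetical destTop helper, no console calls):

```go
package main

import "fmt"

// destTop mirrors the destOrigin.Y computation in scroll above: the region is
// copied onto itself with its top moved by param rows, so a positive param
// scrolls content up and a negative param scrolls it down.
func destTop(srTop int16, param int) int16 {
	return srTop - int16(param)
}

func main() {
	fmt.Println(destTop(5, 1))  // 4: scroll up by one line
	fmt.Println(destTop(5, -1)) // 6: scroll down by one line
}
```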

View File

@ -0,0 +1,9 @@
// +build windows
package winterm
// addInRange increments a value by the passed quantity while ensuring the result
// always remains within the supplied min / max range.
func addInRange(n int16, increment int16, min int16, max int16) int16 {
return ensureInRange(n+increment, min, max)
}
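
addInRange delegates the clamping to ensureInRange, which lives elsewhere in this vendored package and is not part of this hunk. A minimal sketch of the behavior it presumably provides, shown only to make addInRange self-explanatory:

```go
package main

import "fmt"

// Hypothetical sketch of ensureInRange, the clamping helper addInRange builds
// on; the real function is defined elsewhere in this package.
func ensureInRange(n int16, min int16, max int16) int16 {
	if n < min {
		return min
	}
	if n > max {
		return max
	}
	return n
}

func main() {
	fmt.Println(ensureInRange(12, 0, 9)) // 9: clamped to max
	fmt.Println(ensureInRange(-3, 0, 9)) // 0: clamped to min
	fmt.Println(ensureInRange(5, 0, 9))  // 5: already in range
}
```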

View File

@ -0,0 +1,726 @@
// +build windows
package winterm
import (
"bytes"
"io/ioutil"
"os"
"strconv"
"github.com/Azure/go-ansiterm"
"github.com/Sirupsen/logrus"
)
var logger *logrus.Logger
type windowsAnsiEventHandler struct {
fd uintptr
file *os.File
infoReset *CONSOLE_SCREEN_BUFFER_INFO
sr scrollRegion
buffer bytes.Buffer
attributes uint16
inverted bool
wrapNext bool
drewMarginByte bool
originMode bool
marginByte byte
curInfo *CONSOLE_SCREEN_BUFFER_INFO
curPos COORD
}
func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler {
logFile := ioutil.Discard
if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
logFile, _ = os.Create("winEventHandler.log")
}
logger = &logrus.Logger{
Out: logFile,
Formatter: new(logrus.TextFormatter),
Level: logrus.DebugLevel,
}
infoReset, err := GetConsoleScreenBufferInfo(fd)
if err != nil {
return nil
}
return &windowsAnsiEventHandler{
fd: fd,
file: file,
infoReset: infoReset,
attributes: infoReset.Attributes,
}
}
type scrollRegion struct {
top int16
bottom int16
}
// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
// current cursor position and scroll region settings, in which case it returns
// true. If no special handling is necessary, then it does nothing and returns
// false.
//
// In the false case, the caller should ensure that a carriage return
// and line feed are inserted or that the text is otherwise wrapped.
func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
if h.wrapNext {
if err := h.Flush(); err != nil {
return false, err
}
h.clearWrap()
}
pos, info, err := h.getCurrentInfo()
if err != nil {
return false, err
}
sr := h.effectiveSr(info.Window)
if pos.Y == sr.bottom {
// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
// is the full window.
if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
if includeCR {
pos.X = 0
h.updatePos(pos)
}
return false, nil
}
// A custom scroll region is active. Scroll the window manually to simulate
// the LF.
if err := h.Flush(); err != nil {
return false, err
}
logger.Info("Simulating LF inside scroll region")
if err := h.scrollUp(1); err != nil {
return false, err
}
if includeCR {
pos.X = 0
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return false, err
}
}
return true, nil
} else if pos.Y < info.Window.Bottom {
// Let Windows handle the LF.
pos.Y++
if includeCR {
pos.X = 0
}
h.updatePos(pos)
return false, nil
} else {
// The cursor is at the bottom of the screen but outside the scroll
// region. Skip the LF.
logger.Info("Simulating LF outside scroll region")
if includeCR {
if err := h.Flush(); err != nil {
return false, err
}
pos.X = 0
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return false, err
}
}
return true, nil
}
}
// executeLF executes a LF without a CR.
func (h *windowsAnsiEventHandler) executeLF() error {
handled, err := h.simulateLF(false)
if err != nil {
return err
}
if !handled {
// Windows LF will reset the cursor column position. Write the LF
// and restore the cursor position.
pos, _, err := h.getCurrentInfo()
if err != nil {
return err
}
h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
if pos.X != 0 {
if err := h.Flush(); err != nil {
return err
}
logger.Info("Resetting cursor position for LF without CR")
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return err
}
}
}
return nil
}
func (h *windowsAnsiEventHandler) Print(b byte) error {
if h.wrapNext {
h.buffer.WriteByte(h.marginByte)
h.clearWrap()
if _, err := h.simulateLF(true); err != nil {
return err
}
}
pos, info, err := h.getCurrentInfo()
if err != nil {
return err
}
if pos.X == info.Size.X-1 {
h.wrapNext = true
h.marginByte = b
} else {
pos.X++
h.updatePos(pos)
h.buffer.WriteByte(b)
}
return nil
}
func (h *windowsAnsiEventHandler) Execute(b byte) error {
switch b {
case ansiterm.ANSI_TAB:
logger.Info("Execute(TAB)")
// Move to the next tab stop, but preserve auto-wrap if already set.
if !h.wrapNext {
pos, info, err := h.getCurrentInfo()
if err != nil {
return err
}
pos.X = (pos.X + 8) - pos.X%8
if pos.X >= info.Size.X {
pos.X = info.Size.X - 1
}
if err := h.Flush(); err != nil {
return err
}
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return err
}
}
return nil
case ansiterm.ANSI_BEL:
h.buffer.WriteByte(ansiterm.ANSI_BEL)
return nil
case ansiterm.ANSI_BACKSPACE:
if h.wrapNext {
if err := h.Flush(); err != nil {
return err
}
h.clearWrap()
}
pos, _, err := h.getCurrentInfo()
if err != nil {
return err
}
if pos.X > 0 {
pos.X--
h.updatePos(pos)
h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
}
return nil
case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
// Treat as true LF.
return h.executeLF()
case ansiterm.ANSI_LINE_FEED:
// Simulate a CR and LF for now since there is no way in go-ansiterm
// to tell if the LF should include CR (and more things break when it's
// missing than when it's incorrectly added).
handled, err := h.simulateLF(true)
if handled || err != nil {
return err
}
return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
case ansiterm.ANSI_CARRIAGE_RETURN:
if h.wrapNext {
if err := h.Flush(); err != nil {
return err
}
h.clearWrap()
}
pos, _, err := h.getCurrentInfo()
if err != nil {
return err
}
if pos.X != 0 {
pos.X = 0
h.updatePos(pos)
h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
}
return nil
default:
return nil
}
}
func (h *windowsAnsiEventHandler) CUU(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorVertical(-param)
}
func (h *windowsAnsiEventHandler) CUD(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorVertical(param)
}
func (h *windowsAnsiEventHandler) CUF(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorHorizontal(param)
}
func (h *windowsAnsiEventHandler) CUB(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorHorizontal(-param)
}
func (h *windowsAnsiEventHandler) CNL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorLine(param)
}
func (h *windowsAnsiEventHandler) CPL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorLine(-param)
}
func (h *windowsAnsiEventHandler) CHA(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorColumn(param)
}
func (h *windowsAnsiEventHandler) VPA(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("VPA: [[%d]]", param)
h.clearWrap()
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
window := h.getCursorWindow(info)
position := info.CursorPosition
position.Y = window.Top + int16(param) - 1
return h.setCursorPosition(position, window)
}
func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUP: [[%d %d]]", row, col)
h.clearWrap()
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
window := h.getCursorWindow(info)
position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
return h.setCursorPosition(position, window)
}
func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("HVP: [[%d %d]]", row, col)
h.clearWrap()
return h.CUP(row, col)
}
func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
h.clearWrap()
return nil
}
func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)})
h.clearWrap()
h.originMode = enable
return h.CUP(1, 1)
}
func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
h.clearWrap()
if err := h.ED(2); err != nil {
return err
}
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
targetWidth := int16(80)
if use132 {
targetWidth = 132
}
if info.Size.X < targetWidth {
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
logger.Info("set buffer failed:", err)
return err
}
}
window := info.Window
window.Left = 0
window.Right = targetWidth - 1
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
logger.Info("set window failed:", err)
return err
}
if info.Size.X > targetWidth {
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
logger.Info("set buffer failed:", err)
return err
}
}
return SetConsoleCursorPosition(h.fd, COORD{0, 0})
}
func (h *windowsAnsiEventHandler) ED(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("ED: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
// [J -- Erases from the cursor to the end of the screen, including the cursor position.
// [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
// [2J -- Erases the complete display. The cursor does not move.
// Notes:
// -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
var start COORD
var end COORD
switch param {
case 0:
start = info.CursorPosition
end = COORD{info.Size.X - 1, info.Size.Y - 1}
case 1:
start = COORD{0, 0}
end = info.CursorPosition
case 2:
start = COORD{0, 0}
end = COORD{info.Size.X - 1, info.Size.Y - 1}
}
err = h.clearRange(h.attributes, start, end)
if err != nil {
return err
}
// If the whole buffer was cleared, move the window to the top while preserving
// the window-relative cursor position.
if param == 2 {
pos := info.CursorPosition
window := info.Window
pos.Y -= window.Top
window.Bottom -= window.Top
window.Top = 0
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return err
}
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
return err
}
}
return nil
}
func (h *windowsAnsiEventHandler) EL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("EL: [%v]", strconv.Itoa(param))
h.clearWrap()
// [K -- Erases from the cursor to the end of the line, including the cursor position.
// [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
// [2K -- Erases the complete line.
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
var start COORD
var end COORD
switch param {
case 0:
start = info.CursorPosition
end = COORD{info.Size.X, info.CursorPosition.Y}
case 1:
start = COORD{0, info.CursorPosition.Y}
end = info.CursorPosition
case 2:
start = COORD{0, info.CursorPosition.Y}
end = COORD{info.Size.X, info.CursorPosition.Y}
}
err = h.clearRange(h.attributes, start, end)
if err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) IL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("IL: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.insertLines(param)
}
func (h *windowsAnsiEventHandler) DL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DL: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.deleteLines(param)
}
func (h *windowsAnsiEventHandler) ICH(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("ICH: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.insertCharacters(param)
}
func (h *windowsAnsiEventHandler) DCH(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DCH: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.deleteCharacters(param)
}
func (h *windowsAnsiEventHandler) SGR(params []int) error {
if err := h.Flush(); err != nil {
return err
}
strings := []string{}
for _, v := range params {
strings = append(strings, strconv.Itoa(v))
}
logger.Infof("SGR: [%v]", strings)
if len(params) <= 0 {
h.attributes = h.infoReset.Attributes
h.inverted = false
} else {
for _, attr := range params {
if attr == ansiterm.ANSI_SGR_RESET {
h.attributes = h.infoReset.Attributes
h.inverted = false
continue
}
h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
}
}
attributes := h.attributes
if h.inverted {
attributes = invertAttributes(attributes)
}
err := SetConsoleTextAttribute(h.fd, attributes)
if err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) SU(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("SU: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.scrollUp(param)
}
func (h *windowsAnsiEventHandler) SD(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("SD: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.scrollDown(param)
}
func (h *windowsAnsiEventHandler) DA(params []string) error {
logger.Infof("DA: [%v]", params)
// DA cannot be implemented because it must send data on the VT100 input stream,
// which is not available to go-ansiterm.
return nil
}
func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECSTBM: [%d, %d]", top, bottom)
// The Windows console buffer is 0-indexed, while the ANSI DECSTBM parameters are 1-indexed
h.sr.top = int16(top - 1)
h.sr.bottom = int16(bottom - 1)
// This command also moves the cursor to the origin.
h.clearWrap()
return h.CUP(1, 1)
}
func (h *windowsAnsiEventHandler) RI() error {
if err := h.Flush(); err != nil {
return err
}
logger.Info("RI: []")
h.clearWrap()
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
sr := h.effectiveSr(info.Window)
if info.CursorPosition.Y == sr.top {
return h.scrollDown(1)
}
return h.moveCursorVertical(-1)
}
func (h *windowsAnsiEventHandler) IND() error {
logger.Info("IND: []")
return h.executeLF()
}
func (h *windowsAnsiEventHandler) Flush() error {
h.curInfo = nil
if h.buffer.Len() > 0 {
logger.Infof("Flush: [%s]", h.buffer.Bytes())
if _, err := h.buffer.WriteTo(h.file); err != nil {
return err
}
}
if h.wrapNext && !h.drewMarginByte {
logger.Infof("Flush: drawing margin byte '%c'", h.marginByte)
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
size := COORD{1, 1}
position := COORD{0, 0}
region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
return err
}
h.drewMarginByte = true
}
return nil
}
// getCurrentInfo ensures that the current console screen information has been queried
// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
if h.curInfo == nil {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return COORD{}, nil, err
}
h.curInfo = info
h.curPos = info.CursorPosition
}
return h.curPos, h.curInfo, nil
}
func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
if h.curInfo == nil {
panic("failed to call getCurrentInfo before calling updatePos")
}
h.curPos = pos
}
// clearWrap clears the state where the cursor is in the margin
// waiting for the next character before wrapping the line. This must
// be done before most operations that act on the cursor.
func (h *windowsAnsiEventHandler) clearWrap() {
h.wrapNext = false
h.drewMarginByte = false
}
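
For context, this event handler is normally driven by the go-ansiterm parser: bytes written to a wrapped stdout/stderr are fed through the parser, which invokes the handler methods above for each recognized escape sequence. A hedged, Windows-only sketch of that wiring follows; the CreateParser("Ground", ...)/Parse usage mirrors how Docker's terminal emulation consumes this package and is an assumption, not part of this commit:

```go
// +build windows

package main

import (
	"os"

	"github.com/Azure/go-ansiterm"
	"github.com/Azure/go-ansiterm/winterm"
)

func main() {
	out := os.Stdout
	handler := winterm.CreateWinEventHandler(out.Fd(), out)
	if handler == nil {
		panic("stdout is not attached to a Windows console")
	}
	// The parser translates escape sequences in the byte stream into calls on
	// the event handler defined above.
	parser := ansiterm.CreateParser("Ground", handler)
	if _, err := parser.Parse([]byte("\x1b[2J\x1b[1;1HHello, console\r\n")); err != nil {
		panic(err)
	}
}
```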

201
vendor/github.com/containerd/console/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

17
vendor/github.com/containerd/console/README.md generated vendored Normal file
View File

@ -0,0 +1,17 @@
# console
[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console)
Golang package for dealing with consoles. Light on deps and a simple API.
## Modifying the current process
```go
current := console.Current()
defer current.Reset()
if err := current.SetRaw(); err != nil {
panic(err)
}
ws, err := current.Size()
if err != nil {
panic(err)
}
current.Resize(ws)
```

52
vendor/github.com/containerd/console/console.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
package console
import (
"errors"
"io"
"os"
)
var ErrNotAConsole = errors.New("provided file is not a console")
type Console interface {
io.Reader
io.Writer
io.Closer
// Resize resizes the console to the provided window size
Resize(WinSize) error
// ResizeFrom resizes the calling console to the size of the
// provided console
ResizeFrom(Console) error
// SetRaw sets the console in raw mode
SetRaw() error
// DisableEcho disables echo on the console
DisableEcho() error
// Reset restores the console to its original state
Reset() error
// Size returns the window size of the console
Size() (WinSize, error)
}
// WinSize specifies the window size of the console
type WinSize struct {
// Height of the console
Height uint16
// Width of the console
Width uint16
x uint16
y uint16
}
// Current returns the current process's console
func Current() Console {
return newMaster(os.Stdin)
}
// ConsoleFromFile returns a console using the provided file
func ConsoleFromFile(f *os.File) (Console, error) {
if err := checkConsole(f); err != nil {
return nil, err
}
return newMaster(f), nil
}
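
A short usage sketch for ConsoleFromFile: probe an arbitrary *os.File and fall back when it is not backed by a terminal. The ErrNotAConsole comparison relies on the Unix checkConsole implementation below; on Windows the raw error is returned instead:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containerd/console"
)

func main() {
	// Probe stderr: ConsoleFromFile fails when the file is not a terminal
	// (for example, when stderr is redirected to a regular file).
	c, err := console.ConsoleFromFile(os.Stderr)
	if err == console.ErrNotAConsole {
		fmt.Println("stderr is not a console")
		return
	} else if err != nil {
		panic(err)
	}
	ws, err := c.Size()
	if err != nil {
		panic(err)
	}
	fmt.Printf("console is %d columns x %d rows\n", ws.Width, ws.Height)
}
```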

130
vendor/github.com/containerd/console/console_unix.go generated vendored Normal file
View File

@ -0,0 +1,130 @@
// +build darwin freebsd linux
package console
import (
"os"
"unsafe"
"golang.org/x/sys/unix"
)
// NewPty creates a new pty pair
// The master is returned as the first console and a string
// with the path to the pty slave is returned as the second
func NewPty() (Console, string, error) {
f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, "", err
}
if err := saneTerminal(f); err != nil {
return nil, "", err
}
slave, err := ptsname(f)
if err != nil {
return nil, "", err
}
if err := unlockpt(f); err != nil {
return nil, "", err
}
return &master{
f: f,
}, slave, nil
}
type master struct {
f *os.File
original *unix.Termios
}
func (m *master) Read(b []byte) (int, error) {
return m.f.Read(b)
}
func (m *master) Write(b []byte) (int, error) {
return m.f.Write(b)
}
func (m *master) Close() error {
return m.f.Close()
}
func (m *master) Resize(ws WinSize) error {
return ioctl(
m.f.Fd(),
uintptr(unix.TIOCSWINSZ),
uintptr(unsafe.Pointer(&ws)),
)
}
func (m *master) ResizeFrom(c Console) error {
ws, err := c.Size()
if err != nil {
return err
}
return m.Resize(ws)
}
func (m *master) Reset() error {
if m.original == nil {
return nil
}
return tcset(m.f.Fd(), m.original)
}
func (m *master) getCurrent() (unix.Termios, error) {
var termios unix.Termios
if err := tcget(m.f.Fd(), &termios); err != nil {
return unix.Termios{}, err
}
if m.original == nil {
m.original = &termios
}
return termios, nil
}
func (m *master) SetRaw() error {
rawState, err := m.getCurrent()
if err != nil {
return err
}
rawState = cfmakeraw(rawState)
rawState.Oflag = rawState.Oflag | unix.OPOST
return tcset(m.f.Fd(), &rawState)
}
func (m *master) DisableEcho() error {
rawState, err := m.getCurrent()
if err != nil {
return err
}
rawState.Lflag = rawState.Lflag &^ unix.ECHO
return tcset(m.f.Fd(), &rawState)
}
func (m *master) Size() (WinSize, error) {
var ws WinSize
if err := ioctl(
m.f.Fd(),
uintptr(unix.TIOCGWINSZ),
uintptr(unsafe.Pointer(&ws)),
); err != nil {
return ws, err
}
return ws, nil
}
// checkConsole checks if the provided file is a console
func checkConsole(f *os.File) error {
var termios unix.Termios
if tcget(f.Fd(), &termios) != nil {
return ErrNotAConsole
}
return nil
}
func newMaster(f *os.File) Console {
return &master{
f: f,
}
}
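
A minimal sketch (Unix only) of the NewPty helper above: allocate a pty pair, open the slave by the returned path, and configure the master. All values are illustrative:

```go
// +build darwin freebsd linux

package main

import (
	"fmt"
	"os"

	"github.com/containerd/console"
)

func main() {
	master, slavePath, err := console.NewPty()
	if err != nil {
		panic(err)
	}
	defer master.Close()

	// The slave side is opened by path; a child process would normally use it
	// as its controlling terminal.
	slave, err := os.OpenFile(slavePath, os.O_RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer slave.Close()

	if err := master.SetRaw(); err != nil {
		panic(err)
	}
	defer master.Reset()

	if err := master.Resize(console.WinSize{Height: 24, Width: 80}); err != nil {
		panic(err)
	}
	fmt.Println("allocated pty slave at", slavePath)
}
```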

199
vendor/github.com/containerd/console/console_windows.go generated vendored Normal file
View File

@ -0,0 +1,199 @@
package console
import (
"fmt"
"os"
"github.com/Azure/go-ansiterm/winterm"
"github.com/pkg/errors"
)
const (
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
enableVirtualTerminalInput = 0x0200
enableVirtualTerminalProcessing = 0x0004
disableNewlineAutoReturn = 0x0008
)
var (
vtInputSupported bool
ErrNotImplemented = errors.New("not implemented")
)
func (m *master) initStdios() {
m.in = os.Stdin.Fd()
if mode, err := winterm.GetConsoleMode(m.in); err == nil {
m.inMode = mode
// Validate that enableVirtualTerminalInput is supported, but do not set it.
if err = winterm.SetConsoleMode(m.in, mode|enableVirtualTerminalInput); err == nil {
vtInputSupported = true
}
// Unconditionally set the console mode back even on failure because SetConsoleMode
// remembers invalid bits on input handles.
winterm.SetConsoleMode(m.in, mode)
} else {
fmt.Printf("failed to get console mode for stdin: %v\n", err)
}
m.out = os.Stdout.Fd()
if mode, err := winterm.GetConsoleMode(m.out); err == nil {
m.outMode = mode
if err := winterm.SetConsoleMode(m.out, mode|enableVirtualTerminalProcessing); err == nil {
m.outMode |= enableVirtualTerminalProcessing
} else {
winterm.SetConsoleMode(m.out, m.outMode)
}
} else {
fmt.Printf("failed to get console mode for stdout: %v\n", err)
}
m.err = os.Stderr.Fd()
if mode, err := winterm.GetConsoleMode(m.err); err == nil {
m.errMode = mode
if err := winterm.SetConsoleMode(m.err, mode|enableVirtualTerminalProcessing); err == nil {
m.errMode |= enableVirtualTerminalProcessing
} else {
winterm.SetConsoleMode(m.err, m.errMode)
}
} else {
fmt.Printf("failed to get console mode for stderr: %v\n", err)
}
}
type master struct {
in uintptr
inMode uint32
out uintptr
outMode uint32
err uintptr
errMode uint32
}
func (m *master) SetRaw() error {
if err := makeInputRaw(m.in, m.inMode); err != nil {
return err
}
// Set StdOut and StdErr to raw mode, we ignore failures since
// disableNewlineAutoReturn might not be supported on this version of
// Windows.
winterm.SetConsoleMode(m.out, m.outMode|disableNewlineAutoReturn)
winterm.SetConsoleMode(m.err, m.errMode|disableNewlineAutoReturn)
return nil
}
func (m *master) Reset() error {
for _, s := range []struct {
fd uintptr
mode uint32
}{
{m.in, m.inMode},
{m.out, m.outMode},
{m.err, m.errMode},
} {
if err := winterm.SetConsoleMode(s.fd, s.mode); err != nil {
return errors.Wrap(err, "unable to restore console mode")
}
}
return nil
}
func (m *master) Size() (WinSize, error) {
info, err := winterm.GetConsoleScreenBufferInfo(m.out)
if err != nil {
return WinSize{}, errors.Wrap(err, "unable to get console info")
}
winsize := WinSize{
Width: uint16(info.Window.Right - info.Window.Left + 1),
Height: uint16(info.Window.Bottom - info.Window.Top + 1),
}
return winsize, nil
}
func (m *master) Resize(ws WinSize) error {
return ErrNotImplemented
}
func (m *master) ResizeFrom(c Console) error {
return ErrNotImplemented
}
func (m *master) DisableEcho() error {
mode := m.inMode &^ winterm.ENABLE_ECHO_INPUT
mode |= winterm.ENABLE_PROCESSED_INPUT
mode |= winterm.ENABLE_LINE_INPUT
if err := winterm.SetConsoleMode(m.in, mode); err != nil {
return errors.Wrap(err, "unable to set console to disable echo")
}
return nil
}
func (m *master) Close() error {
return nil
}
func (m *master) Read(b []byte) (int, error) {
panic("not implemented on windows")
}
func (m *master) Write(b []byte) (int, error) {
panic("not implemented on windows")
}
// makeInputRaw puts the terminal (Windows Console) connected to the given
// file descriptor into raw mode
func makeInputRaw(fd uintptr, mode uint32) error {
// See
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
// Disable these modes
mode &^= winterm.ENABLE_ECHO_INPUT
mode &^= winterm.ENABLE_LINE_INPUT
mode &^= winterm.ENABLE_MOUSE_INPUT
mode &^= winterm.ENABLE_WINDOW_INPUT
mode &^= winterm.ENABLE_PROCESSED_INPUT
// Enable these modes
mode |= winterm.ENABLE_EXTENDED_FLAGS
mode |= winterm.ENABLE_INSERT_MODE
mode |= winterm.ENABLE_QUICK_EDIT_MODE
if vtInputSupported {
mode |= enableVirtualTerminalInput
}
if err := winterm.SetConsoleMode(fd, mode); err != nil {
return errors.Wrap(err, "unable to set console to raw mode")
}
return nil
}
func checkConsole(f *os.File) error {
if _, err := winterm.GetConsoleMode(f.Fd()); err != nil {
return err
}
return nil
}
func newMaster(f *os.File) Console {
if f != os.Stdin && f != os.Stdout && f != os.Stderr {
panic("creating a console from a file is not supported on windows")
}
m := &master{}
m.initStdios()
return m
}

63
vendor/github.com/containerd/console/tc_darwin.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package console
import (
"fmt"
"os"
"unsafe"
"golang.org/x/sys/unix"
)
func tcget(fd uintptr, p *unix.Termios) error {
return ioctl(fd, unix.TIOCGETA, uintptr(unsafe.Pointer(p)))
}
func tcset(fd uintptr, p *unix.Termios) error {
return ioctl(fd, unix.TIOCSETA, uintptr(unsafe.Pointer(p)))
}
func ioctl(fd, flag, data uintptr) error {
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 {
return err
}
return nil
}
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
// unlockpt should be called before opening the slave side of a pty.
func unlockpt(f *os.File) error {
var u int32
return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u)))
}
// ptsname retrieves the name of the first available pts for the given master.
func ptsname(f *os.File) (string, error) {
var n int32
if err := ioctl(f.Fd(), unix.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n))); err != nil {
return "", err
}
return fmt.Sprintf("/dev/pts/%d", n), nil
}
func saneTerminal(f *os.File) error {
// Go doesn't have a wrapper for any of the termios ioctls.
var termios unix.Termios
if err := tcget(f.Fd(), &termios); err != nil {
return err
}
// Set -onlcr so we don't have to deal with \r.
termios.Oflag &^= unix.ONLCR
return tcset(f.Fd(), &termios)
}
func cfmakeraw(t unix.Termios) unix.Termios {
t.Iflag = uint64(uint32(t.Iflag) & ^uint32((unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)))
t.Oflag = uint64(uint32(t.Oflag) & ^uint32(unix.OPOST))
t.Lflag = uint64(uint32(t.Lflag) & ^(uint32(unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)))
t.Cflag = uint64(uint32(t.Cflag) & ^(uint32(unix.CSIZE | unix.PARENB)))
t.Cflag = t.Cflag | unix.CS8
t.Cc[unix.VMIN] = 1
t.Cc[unix.VTIME] = 0
return t
}

63
vendor/github.com/containerd/console/tc_freebsd.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package console
import (
"fmt"
"os"
"unsafe"
"golang.org/x/sys/unix"
)
func tcget(fd uintptr, p *unix.Termios) error {
return ioctl(fd, unix.TIOCGETA, uintptr(unsafe.Pointer(p)))
}
func tcset(fd uintptr, p *unix.Termios) error {
return ioctl(fd, unix.TIOCSETA, uintptr(unsafe.Pointer(p)))
}
func ioctl(fd, flag, data uintptr) error {
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 {
return err
}
return nil
}
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
// unlockpt should be called before opening the slave side of a pty.
// This is a no-op on FreeBSD, which does not allocate controlling terminals on open
func unlockpt(f *os.File) error {
return nil
}
// ptsname retrieves the name of the first available pts for the given master.
func ptsname(f *os.File) (string, error) {
var n int32
if err := ioctl(f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil {
return "", err
}
return fmt.Sprintf("/dev/pts/%d", n), nil
}
func saneTerminal(f *os.File) error {
// Go doesn't have a wrapper for any of the termios ioctls.
var termios unix.Termios
if err := tcget(f.Fd(), &termios); err != nil {
return err
}
// Set -onlcr so we don't have to deal with \r.
termios.Oflag &^= unix.ONLCR
return tcset(f.Fd(), &termios)
}
func cfmakeraw(t unix.Termios) unix.Termios {
t.Iflag = t.Iflag & ^uint32((unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON))
t.Oflag = t.Oflag & ^uint32(unix.OPOST)
t.Lflag = t.Lflag & ^(uint32(unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN))
t.Cflag = t.Cflag & ^(uint32(unix.CSIZE | unix.PARENB))
t.Cflag = t.Cflag | unix.CS8
t.Cc[unix.VMIN] = 1
t.Cc[unix.VTIME] = 0
return t
}

63
vendor/github.com/containerd/console/tc_linux.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package console
import (
"fmt"
"os"
"unsafe"
"golang.org/x/sys/unix"
)
func tcget(fd uintptr, p *unix.Termios) error {
return ioctl(fd, unix.TCGETS, uintptr(unsafe.Pointer(p)))
}
func tcset(fd uintptr, p *unix.Termios) error {
return ioctl(fd, unix.TCSETS, uintptr(unsafe.Pointer(p)))
}
func ioctl(fd, flag, data uintptr) error {
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 {
return err
}
return nil
}
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
// unlockpt should be called before opening the slave side of a pty.
func unlockpt(f *os.File) error {
var u int32
return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
}
// ptsname retrieves the name of the first available pts for the given master.
func ptsname(f *os.File) (string, error) {
var n int32
if err := ioctl(f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil {
return "", err
}
return fmt.Sprintf("/dev/pts/%d", n), nil
}
func saneTerminal(f *os.File) error {
// Go doesn't have a wrapper for any of the termios ioctls.
var termios unix.Termios
if err := tcget(f.Fd(), &termios); err != nil {
return err
}
// Set -onlcr so we don't have to deal with \r.
termios.Oflag &^= unix.ONLCR
return tcset(f.Fd(), &termios)
}
func cfmakeraw(t unix.Termios) unix.Termios {
t.Iflag = t.Iflag & ^uint32((unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON))
t.Oflag = t.Oflag & ^uint32(unix.OPOST)
t.Lflag = t.Lflag & ^(uint32(unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN))
t.Cflag = t.Cflag & ^(uint32(unix.CSIZE | unix.PARENB))
t.Cflag = t.Cflag | unix.CS8
t.Cc[unix.VMIN] = 1
t.Cc[unix.VTIME] = 0
return t
}

File diff suppressed because it is too large

View File

@ -0,0 +1,116 @@
syntax = "proto3";
package containerd.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
// Containers provides metadata storage for containers used in the execution
// service.
//
// The objects here provide a state-independent view of containers for use in
// management and resource pinning. From that perspective, containers do not
// have a "state" but rather this is the set of resources that will be
// considered in use by the container.
//
// From the perspective of the execution service, these objects represent the
// base parameters for creating a container process.
//
// In general, when looking to add fields for this type, first ask yourself
// whether or not the function of the field has to do with runtime execution or
// is invariant of the runtime state of the container. If it has to do with
// runtime, or changes as the "container" is started and stopped, it probably
// doesn't belong on this object.
service Containers {
rpc Get(GetContainerRequest) returns (GetContainerResponse);
rpc List(ListContainersRequest) returns (ListContainersResponse);
rpc Create(CreateContainerRequest) returns (CreateContainerResponse);
rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse);
rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty);
}
message Container {
// ID is the user-specified identifier.
//
// This field may not be updated.
string id = 1;
// Labels provides an area to include arbitrary data on containers.
//
// Note that to add a new value to this field, read the existing set and
// include the entire result in the update call.
map<string, string> labels = 2;
// Image contains the reference of the image used to build the
// specification and snapshots for running this container.
//
// If this field is updated, the spec and rootfs need to be updated as well.
string image = 3;
// Runtime specifies which runtime to use for executing this container.
string runtime = 4;
// Spec to be used when creating the container. This is runtime specific.
google.protobuf.Any spec = 6;
// RootFS specifies the snapshot key to use for the container's root
// filesystem. When starting a task from this container, a caller should
// look up the mounts from the snapshot service and include those on the
// task create request.
//
// Snapshots referenced in this field will not be garbage collected.
//
// This field may be updated.
string rootfs = 7 [(gogoproto.customname) = "RootFS"];
}
message GetContainerRequest {
string id = 1;
}
message GetContainerResponse {
Container container = 1 [(gogoproto.nullable) = false];
}
message ListContainersRequest {
string filter = 1; // TODO(stevvooe): Define a filtering syntax to make these queries.
}
message ListContainersResponse {
repeated Container containers = 1 [(gogoproto.nullable) = false];
}
message CreateContainerRequest {
Container container = 1 [(gogoproto.nullable) = false];
}
message CreateContainerResponse {
Container container = 1 [(gogoproto.nullable) = false];
}
// UpdateContainerRequest updates the metadata on one or more container.
//
// The operation should follow semantics described in
// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
// unless otherwise qualified.
message UpdateContainerRequest {
// Container provides the target values, as declared by the mask, for the update.
//
// The ID field must be set.
Container container = 1 [(gogoproto.nullable) = false];
// UpdateMask specifies which fields to perform the update on. If empty,
// the operation applies to all fields.
google.protobuf.FieldMask update_mask = 2;
}
message UpdateContainerResponse {
Container container = 1 [(gogoproto.nullable) = false];
}
message DeleteContainerRequest {
string id = 1;
}

File diff suppressed because it is too large

View File

@ -0,0 +1,255 @@
syntax = "proto3";
package containerd.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/empty.proto";
// Content provides access to a content addressable storage system.
service Content {
// Info returns information about a committed object.
//
// This call can be used for getting the size of content and checking for
// existence.
rpc Info(InfoRequest) returns (InfoResponse);
// List streams the entire set of content as Info objects and closes the
// stream.
//
// Typically, this will yield a large response, chunked into messages.
// Clients should make provisions to ensure they can handle the entire data
// set.
rpc List(ListContentRequest) returns (stream ListContentResponse);
// Delete will delete the referenced object.
rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);
// Read allows one to read an object based on the offset into the content.
//
// The requested data may be returned in one or more messages.
rpc Read(ReadRequest) returns (stream ReadResponse);
// Status returns the status of ongoing object ingestions, started via
// Write.
//
// Only those matching the regular expression will be provided in the
// response. If the provided regular expression is empty, all ingestions
// will be provided.
rpc Status(StatusRequest) returns (StatusResponse);
// Write begins or resumes writes to a resource identified by a unique ref.
// Only one active stream may exist at a time for each ref.
//
// Once a write stream has started, it may only write to a single ref, thus
// once a stream is started, the ref may be omitted on subsequent writes.
//
// For any write transaction represented by a ref, only a single write may
// be made to a given offset. If overlapping writes occur, it is an error.
// Writes should be sequential; an implementation may reject writes that
// are not.
//
// If expected_digest is set and already part of the content store, the
// write will fail.
//
// When completed, the commit flag should be set to true. If expected size
// or digest is set, the content will be validated against those values.
rpc Write(stream WriteRequest) returns (stream WriteResponse);
// Abort cancels the ongoing write named in the request. Any resources
// associated with the write will be collected.
rpc Abort(AbortRequest) returns (google.protobuf.Empty);
}
message Info {
// Digest is the hash identity of the blob.
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// Size is the total number of bytes in the blob.
int64 size = 2;
// CommittedAt provides the time at which the blob was committed.
google.protobuf.Timestamp committed_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message InfoRequest {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message InfoResponse {
Info info = 1 [(gogoproto.nullable) = false];
}
message ListContentRequest {}
message ListContentResponse {
repeated Info info = 1 [(gogoproto.nullable) = false];
}
message DeleteContentRequest {
// Digest specifies which content to delete.
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
// ReadRequest defines the fields that make up a request to read a portion of
// data from a stored object.
message ReadRequest {
// Digest is the hash identity to read.
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// Offset specifies the number of bytes from the start at which to begin
// the read. If zero or less, the read will be from the start. This uses
// standard zero-indexed semantics.
int64 offset = 2;
// size is the total size of the read. If zero, the entire blob will be
// returned by the service.
int64 size = 3;
}
// ReadResponse carries byte data for a read request.
message ReadResponse {
int64 offset = 1; // offset of the returned data
bytes data = 2; // actual data
}
message StatusRequest {
string regexp = 1;
}
message Status {
google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
string ref = 3;
int64 offset = 4;
int64 total = 5;
string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message StatusResponse {
repeated Status statuses = 1 [(gogoproto.nullable) = false];
}
// WriteAction defines the behavior of a WriteRequest.
enum WriteAction {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "WriteAction";
// WriteActionStat instructs the writer to return the current status while
// holding the lock on the write.
STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"];
// WriteActionWrite sets the action for the write request to write data.
//
// Any data included will be written at the provided offset. The
// transaction will be left open for further writes.
//
// This is the default.
WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"];
// WriteActionCommit will write any outstanding data in the message and
// commit the write, storing it under the digest.
//
// This can be used in a single message to send the data, verify it and
// commit it.
//
// This action will always terminate the write.
COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
}
// WriteRequest writes data to the request ref at offset.
message WriteRequest {
// Action sets the behavior of the write.
//
// When this is a write and the ref is not yet allocated, the ref will be
// allocated and the data will be written at offset.
//
// If the action is write and the ref is allocated, it will accept data to
// an offset that has not yet been written.
//
// If the action is write and there is no data, the current write status
// will be returned. This works differently from status because the stream
// holds a lock.
WriteAction action = 1;
// Ref identifies the pre-commit object to write to.
string ref = 2;
// Total can be set to have the service validate the total size of the
// committed content.
//
// The latest value before or with the commit action message will be used to
// validate the content. If the offset overflows total, the service may
// report an error. It is only required on one message for the write.
//
// If the value is zero or less, no validation of the final content will be
// performed.
int64 total = 3;
// Expected can be set to have the service validate the final content against
// the provided digest.
//
// If the digest is already present in the object store, an AlreadyExists
// error will be returned.
//
// Only the latest version will be used to check the content against the
// digest. It is only required to include it on a single message, before or
// with the commit action message.
string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// Offset specifies the number of bytes from the start at which to begin
// the write. For most implementations, this means from the start of the
// file. This uses standard, zero-indexed semantics.
//
// If the action is write, the remote may remove all previously written
// data after the offset. Implementations may support arbitrary offsets but
// MUST support resetting this value to zero with a write. If an
// implementation does not support a write at a particular offset, an
// OutOfRange error must be returned.
int64 offset = 5;
// Data is the actual bytes to be written.
//
// If this is empty and the message is not a commit, a response will be
// returned with the current write state.
bytes data = 6;
}
// WriteResponse is returned on the culmination of a write call.
message WriteResponse {
// Action contains the action for the final message of the stream. A writer
// should confirm that they match the intended result.
WriteAction action = 1;
// StartedAt provides the time at which the write began.
//
// This must be set for stat and commit write actions. All other write
// actions may omit this.
google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// UpdatedAt provides the last time of a successful write.
//
// This must be set for stat and commit write actions. All other write
// actions may omit this.
google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// Offset is the current committed size for the write.
int64 offset = 4;
// Total provides the current, expected total size of the write.
//
// We include this to provide consistency with the Status structure on the
// client writer.
//
// This is only valid on the Stat and Commit response.
int64 total = 5;
// Digest, if present, includes the digest up to the currently committed
// bytes. If action is commit, this field will be set. It is implementation
// defined if this is set for other actions.
string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message AbortRequest {
string ref = 1;
}
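
To make the write protocol above concrete, here is a minimal client-side sketch (not part of this commit) that streams a small blob in one WRITE message and then commits it with the expected total and digest. It assumes the generated Go client for this service exposes NewContentClient and a Write stream method with Send/Recv/CloseSend; the helper name writeBlob is hypothetical.

package contentexample

import (
    "context"

    contentapi "github.com/containerd/containerd/api/services/content"
    digest "github.com/opencontainers/go-digest"
    "google.golang.org/grpc"
)

// writeBlob drives the Write stream: one WRITE carrying all of the data at
// offset 0, followed by a COMMIT that validates the final size and digest.
func writeBlob(ctx context.Context, conn *grpc.ClientConn, ref string, data []byte) error {
    client := contentapi.NewContentClient(conn)

    // Open the stream; the first message must carry the ref.
    wc, err := client.Write(ctx)
    if err != nil {
        return err
    }

    if err := wc.Send(&contentapi.WriteRequest{
        Action: contentapi.WriteActionWrite,
        Ref:    ref,
        Offset: 0,
        Data:   data,
    }); err != nil {
        return err
    }
    if _, err := wc.Recv(); err != nil { // response reports the committed offset
        return err
    }

    if err := wc.Send(&contentapi.WriteRequest{
        Action:   contentapi.WriteActionCommit,
        Ref:      ref,
        Total:    int64(len(data)),
        Expected: digest.FromBytes(data),
    }); err != nil {
        return err
    }
    if _, err := wc.Recv(); err != nil {
        return err
    }
    return wc.CloseSend()
}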

File diff suppressed because it is too large

@ -0,0 +1,58 @@
syntax = "proto3";
package containerd.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount/mount.proto";
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
// Diff service creates and applies diffs
service Diff {
// Apply applies the content associated with the provided digests onto
// the provided mounts. Archive content will be extracted and
// decompressed if necessary.
rpc Apply(ApplyRequest) returns (ApplyResponse);
// Diff creates a diff between the given mounts and uploads the result
// to the content store.
rpc Diff(DiffRequest) returns (DiffResponse);
}
message ApplyRequest {
// Diff is the descriptor of the diff to be extracted
containerd.v1.types.Descriptor diff = 1;
repeated containerd.v1.types.Mount mounts = 2;
}
message ApplyResponse {
// Applied is the descriptor for the object which was applied.
// If the input was a compressed blob then the result will be
// the descriptor for the uncompressed blob.
containerd.v1.types.Descriptor applied = 1;
}
message DiffRequest {
// Left are the mounts representing the older copy that serves as the base
// of the computed changes.
repeated containerd.v1.types.Mount left = 1;
// Right are the mounts representing the newer copy into which the changes
// from the left were made.
repeated containerd.v1.types.Mount right = 2;
// MediaType is the media type descriptor for the created diff
// object
string media_type = 3;
// Ref identifies the pre-commit content store object. This
// reference can be used to get the status from the content store.
string ref = 5;
}
message DiffResponse {
// Diff is the descriptor of the diff which can be applied
containerd.v1.types.Descriptor diff = 3;
}
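
A hedged sketch of how a client might call Apply, assuming the generated Go package follows the proto above (NewDiffClient, ApplyRequest with Diff and Mounts fields); the helper name applyLayer is hypothetical and not part of this commit.

package diffexample

import (
    "context"

    diffapi "github.com/containerd/containerd/api/services/diff"
    descriptor "github.com/containerd/containerd/api/types/descriptor"
    mounttypes "github.com/containerd/containerd/api/types/mount"
    "google.golang.org/grpc"
)

// applyLayer asks the Diff service to extract the content identified by desc
// onto the given mounts and returns the descriptor of the applied blob.
func applyLayer(ctx context.Context, conn *grpc.ClientConn, desc *descriptor.Descriptor, mounts []*mounttypes.Mount) (*descriptor.Descriptor, error) {
    client := diffapi.NewDiffClient(conn)
    resp, err := client.Apply(ctx, &diffapi.ApplyRequest{
        Diff:   desc,
        Mounts: mounts,
    })
    if err != nil {
        return nil, err
    }
    return resp.Applied, nil
}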

File diff suppressed because it is too large

@ -0,0 +1,170 @@
syntax = "proto3";
package containerd.v1.services.execution;
import "google/protobuf/empty.proto";
import "google/protobuf/any.proto";
import "gogoproto/gogo.proto";
import "github.com/containerd/containerd/api/types/mount/mount.proto";
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
import "github.com/containerd/containerd/api/types/task/task.proto";
import "google/protobuf/timestamp.proto";
service Tasks {
rpc Create(CreateRequest) returns (CreateResponse);
rpc Start(StartRequest) returns (google.protobuf.Empty);
rpc Delete(DeleteRequest) returns (DeleteResponse);
rpc Info(InfoRequest) returns (InfoResponse);
rpc List(ListRequest) returns (ListResponse);
rpc Kill(KillRequest) returns (google.protobuf.Empty);
rpc Events(EventsRequest) returns (stream containerd.v1.types.Event);
rpc Exec(ExecRequest) returns (ExecResponse);
rpc Pty(PtyRequest) returns (google.protobuf.Empty);
rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty);
rpc Pause(PauseRequest) returns (google.protobuf.Empty);
rpc Resume(ResumeRequest) returns (google.protobuf.Empty);
rpc Processes(ProcessesRequest) returns (ProcessesResponse);
rpc Checkpoint(CheckpointRequest) returns (CheckpointResponse);
}
message CreateRequest {
// ContainerID specifies the container to use for creating this task.
//
// The spec from the provided container id will be used to create the
// task associated with this container. Only one task can be run at a time
// per container.
//
// This should be created using the Containers service.
string container_id = 2;
// RootFS provides the pre-chroot mounts to perform in the shim before
// executing the container task.
//
// These are for mounts that cannot be performed in the user namespace.
// Typically, these mounts should be resolved from snapshots specified on
// the container object.
repeated containerd.v1.types.Mount rootfs = 3;
string stdin = 5;
string stdout = 6;
string stderr = 7;
bool terminal = 8;
types.Descriptor checkpoint = 9;
}
message CreateResponse {
// TODO(stevvooe): We no longer have an id for a task since they are bound
// to a single container. Although, we should represent each new task with
// an ID so one can differentiate between each instance of a container
// running.
//
// Hence, we are leaving this here and reserving the field number in case
// we need to move in this direction.
// string id = 1;
string container_id = 2;
uint32 pid = 3;
}
message StartRequest {
string container_id = 1;
}
message DeleteRequest {
string container_id = 1;
}
message DeleteResponse {
string container_id = 1;
uint32 exit_status = 2;
google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message InfoRequest {
string container_id = 1;
}
message InfoResponse {
types.Task task = 1;
}
message ListRequest {
}
message ListResponse {
repeated containerd.v1.types.Task tasks = 1;
}
message KillRequest {
string container_id = 1;
uint32 signal = 2;
oneof pid_or_all {
bool all = 3;
uint32 pid = 4;
}
}
message EventsRequest {
}
message ExecRequest {
// ContainerID specifies the container in which to exec the process.
string container_id = 1;
bool terminal = 2;
string stdin = 3;
string stdout = 4;
string stderr = 5;
// Spec for starting a process in the target container.
//
// For runc, this is a process spec, for example.
google.protobuf.Any spec = 6;
}
message ExecResponse {
uint32 pid = 1;
}
message PtyRequest {
string container_id = 1;
uint32 pid = 2;
uint32 width = 3;
uint32 height = 4;
}
message CloseStdinRequest {
string container_id = 1;
uint32 pid = 2;
}
message PauseRequest {
string container_id = 1;
}
message ResumeRequest {
string container_id = 1;
}
message ProcessesRequest {
string container_id = 1;
}
message ProcessesResponse{
repeated containerd.v1.types.Process processes = 1;
}
message CheckpointRequest {
string container_id = 1;
bool allow_tcp = 2;
bool allow_unix_sockets = 3;
bool allow_terminal = 4;
bool file_locks = 5;
repeated string empty_namespaces = 6;
string parent_checkpoint = 7 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
bool exit = 8;
}
message CheckpointResponse {
repeated types.Descriptor descriptors = 1;
}
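
The request/response shapes above are enough to sketch the basic task lifecycle. The following is a hedged example, not part of this commit: it assumes Go field names that follow the proto (ContainerID, Rootfs, Pid), uses placeholder stdio paths, and the helper name createAndStart is hypothetical.

package taskexample

import (
    "context"

    "github.com/containerd/containerd/api/services/execution"
    mounttypes "github.com/containerd/containerd/api/types/mount"
    "google.golang.org/grpc"
)

// createAndStart creates a task for an existing container using the given
// rootfs mounts, then starts it, returning the pid reported by the service.
func createAndStart(ctx context.Context, conn *grpc.ClientConn, containerID string, rootfs []*mounttypes.Mount) (uint32, error) {
    tasks := execution.NewTasksClient(conn)

    created, err := tasks.Create(ctx, &execution.CreateRequest{
        ContainerID: containerID,
        Rootfs:      rootfs,
        Stdin:       "/dev/null", // placeholder stdio paths; terminal left false
        Stdout:      "/dev/null",
        Stderr:      "/dev/null",
    })
    if err != nil {
        return 0, err
    }

    if _, err := tasks.Start(ctx, &execution.StartRequest{
        ContainerID: created.ContainerID,
    }); err != nil {
        return 0, err
    }
    return created.Pid, nil
}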


@ -0,0 +1 @@
package images

File diff suppressed because it is too large

@ -0,0 +1,78 @@
syntax = "proto3";
package containerd.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "github.com/containerd/containerd/api/types/mount/mount.proto";
import "github.com/containerd/containerd/api/types/descriptor/descriptor.proto";
// Images is a service that allows one to register images with containerd.
//
// In containerd, an image is merely the mapping of a name to a content root,
// described by a descriptor. The behavior and state of an image is purely
// dictated by the type of the descriptor.
//
// From the perspective of this service, these references are mostly shallow,
// in that the existence of the required content won't be validated until
// required by consuming services.
//
// As such, this can really be considered a "metadata service".
service Images {
// Get returns an image by name.
rpc Get(GetRequest) returns (GetResponse);
// List returns a list of all images known to containerd.
rpc List(ListRequest) returns (ListResponse);
// Put assigns the name to a given target image based on the provided
// image.
rpc Put(PutRequest) returns (google.protobuf.Empty);
// Delete deletes the image by name.
rpc Delete(DeleteRequest) returns (google.protobuf.Empty);
}
message Image {
string name = 1;
string labels = 2;
types.Descriptor target = 3 [(gogoproto.nullable) = false];
}
message GetRequest {
string name = 1;
// TODO(stevvooe): Consider that we may want to have multiple images under
// the same name or multiple names for the same image. This mapping could
// be truly many to many but we'll need a way to identify an entry.
//
// For now, we consider it unique but an intermediary index could be
// created to allow for a dispatch of images.
}
message GetResponse {
Image image = 1;
}
message PutRequest {
Image image = 1 [(gogoproto.nullable) = false];
}
message ListRequest {
// TODO(stevvooe): empty for now, need to add filtration
// Some common use cases we might consider:
//
// 1. Select by multiple names.
// 2. Select by platform.
// 3. Select by annotations.
}
message ListResponse {
repeated Image images = 1 [(gogoproto.nullable) = false];
// TODO(stevvooe): Add pagination.
}
message DeleteRequest {
string name = 1;
}
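
As a usage sketch (not part of this commit), the client-side images.Store wrapper added elsewhere in this diff can register a name and read the record back in two calls. It assumes Put accepts an OCI descriptor, as in the client's Pull path; tagAndResolve is a hypothetical helper.

package imageexample

import (
    "context"

    imagesapi "github.com/containerd/containerd/api/services/images"
    "github.com/containerd/containerd/images"
    imagesservice "github.com/containerd/containerd/services/images"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "google.golang.org/grpc"
)

// tagAndResolve registers desc under name and resolves it again. Since this
// is a metadata-only service, the referenced content is not validated here.
func tagAndResolve(ctx context.Context, conn *grpc.ClientConn, name string, desc ocispec.Descriptor) (images.Image, error) {
    store := imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(conn))
    if err := store.Put(ctx, name, desc); err != nil {
        return images.Image{}, err
    }
    return store.Get(ctx, name)
}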

File diff suppressed because it is too large

@ -0,0 +1,81 @@
syntax = "proto3";
package containerd.v1.snapshot;
import "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "github.com/containerd/containerd/api/types/mount/mount.proto";
// Snapshot service manages snapshots
service Snapshot {
rpc Prepare(PrepareRequest) returns (MountsResponse);
rpc View(PrepareRequest) returns (MountsResponse);
rpc Mounts(MountsRequest) returns (MountsResponse);
rpc Commit(CommitRequest) returns (google.protobuf.Empty);
rpc Remove(RemoveRequest) returns (google.protobuf.Empty);
rpc Stat(StatRequest) returns (StatResponse);
rpc List(ListRequest) returns (stream ListResponse);
rpc Usage(UsageRequest) returns (UsageResponse);
// "Snapshot" prepares a new set of mounts from existing name
}
message PrepareRequest {
string key = 1;
string parent = 2;
}
message MountsRequest {
string key = 1;
}
message MountsResponse {
repeated containerd.v1.types.Mount mounts = 1;
}
message RemoveRequest {
string key = 1;
}
message CommitRequest {
string name = 1;
string key = 2;
}
message StatRequest {
string key = 1;
}
enum Kind {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "Kind";
ACTIVE = 0 [(gogoproto.enumvalue_customname) = "KindActive"];
COMMITTED = 1 [(gogoproto.enumvalue_customname) = "KindCommitted"];
}
message Info {
string name = 1;
string parent = 2;
Kind kind = 3;
bool readonly = 4;
}
message StatResponse {
Info info = 1 [(gogoproto.nullable) = false];
}
message ListRequest{}
message ListResponse {
repeated Info info = 1 [(gogoproto.nullable) = false];
}
message UsageRequest {
string key = 1;
}
message UsageResponse {
int64 inodes = 2;
int64 size = 1;
}
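
A hedged sketch of the prepare/populate/commit cycle implied by this service, using the client-side snapshot.Snapshotter wrapper added elsewhere in this diff. The Commit signature is inferred from the proto above, the helper newLayer is hypothetical, and the mounts returned by Prepare are ignored here for brevity.

package snapshotexample

import (
    "context"

    snapshotapi "github.com/containerd/containerd/api/services/snapshot"
    snapshotservice "github.com/containerd/containerd/services/snapshot"
    "google.golang.org/grpc"
)

// newLayer prepares an active snapshot keyed by key on top of parent, lets the
// caller populate it, and commits the result under its final name.
func newLayer(ctx context.Context, conn *grpc.ClientConn, key, parent, name string, populate func(context.Context) error) error {
    sn := snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotClient(conn))

    // Prepare allocates the active snapshot and returns the mounts the caller
    // would use to materialize it (ignored in this sketch).
    if _, err := sn.Prepare(ctx, key, parent); err != nil {
        return err
    }
    // populate stands in for mounting the snapshot and writing files into it.
    if err := populate(ctx); err != nil {
        _ = sn.Remove(ctx, key) // discard the active snapshot on failure
        return err
    }
    // Commit freezes the populated snapshot under name; (ctx, name, key) is
    // inferred from the CommitRequest message above.
    return sn.Commit(ctx, name, key)
}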


@ -0,0 +1,422 @@
// Code generated by protoc-gen-gogo.
// source: github.com/containerd/containerd/api/types/descriptor/descriptor.proto
// DO NOT EDIT!
/*
Package descriptor is a generated protocol buffer package.
It is generated from these files:
github.com/containerd/containerd/api/types/descriptor/descriptor.proto
It has these top-level messages:
Descriptor
*/
package descriptor
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// Descriptor describes a blob in a content store.
//
// This descriptor can be used to reference content from an
// oci descriptor found in a manifest.
// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor
type Descriptor struct {
MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
}
func (m *Descriptor) Reset() { *m = Descriptor{} }
func (*Descriptor) ProtoMessage() {}
func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
func init() {
proto.RegisterType((*Descriptor)(nil), "containerd.v1.types.Descriptor")
}
func (m *Descriptor) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.MediaType) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintDescriptor(dAtA, i, uint64(len(m.MediaType)))
i += copy(dAtA[i:], m.MediaType)
}
if len(m.Digest) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintDescriptor(dAtA, i, uint64(len(m.Digest)))
i += copy(dAtA[i:], m.Digest)
}
if m.Size_ != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_))
}
return i, nil
}
func encodeFixed64Descriptor(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Descriptor(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Descriptor) Size() (n int) {
var l int
_ = l
l = len(m.MediaType)
if l > 0 {
n += 1 + l + sovDescriptor(uint64(l))
}
l = len(m.Digest)
if l > 0 {
n += 1 + l + sovDescriptor(uint64(l))
}
if m.Size_ != 0 {
n += 1 + sovDescriptor(uint64(m.Size_))
}
return n
}
func sovDescriptor(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozDescriptor(x uint64) (n int) {
return sovDescriptor(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Descriptor) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Descriptor{`,
`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
`}`,
}, "")
return s
}
func valueToStringDescriptor(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Descriptor) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDescriptor
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Descriptor: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDescriptor
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthDescriptor
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.MediaType = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDescriptor
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthDescriptor
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
}
m.Size_ = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDescriptor
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Size_ |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipDescriptor(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthDescriptor
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipDescriptor(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDescriptor
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDescriptor
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDescriptor
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthDescriptor
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDescriptor
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipDescriptor(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowDescriptor = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor/descriptor.proto", fileDescriptorDescriptor)
}
var fileDescriptorDescriptor = []byte{
// 229 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0x21, 0x31, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85,
0x84, 0x11, 0x3a, 0xf4, 0xca, 0x0c, 0xf5, 0xc0, 0x1a, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1,
0xf2, 0xfa, 0x20, 0x16, 0x44, 0xa9, 0x52, 0x37, 0x23, 0x17, 0x97, 0x0b, 0x5c, 0xbf, 0x90, 0x2c,
0x17, 0x57, 0x6e, 0x6a, 0x4a, 0x66, 0x62, 0x3c, 0x48, 0x8f, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67,
0x10, 0x27, 0x58, 0x24, 0xa4, 0xb2, 0x20, 0x55, 0xc8, 0x8b, 0x8b, 0x2d, 0x25, 0x33, 0x3d, 0xb5,
0xb8, 0x44, 0x82, 0x09, 0x24, 0xe5, 0x64, 0x74, 0xe2, 0x9e, 0x3c, 0xc3, 0xad, 0x7b, 0xf2, 0x5a,
0x48, 0x0e, 0xcf, 0x2f, 0x48, 0xcd, 0x83, 0xdb, 0x5f, 0xac, 0x9f, 0x9e, 0xaf, 0x0b, 0xd1, 0xa2,
0xe7, 0x02, 0xa6, 0x82, 0xa0, 0x26, 0x08, 0x09, 0x71, 0xb1, 0x14, 0x67, 0x56, 0xa5, 0x4a, 0x30,
0x2b, 0x30, 0x6a, 0x30, 0x07, 0x81, 0xd9, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28,
0xc7, 0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c,
0x92, 0x63, 0x4c, 0x62, 0x03, 0x3b, 0xd7, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x45, 0x60, 0xfd,
0x5b, 0x23, 0x01, 0x00, 0x00,
}


@ -0,0 +1,16 @@
syntax = "proto3";
package containerd.v1.types;
import "gogoproto/gogo.proto";
// Descriptor describes a blob in a content store.
//
// This descriptor can be used to reference content from an
// oci descriptor found in a manifest.
// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor
message Descriptor {
string media_type = 1;
string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
int64 size = 3;
}
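
For illustration only, a descriptor for an in-memory blob can be built directly from the generated Go type; the media type below is the OCI layer type picked as an example and the helper name describe is hypothetical.

package descriptorexample

import (
    descriptor "github.com/containerd/containerd/api/types/descriptor"
    digest "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// describe builds a content-store descriptor for a blob held in memory.
func describe(blob []byte) descriptor.Descriptor {
    return descriptor.Descriptor{
        MediaType: ocispec.MediaTypeImageLayer,
        Digest:    digest.FromBytes(blob),
        Size_:     int64(len(blob)),
    }
}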


@ -0,0 +1,489 @@
// Code generated by protoc-gen-gogo.
// source: github.com/containerd/containerd/api/types/mount/mount.proto
// DO NOT EDIT!
/*
Package mount is a generated protocol buffer package.
It is generated from these files:
github.com/containerd/containerd/api/types/mount/mount.proto
It has these top-level messages:
Mount
*/
package mount
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerD. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
type Mount struct {
// Type defines the nature of the mount.
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
// Target path in container
Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
// Options specifies zero or more fstab style mount options.
Options []string `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"`
}
func (m *Mount) Reset() { *m = Mount{} }
func (*Mount) ProtoMessage() {}
func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} }
func init() {
proto.RegisterType((*Mount)(nil), "containerd.v1.types.Mount")
}
func (m *Mount) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Mount) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Type) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintMount(dAtA, i, uint64(len(m.Type)))
i += copy(dAtA[i:], m.Type)
}
if len(m.Source) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintMount(dAtA, i, uint64(len(m.Source)))
i += copy(dAtA[i:], m.Source)
}
if len(m.Target) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintMount(dAtA, i, uint64(len(m.Target)))
i += copy(dAtA[i:], m.Target)
}
if len(m.Options) > 0 {
for _, s := range m.Options {
dAtA[i] = 0x22
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
func encodeFixed64Mount(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Mount(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintMount(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Mount) Size() (n int) {
var l int
_ = l
l = len(m.Type)
if l > 0 {
n += 1 + l + sovMount(uint64(l))
}
l = len(m.Source)
if l > 0 {
n += 1 + l + sovMount(uint64(l))
}
l = len(m.Target)
if l > 0 {
n += 1 + l + sovMount(uint64(l))
}
if len(m.Options) > 0 {
for _, s := range m.Options {
l = len(s)
n += 1 + l + sovMount(uint64(l))
}
}
return n
}
func sovMount(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozMount(x uint64) (n int) {
return sovMount(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Mount) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Mount{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
`Target:` + fmt.Sprintf("%v", this.Target) + `,`,
`Options:` + fmt.Sprintf("%v", this.Options) + `,`,
`}`,
}, "")
return s
}
func valueToStringMount(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Mount) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Mount: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Source = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Target = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Options = append(m.Options, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMount(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMount
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMount(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthMount
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipMount(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowMount = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/types/mount/mount.proto", fileDescriptorMount)
}
var fileDescriptorMount = []byte{
// 197 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x49, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
0xe6, 0x95, 0x40, 0x48, 0xbd, 0x82, 0xa2, 0xfc, 0x92, 0x7c, 0x21, 0x61, 0x84, 0x3a, 0xbd, 0x32,
0x43, 0x3d, 0xb0, 0x32, 0x29, 0x91, 0xf4, 0xfc, 0xf4, 0x7c, 0xb0, 0xbc, 0x3e, 0x88, 0x05, 0x51,
0xaa, 0x94, 0xca, 0xc5, 0xea, 0x0b, 0xd2, 0x29, 0x24, 0xc4, 0xc5, 0x02, 0x52, 0x27, 0xc1, 0xa8,
0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x0b, 0x89, 0x71, 0xb1, 0x15, 0xe7, 0x97, 0x16, 0x25, 0xa7,
0x4a, 0x30, 0x81, 0x45, 0xa1, 0x3c, 0x90, 0x78, 0x49, 0x62, 0x51, 0x7a, 0x6a, 0x89, 0x04, 0x33,
0x44, 0x1c, 0xc2, 0x13, 0x92, 0xe0, 0x62, 0xcf, 0x2f, 0x28, 0xc9, 0xcc, 0xcf, 0x2b, 0x96, 0x60,
0x51, 0x60, 0xd6, 0xe0, 0x0c, 0x82, 0x71, 0x9d, 0x24, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50,
0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78,
0x24, 0xc7, 0x98, 0xc4, 0x06, 0x76, 0x87, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xda, 0x1c,
0x59, 0xf2, 0x00, 0x00, 0x00,
}


@ -0,0 +1,27 @@
syntax = "proto3";
package containerd.v1.types;
import "gogoproto/gogo.proto";
// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerD. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
message Mount {
// Type defines the nature of the mount.
string type = 1;
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
string source = 2;
// Target path in container
string target = 3;
// Options specifies zero or more fstab style mount options.
repeated string options = 4;
}
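
As a small illustration (not part of this commit), an overlay rootfs can be expressed with this message; the option strings mirror the data argument of the mount syscall and the helper name overlayMount is hypothetical.

package mountexample

import (
    mounttypes "github.com/containerd/containerd/api/types/mount"
)

// overlayMount expresses an overlayfs mount with the Mount message: type,
// source and options map directly onto the mount syscall arguments.
func overlayMount(lower, upper, work string) *mounttypes.Mount {
    return &mounttypes.Mount{
        Type:   "overlay",
        Source: "overlay",
        Options: []string{
            "lowerdir=" + lower,
            "upperdir=" + upper,
            "workdir=" + work,
        },
    }
}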

File diff suppressed because it is too large

@ -0,0 +1,62 @@
syntax = "proto3";
package containerd.v1.types;
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";
enum Status {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "Status";
UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StatusUnknown"];
CREATED = 1 [(gogoproto.enumvalue_customname) = "StatusCreated"];
RUNNING = 2 [(gogoproto.enumvalue_customname) = "StatusRunning"];
STOPPED = 3 [(gogoproto.enumvalue_customname) = "StatusStopped"];
PAUSED = 4 [(gogoproto.enumvalue_customname) = "StatusPaused"];
}
message Task {
string id = 1; // TODO(stevvooe): For now, this is just the container id.
string container_id = 2;
uint32 pid = 3;
Status status = 4;
google.protobuf.Any spec = 5;
}
message Process {
uint32 pid = 1;
repeated string args = 2;
repeated string env = 3;
User user = 4;
string cwd = 5;
bool terminal = 6;
uint32 exit_status = 7;
Status status = 8;
google.protobuf.Any runtime_data = 9;
}
message User {
uint32 uid = 1;
uint32 gid = 2;
repeated uint32 additional_gids = 3;
}
message Event {
string id = 1;
enum EventType {
EXIT = 0;
OOM = 1;
CREATE = 2;
START = 3;
EXEC_ADDED = 4;
PAUSED = 5;
}
EventType type = 2;
uint32 pid = 3;
uint32 exit_status = 4;
google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
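
A minimal sketch of consuming the Status enum from Go, assuming the generated package for this proto is named task and that the custom names above become exported constants; isLive is a hypothetical helper.

package taskstatus

import (
    task "github.com/containerd/containerd/api/types/task"
)

// isLive reports whether a task is in a state where signalling it makes
// sense, based on the Status enum above.
func isLive(t *task.Task) bool {
    switch t.Status {
    case task.StatusRunning, task.StatusPaused:
        return true
    default:
        return false
    }
}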

339
vendor/github.com/containerd/containerd/client.go generated vendored Normal file

@ -0,0 +1,339 @@
package containerd
import (
"context"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"runtime"
"time"
"github.com/containerd/containerd/api/services/containers"
contentapi "github.com/containerd/containerd/api/services/content"
diffapi "github.com/containerd/containerd/api/services/diff"
"github.com/containerd/containerd/api/services/execution"
imagesapi "github.com/containerd/containerd/api/services/images"
snapshotapi "github.com/containerd/containerd/api/services/snapshot"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/rootfs"
contentservice "github.com/containerd/containerd/services/content"
"github.com/containerd/containerd/services/diff"
diffservice "github.com/containerd/containerd/services/diff"
imagesservice "github.com/containerd/containerd/services/images"
snapshotservice "github.com/containerd/containerd/services/snapshot"
"github.com/containerd/containerd/snapshot"
protobuf "github.com/gogo/protobuf/types"
"github.com/opencontainers/image-spec/identity"
"github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
)
func init() {
// reset the grpc logger so that it does not output in the STDIO of the calling process
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
}
type NewClientOpts func(c *Client) error
func WithNamespace(namespace string) NewClientOpts {
return func(c *Client) error {
c.namespace = namespace
return nil
}
}
// New returns a new containerd client that is connected to the containerd
// instance provided by address
func New(address string, opts ...NewClientOpts) (*Client, error) {
gopts := []grpc.DialOption{
grpc.WithInsecure(),
grpc.WithTimeout(100 * time.Second),
grpc.WithDialer(dialer),
}
conn, err := grpc.Dial(dialAddress(address), gopts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", address)
}
c := &Client{
conn: conn,
runtime: runtime.GOOS,
}
for _, o := range opts {
if err := o(c); err != nil {
return nil, err
}
}
return c, nil
}
// Client is the client to interact with containerd and its various services
// using a uniform interface
type Client struct {
conn *grpc.ClientConn
runtime string
namespace string
}
// Containers returns all containers created in containerd
func (c *Client) Containers(ctx context.Context) ([]Container, error) {
r, err := c.ContainerService().List(ctx, &containers.ListContainersRequest{})
if err != nil {
return nil, err
}
var out []Container
for _, container := range r.Containers {
out = append(out, containerFromProto(c, container))
}
return out, nil
}
type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
// WithContainerLabels adds the provided labels to the container
func WithContainerLabels(labels map[string]string) NewContainerOpts {
return func(_ context.Context, _ *Client, c *containers.Container) error {
c.Labels = labels
return nil
}
}
// WithExistingRootFS uses an existing root filesystem for the container
func WithExistingRootFS(id string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
// check that the snapshot exists, if not, fail on creation
if _, err := client.SnapshotService().Mounts(ctx, id); err != nil {
return err
}
c.RootFS = id
return nil
}
}
// WithNewRootFS allocates a new snapshot to be used by the container as the
// root filesystem in read-write mode
func WithNewRootFS(id string, i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
if err != nil {
return err
}
if _, err := client.SnapshotService().Prepare(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
return err
}
c.RootFS = id
return nil
}
}
// WithNewReadonlyRootFS allocates a new snapshot to be used by the container as the
// root filesystem in read-only mode
func WithNewReadonlyRootFS(id string, i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
if err != nil {
return err
}
if _, err := client.SnapshotService().View(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
return err
}
c.RootFS = id
return nil
}
}
func WithRuntime(name string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
c.Runtime = name
return nil
}
}
func WithImage(i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
c.Image = i.Name()
return nil
}
}
// NewContainer will create a new container in containerd with the provided id;
// the id must be unique within the namespace
func (c *Client) NewContainer(ctx context.Context, id string, spec *specs.Spec, opts ...NewContainerOpts) (Container, error) {
data, err := json.Marshal(spec)
if err != nil {
return nil, err
}
container := containers.Container{
ID: id,
Runtime: c.runtime,
Spec: &protobuf.Any{
TypeUrl: specs.Version,
Value: data,
},
}
for _, o := range opts {
if err := o(ctx, c, &container); err != nil {
return nil, err
}
}
r, err := c.ContainerService().Create(ctx, &containers.CreateContainerRequest{
Container: container,
})
if err != nil {
return nil, err
}
return containerFromProto(c, r.Container), nil
}
type PullOpts func(*Client, *PullContext) error
type PullContext struct {
Resolver remotes.Resolver
Unpacker Unpacker
}
func defaultPullContext() *PullContext {
return &PullContext{
Resolver: docker.NewResolver(docker.ResolverOptions{
Client: http.DefaultClient,
}),
}
}
func WithPullUnpack(client *Client, c *PullContext) error {
c.Unpacker = &snapshotUnpacker{
store: client.ContentStore(),
diff: client.DiffService(),
snapshotter: client.SnapshotService(),
}
return nil
}
type Unpacker interface {
Unpack(context.Context, images.Image) error
}
type snapshotUnpacker struct {
snapshotter snapshot.Snapshotter
store content.Store
diff diff.DiffService
}
func (s *snapshotUnpacker) Unpack(ctx context.Context, image images.Image) error {
layers, err := s.getLayers(ctx, image)
if err != nil {
return err
}
if _, err := rootfs.ApplyLayers(ctx, layers, s.snapshotter, s.diff); err != nil {
return err
}
return nil
}
func (s *snapshotUnpacker) getLayers(ctx context.Context, image images.Image) ([]rootfs.Layer, error) {
p, err := content.ReadBlob(ctx, s.store, image.Target.Digest)
if err != nil {
return nil, errors.Wrapf(err, "failed to read manifest blob")
}
var manifest v1.Manifest
if err := json.Unmarshal(p, &manifest); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal manifest")
}
diffIDs, err := image.RootFS(ctx, s.store)
if err != nil {
return nil, errors.Wrap(err, "failed to resolve rootfs")
}
if len(diffIDs) != len(manifest.Layers) {
return nil, errors.Errorf("mismatched image rootfs and manifest layers")
}
layers := make([]rootfs.Layer, len(diffIDs))
for i := range diffIDs {
layers[i].Diff = v1.Descriptor{
// TODO: derive media type from compressed type
MediaType: v1.MediaTypeImageLayer,
Digest: diffIDs[i],
}
layers[i].Blob = manifest.Layers[i]
}
return layers, nil
}
func (c *Client) Pull(ctx context.Context, ref string, opts ...PullOpts) (Image, error) {
pullCtx := defaultPullContext()
for _, o := range opts {
if err := o(c, pullCtx); err != nil {
return nil, err
}
}
store := c.ContentStore()
name, desc, err := pullCtx.Resolver.Resolve(ctx, ref)
if err != nil {
return nil, err
}
fetcher, err := pullCtx.Resolver.Fetcher(ctx, name)
if err != nil {
return nil, err
}
handlers := []images.Handler{
remotes.FetchHandler(store, fetcher),
images.ChildrenHandler(store),
}
if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil {
return nil, err
}
is := c.ImageService()
if err := is.Put(ctx, name, desc); err != nil {
return nil, err
}
i, err := is.Get(ctx, name)
if err != nil {
return nil, err
}
if pullCtx.Unpacker != nil {
if err := pullCtx.Unpacker.Unpack(ctx, i); err != nil {
return nil, err
}
}
return &image{
client: c,
i: i,
}, nil
}
// Close closes the client's connection to containerd
func (c *Client) Close() error {
return c.conn.Close()
}
func (c *Client) ContainerService() containers.ContainersClient {
return containers.NewContainersClient(c.conn)
}
func (c *Client) ContentStore() content.Store {
return contentservice.NewStoreFromClient(contentapi.NewContentClient(c.conn))
}
func (c *Client) SnapshotService() snapshot.Snapshotter {
return snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotClient(c.conn))
}
func (c *Client) TaskService() execution.TasksClient {
return execution.NewTasksClient(c.conn)
}
func (c *Client) ImageService() images.Store {
return imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(c.conn))
}
func (c *Client) DiffService() diff.DiffService {
return diffservice.NewDiffServiceFromClient(diffapi.NewDiffClient(c.conn))
}
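
Putting the client above together, a minimal end-to-end use looks like the following; the socket path and image reference are placeholders.

package main

import (
    "context"
    "log"

    "github.com/containerd/containerd"
)

func main() {
    // Connect to a containerd instance over its unix socket (placeholder path).
    client, err := containerd.New("/run/containerd/containerd.sock")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // Pull an image and unpack its layers into the snapshotter.
    ctx := context.Background()
    image, err := client.Pull(ctx, "docker.io/library/alpine:latest", containerd.WithPullUnpack)
    if err != nil {
        log.Fatal(err)
    }
    log.Println("pulled", image.Name())
}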

17
vendor/github.com/containerd/containerd/client_unix.go generated vendored Normal file

@ -0,0 +1,17 @@
package containerd
import (
"fmt"
"net"
"strings"
"time"
)
func dialer(address string, timeout time.Duration) (net.Conn, error) {
address = strings.TrimPrefix(address, "unix://")
return net.DialTimeout("unix", address, timeout)
}
func dialAddress(address string) string {
return fmt.Sprintf("unix://%s", address)
}


@ -0,0 +1,16 @@
package containerd
import (
"net"
"time"
winio "github.com/Microsoft/go-winio"
)
func dialer(address string, timeout time.Duration) (net.Conn, error) {
return winio.DialPipe(address, &timeout)
}
func dialAddress(address string) string {
return address
}

110
vendor/github.com/containerd/containerd/container.go generated vendored Normal file

@ -0,0 +1,110 @@
package containerd
import (
"context"
"encoding/json"
"github.com/containerd/containerd/api/services/containers"
"github.com/containerd/containerd/api/services/execution"
"github.com/containerd/containerd/api/types/mount"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
type Container interface {
ID() string
Delete(context.Context) error
NewTask(context.Context, IOCreation) (Task, error)
Spec() (*specs.Spec, error)
Task() Task
}
func containerFromProto(client *Client, c containers.Container) *container {
return &container{
client: client,
c: c,
}
}
var _ = (Container)(&container{})
type container struct {
client *Client
c containers.Container
task *task
}
// ID returns the container's unique id
func (c *container) ID() string {
return c.c.ID
}
// Spec returns the current OCI specification for the container
func (c *container) Spec() (*specs.Spec, error) {
var s specs.Spec
if err := json.Unmarshal(c.c.Spec.Value, &s); err != nil {
return nil, err
}
return &s, nil
}
// Delete deletes an existing container
// an error is returned if the container has running tasks
func (c *container) Delete(ctx context.Context) (err error) {
// TODO: should the client be the one removing resources attached
// to the container at the moment before we have GC?
if c.c.RootFS != "" {
err = c.client.SnapshotService().Remove(ctx, c.c.RootFS)
}
if _, cerr := c.client.ContainerService().Delete(ctx, &containers.DeleteContainerRequest{
ID: c.c.ID,
}); err == nil {
err = cerr
}
return err
}
func (c *container) Task() Task {
return c.task
}
func (c *container) NewTask(ctx context.Context, ioCreate IOCreation) (Task, error) {
i, err := ioCreate()
if err != nil {
return nil, err
}
request := &execution.CreateRequest{
ContainerID: c.c.ID,
Terminal: i.Terminal,
Stdin: i.Stdin,
Stdout: i.Stdout,
Stderr: i.Stderr,
}
if c.c.RootFS != "" {
// get the rootfs from the snapshotter and add it to the request
mounts, err := c.client.SnapshotService().Mounts(ctx, c.c.RootFS)
if err != nil {
return nil, err
}
for _, m := range mounts {
request.Rootfs = append(request.Rootfs, &mount.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
})
}
}
response, err := c.client.TaskService().Create(ctx, request)
if err != nil {
return nil, err
}
t := &task{
client: c.client,
io: i,
containerID: response.ContainerID,
pid: response.Pid,
}
c.task = t
return t, nil
}

19
vendor/github.com/containerd/containerd/image.go generated vendored Normal file

@ -0,0 +1,19 @@
package containerd
import "github.com/containerd/containerd/images"
type Image interface {
Name() string
}
var _ = (Image)(&image{})
type image struct {
client *Client
i images.Image
}
func (i *image) Name() string {
return i.i.Name
}


@ -0,0 +1,30 @@
package content
import (
"github.com/containerd/containerd/content"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func rewriteGRPCError(err error) error {
switch grpc.Code(errors.Cause(err)) {
case codes.AlreadyExists:
return content.ErrExists
case codes.NotFound:
return content.ErrNotFound
}
return err
}
func serverErrorToGRPC(err error, id string) error {
switch {
case content.IsNotFound(err):
return grpc.Errorf(codes.NotFound, "%v: not found", id)
case content.IsExists(err):
return grpc.Errorf(codes.AlreadyExists, "%v: exists", id)
}
return err
}


@ -0,0 +1,51 @@
package content
import (
contentapi "github.com/containerd/containerd/api/services/content"
)
type remoteReader struct {
client contentapi.Content_ReadClient
extra []byte
}
func (rr *remoteReader) Read(p []byte) (n int, err error) {
n += copy(p, rr.extra)
if n >= len(p) {
if n <= len(rr.extra) {
rr.extra = rr.extra[n:]
} else {
rr.extra = rr.extra[:0]
}
return
}
rr.extra = rr.extra[:0]
p = p[n:]
for len(p) > 0 {
var resp *contentapi.ReadResponse
// fill our buffer up until we can fill p.
resp, err = rr.client.Recv()
if err != nil {
return
}
copied := copy(p, resp.Data)
n += copied
p = p[copied:]
if copied < len(p) {
continue
}
rr.extra = append(rr.extra, resp.Data[copied:]...)
}
return
}
// TODO(stevvooe): Implement io.ReaderAt.
func (rr *remoteReader) Close() error {
return rr.client.CloseSend()
}


@ -0,0 +1,401 @@
package content
import (
"io"
"sync"
"github.com/Sirupsen/logrus"
api "github.com/containerd/containerd/api/services/content"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/plugin"
"github.com/golang/protobuf/ptypes/empty"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
type Service struct {
store content.Store
}
var bufPool = sync.Pool{
New: func() interface{} {
return make([]byte, 1<<20)
},
}
var _ api.ContentServer = &Service{}
func init() {
plugin.Register("content-grpc", &plugin.Registration{
Type: plugin.GRPCPlugin,
Init: NewService,
})
}
func NewService(ic *plugin.InitContext) (interface{}, error) {
return &Service{
store: ic.Content,
}, nil
}
func (s *Service) Register(server *grpc.Server) error {
api.RegisterContentServer(server, s)
return nil
}
func (s *Service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
if err := req.Digest.Validate(); err != nil {
return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
}
bi, err := s.store.Info(ctx, req.Digest)
if err != nil {
return nil, serverErrorToGRPC(err, req.Digest.String())
}
return &api.InfoResponse{
Info: api.Info{
Digest: bi.Digest,
Size_: bi.Size,
CommittedAt: bi.CommittedAt,
},
}, nil
}
func (s *Service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
var (
buffer []api.Info
sendBlock = func(block []api.Info) error {
// send last block
return session.Send(&api.ListContentResponse{
Info: block,
})
}
)
if err := s.store.Walk(session.Context(), func(info content.Info) error {
buffer = append(buffer, api.Info{
Digest: info.Digest,
Size_: info.Size,
CommittedAt: info.CommittedAt,
})
if len(buffer) >= 100 {
if err := sendBlock(buffer); err != nil {
return err
}
buffer = buffer[:0]
}
return nil
}); err != nil {
return err
}
if len(buffer) > 0 {
// send last block
if err := sendBlock(buffer); err != nil {
return err
}
}
return nil
}
func (s *Service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*empty.Empty, error) {
if err := req.Digest.Validate(); err != nil {
return nil, grpc.Errorf(codes.InvalidArgument, err.Error())
}
if err := s.store.Delete(ctx, req.Digest); err != nil {
return nil, serverErrorToGRPC(err, req.Digest.String())
}
return &empty.Empty{}, nil
}
func (s *Service) Read(req *api.ReadRequest, session api.Content_ReadServer) error {
if err := req.Digest.Validate(); err != nil {
return grpc.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
}
oi, err := s.store.Info(session.Context(), req.Digest)
if err != nil {
return serverErrorToGRPC(err, req.Digest.String())
}
rc, err := s.store.Reader(session.Context(), req.Digest)
if err != nil {
return serverErrorToGRPC(err, req.Digest.String())
}
defer rc.Close() // TODO(stevvooe): Cache these file descriptors for performance.
ra, ok := rc.(io.ReaderAt)
if !ok {
// TODO(stevvooe): Need to set this up to get correct behavior across
// board. May change interface to store to just return ReaderAtCloser.
// Possibly, we could just return io.ReaderAt and handle file
// descriptors internally.
return errors.New("content service only supports content stores that return ReaderAt")
}
var (
offset = req.Offset
size = req.Size_
// TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably a
// little inefficient for work over a fast network. We can tune this later.
p = bufPool.Get().([]byte)
)
defer bufPool.Put(p)
if offset < 0 {
offset = 0
}
if size <= 0 {
size = oi.Size - offset
}
if offset+size > oi.Size {
return grpc.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
}
if _, err := io.CopyBuffer(
&readResponseWriter{session: session},
io.NewSectionReader(ra, offset, size), p); err != nil {
return err
}
return nil
}
// readResponseWriter is a writer that places the output into ReadResponse messages.
//
// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
// into the buffer size.
type readResponseWriter struct {
offset int64
session api.Content_ReadServer
}
func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
if err := rw.session.Send(&api.ReadResponse{
Offset: rw.offset,
Data: p,
}); err != nil {
return 0, err
}
rw.offset += int64(len(p))
return len(p), nil
}
func (s *Service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
statuses, err := s.store.Status(ctx, req.Regexp)
if err != nil {
return nil, serverErrorToGRPC(err, req.Regexp)
}
var resp api.StatusResponse
for _, status := range statuses {
resp.Statuses = append(resp.Statuses, api.Status{
StartedAt: status.StartedAt,
UpdatedAt: status.UpdatedAt,
Ref: status.Ref,
Offset: status.Offset,
Total: status.Total,
Expected: status.Expected,
})
}
return &resp, nil
}
func (s *Service) Write(session api.Content_WriteServer) (err error) {
var (
ctx = session.Context()
msg api.WriteResponse
req *api.WriteRequest
ref string
total int64
expected digest.Digest
)
defer func(msg *api.WriteResponse) {
// pump through the last message if no error was encountered
if err != nil {
// TODO(stevvooe): Really need a log line here to track which
// errors are actually causing failure on the server side. May want
// to configure the service with an interceptor to make this work
// identically across all GRPC methods.
//
// This is pretty noisy, so we can remove it but leave it for now.
log.G(ctx).WithError(err).Error("(*Service).Write failed")
return
}
err = session.Send(msg)
}(&msg)
// handle the very first request!
req, err = session.Recv()
if err != nil {
return err
}
ref = req.Ref
if ref == "" {
return grpc.Errorf(codes.InvalidArgument, "first message must have a reference")
}
fields := logrus.Fields{
"ref": ref,
}
total = req.Total
expected = req.Expected
if total > 0 {
fields["total"] = total
}
if expected != "" {
fields["expected"] = expected
}
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))
log.G(ctx).Debug("(*Service).Write started")
// this action locks the writer for the session.
wr, err := s.store.Writer(ctx, ref, total, expected)
if err != nil {
return err
}
defer wr.Close()
for {
msg.Action = req.Action
ws, err := wr.Status()
if err != nil {
return err
}
msg.Offset = ws.Offset // always set the offset.
// NOTE(stevvooe): In general, there are two cases under which a remote
// writer is used.
//
// For pull, we almost always have this before fetching large content,
// through descriptors. We allow predeclaration of the expected size
// and digest.
//
// For push, it is more complex. If we want to cut through content into
// storage, we may have no expectation until we are done processing the
// content. The case here is the following:
//
// 1. Start writing content.
// 2. Compress inline.
// 3. Validate digest and size (maybe).
//
// Supporting these two paths is quite awkward but it lets both API
// users use the same writer style for each with a minimum of overhead.
if req.Expected != "" {
if expected != "" && expected != req.Expected {
return grpc.Errorf(codes.InvalidArgument, "inconsistent digest provided: %v != %v", req.Expected, expected)
}
expected = req.Expected
if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
if err := s.store.Abort(session.Context(), ref); err != nil {
log.G(ctx).WithError(err).Error("failed to abort write")
}
return grpc.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
}
}
if req.Total > 0 {
// Update the expected total. Typically, this could be seen at
// negotiation time or on a commit message.
if total > 0 && req.Total != total {
return grpc.Errorf(codes.InvalidArgument, "inconsistent total provided: %v != %v", req.Total, total)
}
total = req.Total
}
switch req.Action {
case api.WriteActionStat:
msg.Digest = wr.Digest()
msg.StartedAt = ws.StartedAt
msg.UpdatedAt = ws.UpdatedAt
msg.Total = total
case api.WriteActionWrite, api.WriteActionCommit:
if req.Offset > 0 {
// validate the offset if provided
if req.Offset != ws.Offset {
return grpc.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
}
}
if req.Offset == 0 && ws.Offset > 0 {
if err := wr.Truncate(req.Offset); err != nil {
return errors.Wrapf(err, "truncate failed")
}
msg.Offset = req.Offset
}
// issue the write if we actually have data.
if len(req.Data) > 0 {
// While this looks like a case for io.WriterAt, the offset is maintained
// as append-only, so we simply issue the write.
n, err := wr.Write(req.Data)
if err != nil {
return err
}
if n != len(req.Data) {
// TODO(stevvooe): Perhaps, we can recover this by including it
// in the offset on the write return.
return grpc.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
}
msg.Offset += int64(n)
}
if req.Action == api.WriteActionCommit {
if err := wr.Commit(total, expected); err != nil {
return err
}
}
msg.Digest = wr.Digest()
}
if err := session.Send(&msg); err != nil {
return err
}
req, err = session.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
}
}
func (s *Service) Abort(ctx context.Context, req *api.AbortRequest) (*empty.Empty, error) {
if err := s.store.Abort(ctx, req.Ref); err != nil {
return nil, serverErrorToGRPC(err, req.Ref)
}
return &empty.Empty{}, nil
}
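For reference, here is a minimal client-side sketch of the stat → write → commit sequence this handler expects. It is a sketch only: the gRPC connection, payload and expected digest are assumed to exist, the helper name pushBlob is hypothetical, and error handling is abbreviated.

```
import (
	"context"

	contentapi "github.com/containerd/containerd/api/services/content"
	digest "github.com/opencontainers/go-digest"
)

// pushBlob drives the Content.Write stream directly: stat the ref to learn
// the current offset, write the payload, then commit it against the
// expected digest. Hypothetical helper; error handling is abbreviated.
func pushBlob(ctx context.Context, client contentapi.ContentClient, ref string, data []byte, expected digest.Digest) error {
	wc, err := client.Write(ctx)
	if err != nil {
		return err
	}
	defer wc.CloseSend()
	// The first message must carry the ref.
	if err := wc.Send(&contentapi.WriteRequest{
		Action:   contentapi.WriteActionStat,
		Ref:      ref,
		Total:    int64(len(data)),
		Expected: expected,
	}); err != nil {
		return err
	}
	resp, err := wc.Recv()
	if err != nil {
		return err
	}
	// Write at the offset the server reported, then commit.
	if err := wc.Send(&contentapi.WriteRequest{
		Action: contentapi.WriteActionWrite,
		Offset: resp.Offset,
		Data:   data,
	}); err != nil {
		return err
	}
	if _, err := wc.Recv(); err != nil {
		return err
	}
	if err := wc.Send(&contentapi.WriteRequest{
		Action:   contentapi.WriteActionCommit,
		Total:    int64(len(data)),
		Offset:   resp.Offset + int64(len(data)),
		Expected: expected,
	}); err != nil {
		return err
	}
	_, err = wc.Recv()
	return err
}
```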

View File

@ -0,0 +1,155 @@
package content
import (
"context"
"io"
contentapi "github.com/containerd/containerd/api/services/content"
"github.com/containerd/containerd/content"
digest "github.com/opencontainers/go-digest"
)
type remoteStore struct {
client contentapi.ContentClient
}
func NewStoreFromClient(client contentapi.ContentClient) content.Store {
return &remoteStore{
client: client,
}
}
func (rs *remoteStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
resp, err := rs.client.Info(ctx, &contentapi.InfoRequest{
Digest: dgst,
})
if err != nil {
return content.Info{}, rewriteGRPCError(err)
}
return content.Info{
Digest: resp.Info.Digest,
Size: resp.Info.Size_,
CommittedAt: resp.Info.CommittedAt,
}, nil
}
func (rs *remoteStore) Walk(ctx context.Context, fn content.WalkFunc) error {
session, err := rs.client.List(ctx, &contentapi.ListContentRequest{})
if err != nil {
return rewriteGRPCError(err)
}
for {
msg, err := session.Recv()
if err != nil {
if err != io.EOF {
return rewriteGRPCError(err)
}
break
}
for _, info := range msg.Info {
if err := fn(content.Info{
Digest: info.Digest,
Size: info.Size_,
CommittedAt: info.CommittedAt,
}); err != nil {
return err
}
}
}
return nil
}
func (rs *remoteStore) Delete(ctx context.Context, dgst digest.Digest) error {
if _, err := rs.client.Delete(ctx, &contentapi.DeleteContentRequest{
Digest: dgst,
}); err != nil {
return rewriteGRPCError(err)
}
return nil
}
func (rs *remoteStore) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
client, err := rs.client.Read(ctx, &contentapi.ReadRequest{Digest: dgst})
if err != nil {
return nil, err
}
return &remoteReader{
client: client,
}, nil
}
func (rs *remoteStore) Status(ctx context.Context, re string) ([]content.Status, error) {
resp, err := rs.client.Status(ctx, &contentapi.StatusRequest{
Regexp: re,
})
if err != nil {
return nil, rewriteGRPCError(err)
}
var statuses []content.Status
for _, status := range resp.Statuses {
statuses = append(statuses, content.Status{
Ref: status.Ref,
StartedAt: status.StartedAt,
UpdatedAt: status.UpdatedAt,
Offset: status.Offset,
Total: status.Total,
Expected: status.Expected,
})
}
return statuses, nil
}
func (rs *remoteStore) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
wrclient, offset, err := rs.negotiate(ctx, ref, size, expected)
if err != nil {
return nil, rewriteGRPCError(err)
}
return &remoteWriter{
client: wrclient,
offset: offset,
}, nil
}
// Abort implements asynchronous abort: it asks the server to discard any in-progress write session for the ref.
func (rs *remoteStore) Abort(ctx context.Context, ref string) error {
if _, err := rs.client.Abort(ctx, &contentapi.AbortRequest{
Ref: ref,
}); err != nil {
return rewriteGRPCError(err)
}
return nil
}
func (rs *remoteStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
wrclient, err := rs.client.Write(ctx)
if err != nil {
return nil, 0, err
}
if err := wrclient.Send(&contentapi.WriteRequest{
Action: contentapi.WriteActionStat,
Ref: ref,
Total: size,
Expected: expected,
}); err != nil {
return nil, 0, err
}
resp, err := wrclient.Recv()
if err != nil {
return nil, 0, err
}
return wrclient, resp.Offset, nil
}

View File

@ -0,0 +1,121 @@
package content
import (
"io"
contentapi "github.com/containerd/containerd/api/services/content"
"github.com/containerd/containerd/content"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type remoteWriter struct {
ref string
client contentapi.Content_WriteClient
offset int64
digest digest.Digest
}
func newRemoteWriter(client contentapi.Content_WriteClient, ref string, offset int64) (*remoteWriter, error) {
return &remoteWriter{
ref: ref,
client: client,
offset: offset,
}, nil
}
// send performs a synchronous req-resp cycle on the client.
func (rw *remoteWriter) send(req *contentapi.WriteRequest) (*contentapi.WriteResponse, error) {
if err := rw.client.Send(req); err != nil {
return nil, err
}
resp, err := rw.client.Recv()
if err == nil {
// try to keep these in sync
if resp.Digest != "" {
rw.digest = resp.Digest
}
}
return resp, err
}
func (rw *remoteWriter) Status() (content.Status, error) {
resp, err := rw.send(&contentapi.WriteRequest{
Action: contentapi.WriteActionStat,
})
if err != nil {
return content.Status{}, err
}
return content.Status{
Ref: rw.ref,
Offset: resp.Offset,
StartedAt: resp.StartedAt,
UpdatedAt: resp.UpdatedAt,
}, nil
}
func (rw *remoteWriter) Digest() digest.Digest {
return rw.digest
}
func (rw *remoteWriter) Write(p []byte) (n int, err error) {
offset := rw.offset
resp, err := rw.send(&contentapi.WriteRequest{
Action: contentapi.WriteActionWrite,
Offset: offset,
Data: p,
})
if err != nil {
return 0, err
}
n = int(resp.Offset - offset)
if n < len(p) {
err = io.ErrShortWrite
}
rw.offset += int64(n)
if resp.Digest != "" {
rw.digest = resp.Digest
}
return
}
func (rw *remoteWriter) Commit(size int64, expected digest.Digest) error {
resp, err := rw.send(&contentapi.WriteRequest{
Action: contentapi.WriteActionCommit,
Total: size,
Offset: rw.offset,
Expected: expected,
})
if err != nil {
return rewriteGRPCError(err)
}
if size != 0 && resp.Offset != size {
return errors.Errorf("unexpected size: %v != %v", resp.Offset, size)
}
if expected != "" && resp.Digest != expected {
return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
}
rw.digest = resp.Digest
rw.offset = resp.Offset
return nil
}
func (rw *remoteWriter) Truncate(size int64) error {
// This truncation won't actually be validated until a write is issued.
rw.offset = size
return nil
}
func (rw *remoteWriter) Close() error {
return rw.client.CloseSend()
}
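In practice a caller goes through the content.Store interface rather than the raw stream. A hedged sketch using NewStoreFromClient and the writer above (connection setup assumed; writeBlob is a hypothetical helper):

```
import (
	"context"

	contentapi "github.com/containerd/containerd/api/services/content"
	digest "github.com/opencontainers/go-digest"
)

// writeBlob obtains a writer for a ref from the remote store, writes the
// data and commits it against its digest. Hypothetical helper sketch.
func writeBlob(ctx context.Context, client contentapi.ContentClient, ref string, data []byte) (digest.Digest, error) {
	store := NewStoreFromClient(client) // constructor defined earlier in this package
	w, err := store.Writer(ctx, ref, int64(len(data)), "")
	if err != nil {
		return "", err
	}
	defer w.Close()
	if _, err := w.Write(data); err != nil {
		return "", err
	}
	dgst := digest.FromBytes(data)
	if err := w.Commit(int64(len(data)), dgst); err != nil {
		return "", err
	}
	return dgst, nil
}
```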

View File

@ -0,0 +1,75 @@
package diff
import (
"context"
diffapi "github.com/containerd/containerd/api/services/diff"
"github.com/containerd/containerd/api/types/descriptor"
mounttypes "github.com/containerd/containerd/api/types/mount"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/rootfs"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type DiffService interface {
rootfs.Applier
rootfs.MountDiffer
}
// NewDiffServiceFromClient returns a new DiffService which communicates
// over a GRPC connection.
func NewDiffServiceFromClient(client diffapi.DiffClient) DiffService {
return &remote{
client: client,
}
}
type remote struct {
client diffapi.DiffClient
}
func (r *remote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
req := &diffapi.ApplyRequest{
Diff: fromDescriptor(diff),
Mounts: fromMounts(mounts),
}
resp, err := r.client.Apply(ctx, req)
if err != nil {
return ocispec.Descriptor{}, err
}
return toDescriptor(resp.Applied), nil
}
func (r *remote) DiffMounts(ctx context.Context, a, b []mount.Mount, media, ref string) (ocispec.Descriptor, error) {
req := &diffapi.DiffRequest{
Left: fromMounts(a),
Right: fromMounts(b),
MediaType: media,
Ref: ref,
}
resp, err := r.client.Diff(ctx, req)
if err != nil {
return ocispec.Descriptor{}, err
}
return toDescriptor(resp.Diff), nil
}
func fromDescriptor(d ocispec.Descriptor) *descriptor.Descriptor {
return &descriptor.Descriptor{
MediaType: d.MediaType,
Digest: d.Digest,
Size_: d.Size,
}
}
func fromMounts(mounts []mount.Mount) []*mounttypes.Mount {
apiMounts := make([]*mounttypes.Mount, len(mounts))
for i, m := range mounts {
apiMounts[i] = &mounttypes.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
}
}
return apiMounts
}
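A hedged usage sketch for this client: apply a layer blob onto a set of target mounts, then record the difference between two mounted snapshots as new content. The descriptor, mounts and the "diff-ref" reference are illustrative.

```
import (
	"context"

	diffapi "github.com/containerd/containerd/api/services/diff"
	"github.com/containerd/containerd/mount"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// applyAndDiff is a hypothetical helper: apply a layer onto target mounts,
// then diff a lower and an upper snapshot into a new content blob.
func applyAndDiff(ctx context.Context, client diffapi.DiffClient, layer ocispec.Descriptor, target, lower, upper []mount.Mount) (ocispec.Descriptor, error) {
	ds := NewDiffServiceFromClient(client)
	if _, err := ds.Apply(ctx, layer, target); err != nil {
		return ocispec.Descriptor{}, err
	}
	// "diff-ref" is only an illustrative upload reference.
	return ds.DiffMounts(ctx, lower, upper, ocispec.MediaTypeImageLayer, "diff-ref")
}
```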

View File

@ -0,0 +1,203 @@
package diff
import (
"io"
"io/ioutil"
"os"
diffapi "github.com/containerd/containerd/api/services/diff"
"github.com/containerd/containerd/api/types/descriptor"
mounttypes "github.com/containerd/containerd/api/types/mount"
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/snapshot"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
func init() {
plugin.Register("diff-grpc", &plugin.Registration{
Type: plugin.GRPCPlugin,
Init: func(ic *plugin.InitContext) (interface{}, error) {
return newService(ic.Content, ic.Snapshotter)
},
})
}
type service struct {
store content.Store
snapshotter snapshot.Snapshotter
}
func newService(store content.Store, snapshotter snapshot.Snapshotter) (*service, error) {
return &service{
store: store,
snapshotter: snapshotter,
}, nil
}
func (s *service) Register(gs *grpc.Server) error {
diffapi.RegisterDiffServer(gs, s)
return nil
}
func (s *service) Apply(ctx context.Context, er *diffapi.ApplyRequest) (*diffapi.ApplyResponse, error) {
desc := toDescriptor(er.Diff)
// TODO: Check for supported media types
mounts := toMounts(er.Mounts)
dir, err := ioutil.TempDir("", "extract-")
if err != nil {
return nil, errors.Wrap(err, "failed to create temporary directory")
}
defer os.RemoveAll(dir)
if err := mount.MountAll(mounts, dir); err != nil {
return nil, errors.Wrap(err, "failed to mount")
}
defer mount.Unmount(dir, 0)
r, err := s.store.Reader(ctx, desc.Digest)
if err != nil {
return nil, errors.Wrap(err, "failed to get reader from content store")
}
defer r.Close()
// TODO: only decompress stream if media type is compressed
ds, err := compression.DecompressStream(r)
if err != nil {
return nil, err
}
defer ds.Close()
digester := digest.Canonical.Digester()
rc := &readCounter{
r: io.TeeReader(ds, digester.Hash()),
}
if _, err := archive.Apply(ctx, dir, rc); err != nil {
return nil, err
}
// Read any trailing data
if _, err := io.Copy(ioutil.Discard, rc); err != nil {
return nil, err
}
resp := &diffapi.ApplyResponse{
Applied: &descriptor.Descriptor{
MediaType: ocispec.MediaTypeImageLayer,
Digest: digester.Digest(),
Size_: rc.c,
},
}
return resp, nil
}
func (s *service) Diff(ctx context.Context, dr *diffapi.DiffRequest) (*diffapi.DiffResponse, error) {
aMounts := toMounts(dr.Left)
bMounts := toMounts(dr.Right)
aDir, err := ioutil.TempDir("", "left-")
if err != nil {
return nil, errors.Wrap(err, "failed to create temporary directory")
}
defer os.RemoveAll(aDir)
bDir, err := ioutil.TempDir("", "right-")
if err != nil {
return nil, errors.Wrap(err, "failed to create temporary directory")
}
defer os.RemoveAll(bDir)
if err := mount.MountAll(aMounts, aDir); err != nil {
return nil, errors.Wrap(err, "failed to mount")
}
defer mount.Unmount(aDir, 0)
if err := mount.MountAll(bMounts, bDir); err != nil {
return nil, errors.Wrap(err, "failed to mount")
}
defer mount.Unmount(bDir, 0)
cw, err := s.store.Writer(ctx, dr.Ref, 0, "")
if err != nil {
return nil, errors.Wrap(err, "failed to open writer")
}
// TODO: Validate media type
// TODO: Support compressed media types (link compressed to uncompressed)
//dgstr := digest.SHA256.Digester()
//wc := &writeCounter{}
//compressed, err := compression.CompressStream(cw, compression.Gzip)
//if err != nil {
// return nil, errors.Wrap(err, "failed to get compressed stream")
//}
//err = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash(), wc), lowerDir, upperDir)
//compressed.Close()
err = archive.WriteDiff(ctx, cw, aDir, bDir)
if err != nil {
return nil, errors.Wrap(err, "failed to write diff")
}
dgst := cw.Digest()
if err := cw.Commit(0, dgst); err != nil {
return nil, errors.Wrap(err, "failed to commit")
}
info, err := s.store.Info(ctx, dgst)
if err != nil {
return nil, errors.Wrap(err, "failed to get info from content store")
}
desc := ocispec.Descriptor{
MediaType: dr.MediaType,
Digest: info.Digest,
Size: info.Size,
}
return &diffapi.DiffResponse{
Diff: fromDescriptor(desc),
}, nil
}
type readCounter struct {
r io.Reader
c int64
}
func (rc *readCounter) Read(p []byte) (n int, err error) {
n, err = rc.r.Read(p)
rc.c += int64(n)
return
}
func toDescriptor(d *descriptor.Descriptor) ocispec.Descriptor {
return ocispec.Descriptor{
MediaType: d.MediaType,
Digest: d.Digest,
Size: d.Size_,
}
}
func toMounts(apim []*mounttypes.Mount) []mount.Mount {
mounts := make([]mount.Mount, len(apim))
for i, m := range apim {
mounts[i] = mount.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
}
}
return mounts
}

View File

@ -0,0 +1,60 @@
package images
import (
"context"
imagesapi "github.com/containerd/containerd/api/services/images"
"github.com/containerd/containerd/images"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type remoteStore struct {
client imagesapi.ImagesClient
}
func NewStoreFromClient(client imagesapi.ImagesClient) images.Store {
return &remoteStore{
client: client,
}
}
func (s *remoteStore) Put(ctx context.Context, name string, desc ocispec.Descriptor) error {
// TODO(stevvooe): Consider that the remote may want to augment and return
// a modified image.
_, err := s.client.Put(ctx, &imagesapi.PutRequest{
Image: imagesapi.Image{
Name: name,
Target: descToProto(&desc),
},
})
return rewriteGRPCError(err)
}
func (s *remoteStore) Get(ctx context.Context, name string) (images.Image, error) {
resp, err := s.client.Get(ctx, &imagesapi.GetRequest{
Name: name,
})
if err != nil {
return images.Image{}, rewriteGRPCError(err)
}
return imageFromProto(resp.Image), nil
}
func (s *remoteStore) List(ctx context.Context) ([]images.Image, error) {
resp, err := s.client.List(ctx, &imagesapi.ListRequest{})
if err != nil {
return nil, rewriteGRPCError(err)
}
return imagesFromProto(resp.Images), nil
}
func (s *remoteStore) Delete(ctx context.Context, name string) error {
_, err := s.client.Delete(ctx, &imagesapi.DeleteRequest{
Name: name,
})
return rewriteGRPCError(err)
}
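A small sketch of the image store client in use, registering a name for an existing descriptor and reading the record back. tagImage is a hypothetical helper; the descriptor is assumed to already be in the content store.

```
import (
	"context"

	imagesapi "github.com/containerd/containerd/api/services/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// tagImage stores a name -> descriptor mapping and reads it back.
func tagImage(ctx context.Context, client imagesapi.ImagesClient, name string, desc ocispec.Descriptor) error {
	store := NewStoreFromClient(client)
	if err := store.Put(ctx, name, desc); err != nil {
		return err
	}
	img, err := store.Get(ctx, name)
	if err != nil {
		return err
	}
	_ = img.Target // the same descriptor that was put
	return nil
}
```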

View File

@ -0,0 +1,87 @@
package images
import (
imagesapi "github.com/containerd/containerd/api/services/images"
"github.com/containerd/containerd/api/types/descriptor"
"github.com/containerd/containerd/images"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func imagesToProto(images []images.Image) []imagesapi.Image {
var imagespb []imagesapi.Image
for _, image := range images {
imagespb = append(imagespb, imageToProto(&image))
}
return imagespb
}
func imagesFromProto(imagespb []imagesapi.Image) []images.Image {
var images []images.Image
for _, image := range imagespb {
images = append(images, imageFromProto(&image))
}
return images
}
func imageToProto(image *images.Image) imagesapi.Image {
return imagesapi.Image{
Name: image.Name,
Target: descToProto(&image.Target),
}
}
func imageFromProto(imagepb *imagesapi.Image) images.Image {
return images.Image{
Name: imagepb.Name,
Target: descFromProto(&imagepb.Target),
}
}
func descFromProto(desc *descriptor.Descriptor) ocispec.Descriptor {
return ocispec.Descriptor{
MediaType: desc.MediaType,
Size: desc.Size_,
Digest: desc.Digest,
}
}
func descToProto(desc *ocispec.Descriptor) descriptor.Descriptor {
return descriptor.Descriptor{
MediaType: desc.MediaType,
Size_: desc.Size,
Digest: desc.Digest,
}
}
func rewriteGRPCError(err error) error {
if err == nil {
return err
}
switch grpc.Code(errors.Cause(err)) {
case codes.AlreadyExists:
return images.ErrExists
case codes.NotFound:
return images.ErrNotFound
}
return err
}
func mapGRPCError(err error, id string) error {
switch {
case images.IsNotFound(err):
return grpc.Errorf(codes.NotFound, "image %v not found", id)
case images.IsExists(err):
return grpc.Errorf(codes.AlreadyExists, "image %v already exists", id)
}
return err
}

View File

@ -0,0 +1,85 @@
package images
import (
"github.com/boltdb/bolt"
imagesapi "github.com/containerd/containerd/api/services/images"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/plugin"
"github.com/golang/protobuf/ptypes/empty"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
func init() {
plugin.Register("images-grpc", &plugin.Registration{
Type: plugin.GRPCPlugin,
Init: func(ic *plugin.InitContext) (interface{}, error) {
return NewService(ic.Meta), nil
},
})
}
type Service struct {
db *bolt.DB
}
func NewService(db *bolt.DB) imagesapi.ImagesServer {
return &Service{db: db}
}
func (s *Service) Register(server *grpc.Server) error {
imagesapi.RegisterImagesServer(server, s)
return nil
}
func (s *Service) Get(ctx context.Context, req *imagesapi.GetRequest) (*imagesapi.GetResponse, error) {
var resp imagesapi.GetResponse
return &resp, s.withStoreView(ctx, func(ctx context.Context, store images.Store) error {
image, err := store.Get(ctx, req.Name)
if err != nil {
return mapGRPCError(err, req.Name)
}
imagepb := imageToProto(&image)
resp.Image = &imagepb
return nil
})
}
func (s *Service) Put(ctx context.Context, req *imagesapi.PutRequest) (*empty.Empty, error) {
return &empty.Empty{}, s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error {
return mapGRPCError(store.Put(ctx, req.Image.Name, descFromProto(&req.Image.Target)), req.Image.Name)
})
}
func (s *Service) List(ctx context.Context, _ *imagesapi.ListRequest) (*imagesapi.ListResponse, error) {
var resp imagesapi.ListResponse
return &resp, s.withStoreView(ctx, func(ctx context.Context, store images.Store) error {
images, err := store.List(ctx)
if err != nil {
return mapGRPCError(err, "")
}
resp.Images = imagesToProto(images)
return nil
})
}
func (s *Service) Delete(ctx context.Context, req *imagesapi.DeleteRequest) (*empty.Empty, error) {
return &empty.Empty{}, s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error {
return mapGRPCError(store.Delete(ctx, req.Name), req.Name)
})
}
func (s *Service) withStore(ctx context.Context, fn func(ctx context.Context, store images.Store) error) func(tx *bolt.Tx) error {
return func(tx *bolt.Tx) error { return fn(ctx, images.NewStore(tx)) }
}
func (s *Service) withStoreView(ctx context.Context, fn func(ctx context.Context, store images.Store) error) error {
return s.db.View(s.withStore(ctx, fn))
}
func (s *Service) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store images.Store) error) error {
return s.db.Update(s.withStore(ctx, fn))
}

View File

@ -0,0 +1,158 @@
package snapshot
import (
"context"
"io"
"strings"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
snapshotapi "github.com/containerd/containerd/api/services/snapshot"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/snapshot"
"github.com/pkg/errors"
)
// NewSnapshotterFromClient returns a new Snapshotter which communicates
// over a GRPC connection.
func NewSnapshotterFromClient(client snapshotapi.SnapshotClient) snapshot.Snapshotter {
return &remoteSnapshotter{
client: client,
}
}
type remoteSnapshotter struct {
client snapshotapi.SnapshotClient
}
func (r *remoteSnapshotter) Stat(ctx context.Context, key string) (snapshot.Info, error) {
resp, err := r.client.Stat(ctx, &snapshotapi.StatRequest{Key: key})
if err != nil {
return snapshot.Info{}, rewriteGRPCError(err)
}
return toInfo(resp.Info), nil
}
func (r *remoteSnapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, error) {
resp, err := r.client.Usage(ctx, &snapshotapi.UsageRequest{Key: key})
if err != nil {
return snapshot.Usage{}, rewriteGRPCError(err)
}
return toUsage(resp), nil
}
func (r *remoteSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
resp, err := r.client.Mounts(ctx, &snapshotapi.MountsRequest{Key: key})
if err != nil {
return nil, rewriteGRPCError(err)
}
return toMounts(resp), nil
}
func (r *remoteSnapshotter) Prepare(ctx context.Context, key, parent string) ([]mount.Mount, error) {
resp, err := r.client.Prepare(ctx, &snapshotapi.PrepareRequest{Key: key, Parent: parent})
if err != nil {
return nil, rewriteGRPCError(err)
}
return toMounts(resp), nil
}
func (r *remoteSnapshotter) View(ctx context.Context, key, parent string) ([]mount.Mount, error) {
resp, err := r.client.View(ctx, &snapshotapi.PrepareRequest{Key: key, Parent: parent})
if err != nil {
return nil, rewriteGRPCError(err)
}
return toMounts(resp), nil
}
func (r *remoteSnapshotter) Commit(ctx context.Context, name, key string) error {
_, err := r.client.Commit(ctx, &snapshotapi.CommitRequest{
Name: name,
Key: key,
})
return rewriteGRPCError(err)
}
func (r *remoteSnapshotter) Remove(ctx context.Context, key string) error {
_, err := r.client.Remove(ctx, &snapshotapi.RemoveRequest{Key: key})
return rewriteGRPCError(err)
}
func (r *remoteSnapshotter) Walk(ctx context.Context, fn func(context.Context, snapshot.Info) error) error {
sc, err := r.client.List(ctx, &snapshotapi.ListRequest{})
if err != nil {
return rewriteGRPCError(err)
}
for {
resp, err := sc.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return rewriteGRPCError(err)
}
if resp == nil {
return nil
}
for _, info := range resp.Info {
if err := fn(ctx, toInfo(info)); err != nil {
return err
}
}
}
}
func rewriteGRPCError(err error) error {
switch grpc.Code(errors.Cause(err)) {
case codes.AlreadyExists:
return snapshot.ErrSnapshotExist
case codes.NotFound:
return snapshot.ErrSnapshotNotExist
case codes.FailedPrecondition:
desc := grpc.ErrorDesc(errors.Cause(err))
if strings.Contains(desc, snapshot.ErrSnapshotNotActive.Error()) {
return snapshot.ErrSnapshotNotActive
}
if strings.Contains(desc, snapshot.ErrSnapshotNotCommitted.Error()) {
return snapshot.ErrSnapshotNotCommitted
}
}
return err
}
func toKind(kind snapshotapi.Kind) snapshot.Kind {
if kind == snapshotapi.KindActive {
return snapshot.KindActive
}
return snapshot.KindCommitted
}
func toInfo(info snapshotapi.Info) snapshot.Info {
return snapshot.Info{
Name: info.Name,
Parent: info.Parent,
Kind: toKind(info.Kind),
Readonly: info.Readonly,
}
}
func toUsage(resp *snapshotapi.UsageResponse) snapshot.Usage {
return snapshot.Usage{
Inodes: resp.Inodes,
Size: resp.Size_,
}
}
func toMounts(resp *snapshotapi.MountsResponse) []mount.Mount {
mounts := make([]mount.Mount, len(resp.Mounts))
for i, m := range resp.Mounts {
mounts[i] = mount.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
}
}
return mounts
}
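To show the snapshotter client in context, a hedged sketch that prepares an active snapshot on top of a committed parent, mounts it so it can be modified, and commits the result. Keys, names and the target directory are illustrative.

```
import (
	"context"

	snapshotapi "github.com/containerd/containerd/api/services/snapshot"
	"github.com/containerd/containerd/mount"
)

// prepareAndCommit is a hypothetical helper sketch for the usual
// prepare -> mount -> modify -> commit snapshot lifecycle.
func prepareAndCommit(ctx context.Context, client snapshotapi.SnapshotClient, key, parent, name, target string) error {
	sn := NewSnapshotterFromClient(client)
	mounts, err := sn.Prepare(ctx, key, parent)
	if err != nil {
		return err
	}
	if err := mount.MountAll(mounts, target); err != nil {
		return err
	}
	defer mount.Unmount(target, 0)
	// ... modify the filesystem under target here ...
	return sn.Commit(ctx, name, key)
}
```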

View File

@ -0,0 +1,204 @@
package snapshot
import (
gocontext "context"
snapshotapi "github.com/containerd/containerd/api/services/snapshot"
mounttypes "github.com/containerd/containerd/api/types/mount"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/snapshot"
protoempty "github.com/golang/protobuf/ptypes/empty"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func init() {
plugin.Register("snapshots-grpc", &plugin.Registration{
Type: plugin.GRPCPlugin,
Init: func(ic *plugin.InitContext) (interface{}, error) {
return newService(ic.Snapshotter)
},
})
}
var empty = &protoempty.Empty{}
type service struct {
snapshotter snapshot.Snapshotter
}
func newService(snapshotter snapshot.Snapshotter) (*service, error) {
return &service{
snapshotter: snapshotter,
}, nil
}
func (s *service) Register(gs *grpc.Server) error {
snapshotapi.RegisterSnapshotServer(gs, s)
return nil
}
func (s *service) Prepare(ctx context.Context, pr *snapshotapi.PrepareRequest) (*snapshotapi.MountsResponse, error) {
log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("Preparing snapshot")
// TODO: Apply namespace
// TODO: Lookup snapshot id from metadata store
mounts, err := s.snapshotter.Prepare(ctx, pr.Key, pr.Parent)
if err != nil {
return nil, grpcError(err)
}
return fromMounts(mounts), nil
}
func (s *service) View(ctx context.Context, pr *snapshotapi.PrepareRequest) (*snapshotapi.MountsResponse, error) {
log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("Preparing view snapshot")
// TODO: Apply namespace
// TODO: Lookup snapshot id from metadata store
mounts, err := s.snapshotter.View(ctx, pr.Key, pr.Parent)
if err != nil {
return nil, grpcError(err)
}
return fromMounts(mounts), nil
}
func (s *service) Mounts(ctx context.Context, mr *snapshotapi.MountsRequest) (*snapshotapi.MountsResponse, error) {
log.G(ctx).WithField("key", mr.Key).Debugf("Getting snapshot mounts")
// TODO: Apply namespace
// TODO: Lookup snapshot id from metadata store
mounts, err := s.snapshotter.Mounts(ctx, mr.Key)
if err != nil {
return nil, grpcError(err)
}
return fromMounts(mounts), nil
}
func (s *service) Commit(ctx context.Context, cr *snapshotapi.CommitRequest) (*protoempty.Empty, error) {
log.G(ctx).WithField("key", cr.Key).WithField("name", cr.Name).Debugf("Committing snapshot")
// TODO: Apply namespace
// TODO: Lookup snapshot id from metadata store
if err := s.snapshotter.Commit(ctx, cr.Name, cr.Key); err != nil {
return nil, grpcError(err)
}
return empty, nil
}
func (s *service) Remove(ctx context.Context, rr *snapshotapi.RemoveRequest) (*protoempty.Empty, error) {
log.G(ctx).WithField("key", rr.Key).Debugf("Removing snapshot")
// TODO: Apply namespace
// TODO: Lookup snapshot id from metadata store
if err := s.snapshotter.Remove(ctx, rr.Key); err != nil {
return nil, grpcError(err)
}
return empty, nil
}
func (s *service) Stat(ctx context.Context, sr *snapshotapi.StatRequest) (*snapshotapi.StatResponse, error) {
log.G(ctx).WithField("key", sr.Key).Debugf("Statting snapshot")
// TODO: Apply namespace
info, err := s.snapshotter.Stat(ctx, sr.Key)
if err != nil {
return nil, grpcError(err)
}
return &snapshotapi.StatResponse{Info: fromInfo(info)}, nil
}
func (s *service) List(sr *snapshotapi.ListRequest, ss snapshotapi.Snapshot_ListServer) error {
// TODO: Apply namespace
var (
buffer []snapshotapi.Info
sendBlock = func(block []snapshotapi.Info) error {
return ss.Send(&snapshotapi.ListResponse{
Info: block,
})
}
)
err := s.snapshotter.Walk(ss.Context(), func(ctx gocontext.Context, info snapshot.Info) error {
buffer = append(buffer, fromInfo(info))
if len(buffer) >= 100 {
if err := sendBlock(buffer); err != nil {
return err
}
buffer = buffer[:0]
}
return nil
})
if err != nil {
return err
}
if len(buffer) > 0 {
// Send remaining infos
if err := sendBlock(buffer); err != nil {
return err
}
}
return nil
}
func (s *service) Usage(ctx context.Context, ur *snapshotapi.UsageRequest) (*snapshotapi.UsageResponse, error) {
// TODO: Apply namespace
usage, err := s.snapshotter.Usage(ctx, ur.Key)
if err != nil {
return nil, grpcError(err)
}
return fromUsage(usage), nil
}
func grpcError(err error) error {
if snapshot.IsNotExist(err) {
return grpc.Errorf(codes.NotFound, err.Error())
}
if snapshot.IsExist(err) {
return grpc.Errorf(codes.AlreadyExists, err.Error())
}
if snapshot.IsNotActive(err) || snapshot.IsNotCommitted(err) {
return grpc.Errorf(codes.FailedPrecondition, err.Error())
}
return err
}
func fromKind(kind snapshot.Kind) snapshotapi.Kind {
if kind == snapshot.KindActive {
return snapshotapi.KindActive
}
return snapshotapi.KindCommitted
}
func fromInfo(info snapshot.Info) snapshotapi.Info {
return snapshotapi.Info{
Name: info.Name,
Parent: info.Parent,
Kind: fromKind(info.Kind),
Readonly: info.Readonly,
}
}
func fromUsage(usage snapshot.Usage) *snapshotapi.UsageResponse {
return &snapshotapi.UsageResponse{
Inodes: usage.Inodes,
Size_: usage.Size,
}
}
func fromMounts(mounts []mount.Mount) *snapshotapi.MountsResponse {
resp := &snapshotapi.MountsResponse{
Mounts: make([]*mounttypes.Mount, len(mounts)),
}
for i, m := range mounts {
resp.Mounts[i] = &mounttypes.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
}
}
return resp
}

27
vendor/github.com/containerd/containerd/spec.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
package containerd
import specs "github.com/opencontainers/runtime-spec/specs-go"
type SpecOpts func(s *specs.Spec) error
func WithProcessArgs(args ...string) SpecOpts {
return func(s *specs.Spec) error {
s.Process.Args = args
return nil
}
}
// GenerateSpec generates a default spec, applying the provided options,
// for use as a containerd container
func GenerateSpec(opts ...SpecOpts) (*specs.Spec, error) {
s, err := createDefaultSpec()
if err != nil {
return nil, err
}
for _, o := range opts {
if err := o(s); err != nil {
return nil, err
}
}
return s, nil
}
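As a quick illustration of the options pattern, a sketch that builds the platform default spec and overrides the process arguments (the command is illustrative):

```
import specs "github.com/opencontainers/runtime-spec/specs-go"

// exampleSpec builds a default spec and sets the process arguments.
func exampleSpec() (*specs.Spec, error) {
	return GenerateSpec(WithProcessArgs("/bin/sh", "-c", "echo hello"))
}
```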

255
vendor/github.com/containerd/containerd/spec_unix.go generated vendored Normal file
View File

@ -0,0 +1,255 @@
package containerd
import (
"context"
"encoding/json"
"fmt"
"runtime"
"strconv"
"strings"
"github.com/containerd/containerd/images"
"github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const (
rwm = "rwm"
defaultRootfsPath = "rootfs"
)
func defaltCaps() []string {
return []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
}
}
func defaultNamespaces() []specs.LinuxNamespace {
return []specs.LinuxNamespace{
{
Type: specs.PIDNamespace,
},
{
Type: specs.IPCNamespace,
},
{
Type: specs.UTSNamespace,
},
{
Type: specs.MountNamespace,
},
{
Type: specs.NetworkNamespace,
},
}
}
func createDefaultSpec() (*specs.Spec, error) {
s := &specs.Spec{
Version: specs.Version,
Platform: specs.Platform{
OS: runtime.GOOS,
Arch: runtime.GOARCH,
},
Root: specs.Root{
Path: defaultRootfsPath,
},
Process: specs.Process{
Cwd: "/",
NoNewPrivileges: true,
User: specs.User{
UID: 0,
GID: 0,
},
Capabilities: &specs.LinuxCapabilities{
Bounding: defaltCaps(),
Permitted: defaltCaps(),
Inheritable: defaltCaps(),
Effective: defaltCaps(),
Ambient: defaltCaps(),
},
Rlimits: []specs.LinuxRlimit{
{
Type: "RLIMIT_NOFILE",
Hard: uint64(1024),
Soft: uint64(1024),
},
},
},
Mounts: []specs.Mount{
{
Destination: "/proc",
Type: "proc",
Source: "proc",
},
{
Destination: "/dev",
Type: "tmpfs",
Source: "tmpfs",
Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
},
{
Destination: "/dev/pts",
Type: "devpts",
Source: "devpts",
Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
},
{
Destination: "/dev/shm",
Type: "tmpfs",
Source: "shm",
Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"},
},
{
Destination: "/dev/mqueue",
Type: "mqueue",
Source: "mqueue",
Options: []string{"nosuid", "noexec", "nodev"},
},
{
Destination: "/sys",
Type: "sysfs",
Source: "sysfs",
Options: []string{"nosuid", "noexec", "nodev", "ro"},
},
{
Destination: "/run",
Type: "tmpfs",
Source: "tmpfs",
Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
},
{
Destination: "/etc/resolv.conf",
Type: "bind",
Source: "/etc/resolv.conf",
Options: []string{"rbind", "ro"},
},
{
Destination: "/etc/hosts",
Type: "bind",
Source: "/etc/hosts",
Options: []string{"rbind", "ro"},
},
{
Destination: "/etc/localtime",
Type: "bind",
Source: "/etc/localtime",
Options: []string{"rbind", "ro"},
},
},
Linux: &specs.Linux{
Resources: &specs.LinuxResources{
Devices: []specs.LinuxDeviceCgroup{
{
Allow: false,
Access: rwm,
},
},
},
Namespaces: defaultNamespaces(),
},
}
return s, nil
}
func WithTTY(s *specs.Spec) error {
s.Process.Terminal = true
s.Process.Env = append(s.Process.Env, "TERM=xterm")
return nil
}
func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts {
return func(s *specs.Spec) error {
for i, n := range s.Linux.Namespaces {
if n.Type == ns {
s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...)
return nil
}
}
return nil
}
}
func WithImageConfig(ctx context.Context, i Image) SpecOpts {
return func(s *specs.Spec) error {
var (
image = i.(*image)
store = image.client.ContentStore()
)
ic, err := image.i.Config(ctx, store)
if err != nil {
return err
}
var (
ociimage v1.Image
config v1.ImageConfig
)
switch ic.MediaType {
case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
r, err := store.Reader(ctx, ic.Digest)
if err != nil {
return err
}
if err := json.NewDecoder(r).Decode(&ociimage); err != nil {
r.Close()
return err
}
r.Close()
config = ociimage.Config
default:
return fmt.Errorf("unknown image config media type %s", ic.MediaType)
}
env := []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
}
s.Process.Env = append(env, config.Env...)
var (
uid, gid uint32
)
cmd := config.Cmd
s.Process.Args = append(config.Entrypoint, cmd...)
if config.User != "" {
parts := strings.Split(config.User, ":")
switch len(parts) {
case 1:
v, err := strconv.ParseUint(parts[0], 0, 10)
if err != nil {
return err
}
uid, gid = uint32(v), uint32(v)
case 2:
v, err := strconv.ParseUint(parts[0], 0, 10)
if err != nil {
return err
}
uid = uint32(v)
if v, err = strconv.ParseUint(parts[1], 0, 10); err != nil {
return err
}
gid = uint32(v)
default:
return fmt.Errorf("invalid USER value %s", config.User)
}
}
s.Process.User.UID, s.Process.User.GID = uid, gid
cwd := config.WorkingDir
if cwd == "" {
cwd = "/"
}
s.Process.Cwd = cwd
return nil
}
}

View File

@ -0,0 +1,79 @@
package containerd
import (
"context"
"encoding/json"
"fmt"
"runtime"
"github.com/containerd/containerd/images"
"github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const pipeRoot = `\\.\pipe`
func createDefaultSpec() (*specs.Spec, error) {
return &specs.Spec{
Version: specs.Version,
Platform: specs.Platform{
OS: runtime.GOOS,
Arch: runtime.GOARCH,
},
Root: specs.Root{},
Process: specs.Process{
ConsoleSize: specs.Box{
Width: 80,
Height: 20,
},
},
}, nil
}
func WithImageConfig(ctx context.Context, i Image) SpecOpts {
return func(s *specs.Spec) error {
var (
image = i.(*image)
store = image.client.ContentStore()
)
ic, err := image.i.Config(ctx, store)
if err != nil {
return err
}
var (
ociimage v1.Image
config v1.ImageConfig
)
switch ic.MediaType {
case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
r, err := store.Reader(ctx, ic.Digest)
if err != nil {
return err
}
if err := json.NewDecoder(r).Decode(&ociimage); err != nil {
r.Close()
return err
}
r.Close()
config = ociimage.Config
default:
return fmt.Errorf("unknown image config media type %s", ic.MediaType)
}
s.Process.Env = config.Env
s.Process.Args = append(config.Entrypoint, config.Cmd...)
s.Process.User = specs.User{
Username: config.User,
}
return nil
}
}
func WithTTY(width, height int) SpecOpts {
return func(s *specs.Spec) error {
s.Process.Terminal = true
s.Process.ConsoleSize.Width = uint(width)
s.Process.ConsoleSize.Height = uint(height)
return nil
}
}

273
vendor/github.com/containerd/containerd/task.go generated vendored Normal file
View File

@ -0,0 +1,273 @@
package containerd
import (
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"syscall"
"github.com/containerd/containerd/api/services/execution"
taskapi "github.com/containerd/containerd/api/types/task"
"github.com/containerd/fifo"
)
const UnknownExitStatus = 255
type IO struct {
Terminal bool
Stdin string
Stdout string
Stderr string
closer io.Closer
}
func (i *IO) Close() error {
if i.closer == nil {
return nil
}
return i.closer.Close()
}
type IOCreation func() (*IO, error)
// Stdio returns an IO implementation to be used for a task
// that outputs the container's IO as the current process's stdio
func Stdio() (*IO, error) {
paths, err := fifoPaths()
if err != nil {
return nil, err
}
set := &ioSet{
in: os.Stdin,
out: os.Stdout,
err: os.Stderr,
}
closer, err := copyIO(paths, set, false)
if err != nil {
return nil, err
}
return &IO{
Terminal: false,
Stdin: paths.in,
Stdout: paths.out,
Stderr: paths.err,
closer: closer,
}, nil
}
func fifoPaths() (*fifoSet, error) {
root := filepath.Join(os.TempDir(), "containerd")
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
dir, err := ioutil.TempDir(root, "")
if err != nil {
return nil, err
}
return &fifoSet{
dir: dir,
in: filepath.Join(dir, "stdin"),
out: filepath.Join(dir, "stdout"),
err: filepath.Join(dir, "stderr"),
}, nil
}
type fifoSet struct {
// dir is the directory holding the task fifos
dir string
in, out, err string
}
type ioSet struct {
in io.Reader
out, err io.Writer
}
func copyIO(fifos *fifoSet, ioset *ioSet, tty bool) (closer io.Closer, err error) {
var (
ctx = context.Background()
wg = &sync.WaitGroup{}
)
f, err := fifo.OpenFifo(ctx, fifos.in, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700)
if err != nil {
return nil, err
}
defer func(c io.Closer) {
if err != nil {
c.Close()
}
}(f)
go func(w io.WriteCloser) {
io.Copy(w, ioset.in)
w.Close()
}(f)
f, err = fifo.OpenFifo(ctx, fifos.out, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700)
if err != nil {
return nil, err
}
defer func(c io.Closer) {
if err != nil {
c.Close()
}
}(f)
wg.Add(1)
go func(r io.ReadCloser) {
io.Copy(ioset.out, r)
r.Close()
wg.Done()
}(f)
f, err = fifo.OpenFifo(ctx, fifos.err, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700)
if err != nil {
return nil, err
}
defer func(c io.Closer) {
if err != nil {
c.Close()
}
}(f)
if !tty {
wg.Add(1)
go func(r io.ReadCloser) {
io.Copy(ioset.err, r)
r.Close()
wg.Done()
}(f)
}
return &wgCloser{
wg: wg,
dir: fifos.dir,
}, nil
}
type wgCloser struct {
wg *sync.WaitGroup
dir string
}
func (g *wgCloser) Close() error {
g.wg.Wait()
if g.dir != "" {
return os.RemoveAll(g.dir)
}
return nil
}
type TaskStatus string
const (
Running TaskStatus = "running"
Created TaskStatus = "created"
Stopped TaskStatus = "stopped"
Paused TaskStatus = "paused"
Pausing TaskStatus = "pausing"
)
type Task interface {
Delete(context.Context) (uint32, error)
Kill(context.Context, syscall.Signal) error
Pause(context.Context) error
Resume(context.Context) error
Pid() uint32
Start(context.Context) error
Status(context.Context) (TaskStatus, error)
Wait(context.Context) (uint32, error)
}
var _ = (Task)(&task{})
type task struct {
client *Client
io *IO
containerID string
pid uint32
}
// Pid returns the pid or process id for the task
func (t *task) Pid() uint32 {
return t.pid
}
func (t *task) Start(ctx context.Context) error {
_, err := t.client.TaskService().Start(ctx, &execution.StartRequest{
ContainerID: t.containerID,
})
return err
}
func (t *task) Kill(ctx context.Context, s syscall.Signal) error {
_, err := t.client.TaskService().Kill(ctx, &execution.KillRequest{
Signal: uint32(s),
ContainerID: t.containerID,
PidOrAll: &execution.KillRequest_All{
All: true,
},
})
return err
}
func (t *task) Pause(ctx context.Context) error {
_, err := t.client.TaskService().Pause(ctx, &execution.PauseRequest{
ContainerID: t.containerID,
})
return err
}
func (t *task) Resume(ctx context.Context) error {
_, err := t.client.TaskService().Resume(ctx, &execution.ResumeRequest{
ContainerID: t.containerID,
})
return err
}
func (t *task) Status(ctx context.Context) (TaskStatus, error) {
r, err := t.client.TaskService().Info(ctx, &execution.InfoRequest{
ContainerID: t.containerID,
})
if err != nil {
return "", err
}
return TaskStatus(r.Task.Status.String()), nil
}
// Wait is a blocking call that will wait for the task to exit and return the exit status
func (t *task) Wait(ctx context.Context) (uint32, error) {
events, err := t.client.TaskService().Events(ctx, &execution.EventsRequest{})
if err != nil {
return UnknownExitStatus, err
}
for {
e, err := events.Recv()
if err != nil {
return UnknownExitStatus, err
}
if e.Type != taskapi.Event_EXIT {
continue
}
if e.ID == t.containerID && e.Pid == t.pid {
return e.ExitStatus, nil
}
}
}
// Delete deletes the task and its runtime state. It returns the exit status
// of the task and any errors that were encountered during cleanup.
func (t *task) Delete(ctx context.Context) (uint32, error) {
cerr := t.io.Close()
r, err := t.client.TaskService().Delete(ctx, &execution.DeleteRequest{
ContainerID: t.containerID,
})
if err != nil {
return UnknownExitStatus, err
}
return r.ExitStatus, cerr
}
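For orientation, a hedged sketch of the lifecycle this type supports; obtaining the Task value itself goes through the containerd client APIs, which are outside this excerpt, and runToCompletion is a hypothetical helper:

```
import "context"

// runToCompletion starts an existing task, waits for it to exit and then
// deletes it. Sketch only: because Wait subscribes to the event stream
// only when called, a production caller would arrange the wait before
// Start to avoid missing a very fast exit.
func runToCompletion(ctx context.Context, t Task) (uint32, error) {
	if err := t.Start(ctx); err != nil {
		return UnknownExitStatus, err
	}
	status, err := t.Wait(ctx)
	if err != nil {
		return UnknownExitStatus, err
	}
	if _, derr := t.Delete(ctx); derr != nil {
		return status, derr
	}
	return status, nil
}
```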

201
vendor/github.com/containerd/fifo/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

220
vendor/github.com/containerd/fifo/fifo.go generated vendored Normal file
View File

@ -0,0 +1,220 @@
package fifo
import (
"io"
"os"
"runtime"
"sync"
"syscall"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
type fifo struct {
flag int
opened chan struct{}
closed chan struct{}
closing chan struct{}
err error
file *os.File
closingOnce sync.Once // close has been called
closedOnce sync.Once // fifo is closed
handle *handle
}
var leakCheckWg *sync.WaitGroup
// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
// Context can be used to cancel this function before open(2) has returned.
// Accepted flags:
// - syscall.O_CREAT - create new fifo if one doesn't exist
// - syscall.O_RDONLY - open fifo only from reader side
// - syscall.O_WRONLY - open fifo only from writer side
// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
// fifo isn't open. read/write will be connected after the actual fifo is
// open or after fifo is closed.
func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
if _, err := os.Stat(fn); err != nil {
if os.IsNotExist(err) && flag&syscall.O_CREAT != 0 {
if err := mkfifo(fn, uint32(perm&os.ModePerm)); err != nil && !os.IsExist(err) {
return nil, errors.Wrapf(err, "error creating fifo %v", fn)
}
} else {
return nil, err
}
}
block := flag&syscall.O_NONBLOCK == 0 || flag&syscall.O_RDWR != 0
flag &= ^syscall.O_CREAT
flag &= ^syscall.O_NONBLOCK
h, err := getHandle(fn)
if err != nil {
return nil, err
}
f := &fifo{
handle: h,
flag: flag,
opened: make(chan struct{}),
closed: make(chan struct{}),
closing: make(chan struct{}),
}
wg := leakCheckWg
if wg != nil {
wg.Add(2)
}
go func() {
if wg != nil {
defer wg.Done()
}
select {
case <-ctx.Done():
select {
case <-f.opened:
default:
f.Close()
}
case <-f.opened:
case <-f.closed:
}
}()
go func() {
if wg != nil {
defer wg.Done()
}
var file *os.File
fn, err := h.Path()
if err == nil {
file, err = os.OpenFile(fn, flag, 0)
}
select {
case <-f.closing:
if err == nil {
select {
case <-ctx.Done():
err = ctx.Err()
default:
err = errors.Errorf("fifo %v was closed before opening", h.Name())
}
if file != nil {
file.Close()
}
}
default:
}
if err != nil {
f.closedOnce.Do(func() {
f.err = err
close(f.closed)
})
return
}
f.file = file
close(f.opened)
}()
if block {
select {
case <-f.opened:
case <-f.closed:
return nil, f.err
}
}
return f, nil
}
// Read from a fifo to a byte array.
func (f *fifo) Read(b []byte) (int, error) {
if f.flag&syscall.O_WRONLY > 0 {
return 0, errors.New("reading from write-only fifo")
}
select {
case <-f.opened:
return f.file.Read(b)
default:
}
select {
case <-f.opened:
return f.file.Read(b)
case <-f.closed:
return 0, errors.New("reading from a closed fifo")
}
}
// Write from byte array to a fifo.
func (f *fifo) Write(b []byte) (int, error) {
if f.flag&(syscall.O_WRONLY|syscall.O_RDWR) == 0 {
return 0, errors.New("writing to read-only fifo")
}
select {
case <-f.opened:
return f.file.Write(b)
default:
}
select {
case <-f.opened:
return f.file.Write(b)
case <-f.closed:
return 0, errors.New("writing to a closed fifo")
}
}
// Close the fifo. Next reads/writes will error. This method can also be used
// before open(2) has returned, in which case the fifo was never opened.
func (f *fifo) Close() (retErr error) {
for {
select {
case <-f.closed:
f.handle.Close()
return
default:
select {
case <-f.opened:
f.closedOnce.Do(func() {
retErr = f.file.Close()
f.err = retErr
close(f.closed)
})
default:
if f.flag&syscall.O_RDWR != 0 {
runtime.Gosched()
break
}
f.closingOnce.Do(func() {
close(f.closing)
})
reverseMode := syscall.O_WRONLY
if f.flag&syscall.O_WRONLY > 0 {
reverseMode = syscall.O_RDONLY
}
fn, err := f.handle.Path()
// if Close() is called concurrently (which shouldn't happen) it may fail
// because the handle is already closed
select {
case <-f.closed:
default:
if err != nil {
// Path has become invalid. We will leak a goroutine.
// This case should not happen on Linux.
f.closedOnce.Do(func() {
f.err = err
close(f.closed)
})
<-f.closed
break
}
f, err := os.OpenFile(fn, reverseMode|syscall.O_NONBLOCK, 0)
if err == nil {
f.Close()
}
runtime.Gosched()
}
}
}
}
}
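A short usage sketch for OpenFifo, mirroring how the containerd client's copyIO (earlier in this commit) consumes a task's stdout fifo; the path is illustrative and tailFifo is a hypothetical helper:

```
import (
	"context"
	"io"
	"os"
	"syscall"

	"github.com/containerd/fifo"
)

// tailFifo opens (creating it if needed) a fifo for reading without
// blocking on the writer side, then streams its contents to stdout.
func tailFifo(ctx context.Context, path string) error {
	f, err := fifo.OpenFifo(ctx, path, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(os.Stdout, f)
	return err
}
```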

76
vendor/github.com/containerd/fifo/handle_linux.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
// +build linux
package fifo
import (
"fmt"
"os"
"sync"
"syscall"
"github.com/pkg/errors"
)
const O_PATH = 010000000
type handle struct {
f *os.File
dev uint64
ino uint64
closeOnce sync.Once
name string
}
func getHandle(fn string) (*handle, error) {
f, err := os.OpenFile(fn, O_PATH, 0)
if err != nil {
return nil, errors.Wrapf(err, "failed to open %v with O_PATH", fn)
}
var stat syscall.Stat_t
if err := syscall.Fstat(int(f.Fd()), &stat); err != nil {
f.Close()
return nil, errors.Wrapf(err, "failed to stat handle %v", f.Fd())
}
h := &handle{
f: f,
name: fn,
dev: uint64(stat.Dev),
ino: stat.Ino,
}
// check /proc just in case
if _, err := os.Stat(h.procPath()); err != nil {
f.Close()
return nil, errors.Wrapf(err, "couldn't stat %v", h.procPath())
}
return h, nil
}
func (h *handle) procPath() string {
return fmt.Sprintf("/proc/self/fd/%d", h.f.Fd())
}
func (h *handle) Name() string {
return h.name
}
func (h *handle) Path() (string, error) {
var stat syscall.Stat_t
if err := syscall.Stat(h.procPath(), &stat); err != nil {
return "", errors.Wrapf(err, "path %v could not be statted", h.procPath())
}
if uint64(stat.Dev) != h.dev || stat.Ino != h.ino {
return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
}
return h.procPath(), nil
}
func (h *handle) Close() error {
h.closeOnce.Do(func() {
h.f.Close()
})
return nil
}

49
vendor/github.com/containerd/fifo/handle_nolinux.go generated vendored Normal file

@ -0,0 +1,49 @@
// +build !linux
package fifo
import (
"syscall"
"github.com/pkg/errors"
)
type handle struct {
fn string
dev uint64
ino uint64
}
func getHandle(fn string) (*handle, error) {
var stat syscall.Stat_t
if err := syscall.Stat(fn, &stat); err != nil {
return nil, errors.Wrapf(err, "failed to stat %v", fn)
}
h := &handle{
fn: fn,
dev: uint64(stat.Dev),
ino: uint64(stat.Ino),
}
return h, nil
}
func (h *handle) Path() (string, error) {
var stat syscall.Stat_t
if err := syscall.Stat(h.fn, &stat); err != nil {
return "", errors.Wrapf(err, "path %v could not be statted", h.fn)
}
if uint64(stat.Dev) != h.dev || uint64(stat.Ino) != h.ino {
return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn)
}
return h.fn, nil
}
func (h *handle) Name() string {
return h.fn
}
func (h *handle) Close() error {
return nil
}


@ -0,0 +1,9 @@
// +build !solaris
package fifo
import "syscall"
func mkfifo(path string, mode uint32) (err error) {
return syscall.Mkfifo(path, mode)
}

11
vendor/github.com/containerd/fifo/mkfifo_solaris.go generated vendored Normal file

@ -0,0 +1,11 @@
// +build solaris
package fifo
import (
"golang.org/x/sys/unix"
)
func mkfifo(path string, mode uint32) (err error) {
return unix.Mkfifo(path, mode)
}

32
vendor/github.com/containerd/fifo/readme.md generated vendored Normal file

@ -0,0 +1,32 @@
### fifo
[![Build Status](https://travis-ci.org/containerd/fifo.svg?branch=master)](https://travis-ci.org/containerd/fifo)
Go package for handling fifos in a sane way.
```
// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
// Context can be used to cancel this function before open(2) has returned.
// Accepted flags:
// - syscall.O_CREAT - create new fifo if one doesn't exist
// - syscall.O_RDONLY - open fifo only from reader side
// - syscall.O_WRONLY - open fifo only from writer side
// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
// fifo isn't open. read/write will be connected after the actual fifo is
// open or after fifo is closed.
func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error)
// Read from a fifo to a byte array.
func (f *fifo) Read(b []byte) (int, error)
// Write from byte array to a fifo.
func (f *fifo) Write(b []byte) (int, error)
// Close the fifo. Next reads/writes will error. This method can also be used
// before open(2) has returned and fifo was never opened.
func (f *fifo) Close() error
```
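
A minimal usage sketch (not part of the vendored files), assuming the package above is imported as github.com/containerd/fifo and the program runs on Linux; the fifo path is illustrative. O_RDWR is used so a single process can exercise both ends without blocking:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"syscall"

	"github.com/containerd/fifo"
)

func main() {
	ctx := context.Background()
	path := filepath.Join(os.TempDir(), "example.fifo")

	// O_RDWR never blocks at the syscall level, so one process can
	// create the fifo and use both ends of it.
	rw, err := fifo.OpenFifo(ctx, path, syscall.O_CREAT|syscall.O_RDWR, 0600)
	if err != nil {
		panic(err)
	}
	defer os.Remove(path)
	defer rw.Close()

	if _, err := rw.Write([]byte("hello\n")); err != nil {
		panic(err)
	}
	buf := make([]byte, 16)
	n, err := rw.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %q\n", buf[:n])
}
```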

201
vendor/github.com/containerd/go-runc/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

14
vendor/github.com/containerd/go-runc/README.md generated vendored Normal file

@ -0,0 +1,14 @@
# go-runc
[![Build Status](https://travis-ci.org/containerd/go-runc.svg?branch=master)](https://travis-ci.org/containerd/go-runc)
This is a package for consuming the [runc](https://github.com/opencontainers/runc) binary in your Go applications.
It tries to expose all the settings and features of the runc CLI. If there is something missing then add it, it's open source!
This needs runc @ [a9610f2c0](https://github.com/opencontainers/runc/commit/a9610f2c0237d2636d05a031ec8659a70e75ffeb)
or greater.
## Docs
Docs can be found at [godoc.org](https://godoc.org/github.com/containerd/go-runc).
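
A minimal client sketch (again not part of the diff), assuming a runc binary is available on $PATH; the log path is only illustrative:

```go
package main

import (
	"context"
	"fmt"

	runc "github.com/containerd/go-runc"
)

func main() {
	// An empty Command falls back to DefaultCommand ("runc").
	r := &runc.Runc{
		Log:       "/tmp/runc.log", // illustrative path
		LogFormat: runc.JSON,
	}
	containers, err := r.List(context.Background())
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Printf("%s\t%d\t%s\t%s\n", c.ID, c.Pid, c.Status, c.Bundle)
	}
}
```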

21
vendor/github.com/containerd/go-runc/command_linux.go generated vendored Normal file

@ -0,0 +1,21 @@
package runc
import (
"context"
"os/exec"
"syscall"
)
func (r *Runc) command(context context.Context, args ...string) *exec.Cmd {
command := r.Command
if command == "" {
command = DefaultCommand
}
cmd := exec.CommandContext(context, command, append(r.args(), args...)...)
if r.PdeathSignal != 0 {
cmd.SysProcAttr = &syscall.SysProcAttr{
Pdeathsig: r.PdeathSignal,
}
}
return cmd
}

16
vendor/github.com/containerd/go-runc/command_other.go generated vendored Normal file

@ -0,0 +1,16 @@
// +build !linux
package runc
import (
"context"
"os/exec"
)
func (r *Runc) command(context context.Context, args ...string) *exec.Cmd {
command := r.Command
if command == "" {
command = DefaultCommand
}
return exec.CommandContext(context, command, append(r.args(), args...)...)
}

141
vendor/github.com/containerd/go-runc/console.go generated vendored Normal file

@ -0,0 +1,141 @@
package runc
import (
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"github.com/containerd/console"
"golang.org/x/sys/unix"
)
// NewConsoleSocket creates a new unix socket at the provided path to accept a
// pty master created by runc for use by the container
func NewConsoleSocket(path string) (*Socket, error) {
abs, err := filepath.Abs(path)
if err != nil {
return nil, err
}
addr, err := net.ResolveUnixAddr("unix", abs)
if err != nil {
return nil, err
}
l, err := net.ListenUnix("unix", addr)
if err != nil {
return nil, err
}
return &Socket{
l: l,
}, nil
}
// NewTempConsoleSocket returns a temp console socket for use with a container
// On Close(), the socket is deleted
func NewTempConsoleSocket() (*Socket, error) {
dir, err := ioutil.TempDir("", "pty")
if err != nil {
return nil, err
}
abs, err := filepath.Abs(filepath.Join(dir, "pty.sock"))
if err != nil {
return nil, err
}
addr, err := net.ResolveUnixAddr("unix", abs)
if err != nil {
return nil, err
}
l, err := net.ListenUnix("unix", addr)
if err != nil {
return nil, err
}
return &Socket{
l: l,
rmdir: true,
}, nil
}
// Socket is a unix socket that accepts the pty master created by runc
type Socket struct {
rmdir bool
l *net.UnixListener
}
// Path returns the path to the unix socket on disk
func (c *Socket) Path() string {
return c.l.Addr().String()
}
// recvFd waits for a file descriptor to be sent over the given AF_UNIX
// socket. The file name of the remote file descriptor will be recreated
// locally (it is sent as non-auxiliary data in the same payload).
func recvFd(socket *net.UnixConn) (*os.File, error) {
const MaxNameLen = 4096
var oobSpace = unix.CmsgSpace(4)
name := make([]byte, MaxNameLen)
oob := make([]byte, oobSpace)
n, oobn, _, _, err := socket.ReadMsgUnix(name, oob)
if err != nil {
return nil, err
}
if n >= MaxNameLen || oobn != oobSpace {
return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn)
}
// Truncate.
name = name[:n]
oob = oob[:oobn]
scms, err := unix.ParseSocketControlMessage(oob)
if err != nil {
return nil, err
}
if len(scms) != 1 {
return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms))
}
scm := scms[0]
fds, err := unix.ParseUnixRights(&scm)
if err != nil {
return nil, err
}
if len(fds) != 1 {
return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds))
}
fd := uintptr(fds[0])
return os.NewFile(fd, string(name)), nil
}
// ReceiveMaster blocks until the socket receives the pty master
func (c *Socket) ReceiveMaster() (console.Console, error) {
conn, err := c.l.Accept()
if err != nil {
return nil, err
}
defer conn.Close()
uc, ok := conn.(*net.UnixConn)
if !ok {
return nil, fmt.Errorf("received connection which was not a unix socket")
}
f, err := recvFd(uc)
if err != nil {
return nil, err
}
return console.ConsoleFromFile(f)
}
// Close closes the unix socket
func (c *Socket) Close() error {
err := c.l.Close()
if c.rmdir {
if rerr := os.RemoveAll(filepath.Dir(c.Path())); err == nil {
err = rerr
}
}
return err
}
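
A hedged sketch of how the console socket plugs into CreateOpts (defined later in runc.go in this diff); the container id "demo" and bundle path /run/bundle are made-up assumptions, and the bundle's config.json would need terminal: true for runc to send a pty master:

```go
package main

import (
	"context"
	"fmt"

	runc "github.com/containerd/go-runc"
)

func main() {
	// The temp socket (and its directory) is removed when Close is called.
	socket, err := runc.NewTempConsoleSocket()
	if err != nil {
		panic(err)
	}
	defer socket.Close()

	r := &runc.Runc{}
	// Hypothetical id and bundle path.
	err = r.Create(context.Background(), "demo", "/run/bundle", &runc.CreateOpts{
		ConsoleSocket: socket,
		Detach:        true,
	})
	if err != nil {
		panic(err)
	}

	// Blocks until runc sends the pty master over the socket.
	master, err := socket.ReceiveMaster()
	if err != nil {
		panic(err)
	}
	defer master.Close()
	fmt.Println("received pty master for container demo")
}
```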

14
vendor/github.com/containerd/go-runc/container.go generated vendored Normal file

@ -0,0 +1,14 @@
package runc
import "time"
// Container holds information for a runc container
type Container struct {
ID string `json:"id"`
Pid int `json:"pid"`
Status string `json:"status"`
Bundle string `json:"bundle"`
Rootfs string `json:"rootfs"`
Created time.Time `json:"created"`
Annotations map[string]string `json:"annotations"`
}

84
vendor/github.com/containerd/go-runc/events.go generated vendored Normal file

@ -0,0 +1,84 @@
package runc
type Event struct {
// Type is the event type generated by runc
// If the type is "error" then check the Err field on the event for
// the actual error
Type string `json:"type"`
ID string `json:"id"`
Stats *Stats `json:"data,omitempty"`
// Err has a read error if we were unable to decode the event from runc
Err error `json:"-"`
}
type Stats struct {
Cpu Cpu `json:"cpu"`
Memory Memory `json:"memory"`
Pids Pids `json:"pids"`
Blkio Blkio `json:"blkio"`
Hugetlb map[string]Hugetlb `json:"hugetlb"`
}
type Hugetlb struct {
Usage uint64 `json:"usage,omitempty"`
Max uint64 `json:"max,omitempty"`
Failcnt uint64 `json:"failcnt"`
}
type BlkioEntry struct {
Major uint64 `json:"major,omitempty"`
Minor uint64 `json:"minor,omitempty"`
Op string `json:"op,omitempty"`
Value uint64 `json:"value,omitempty"`
}
type Blkio struct {
IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
IoServicedRecursive []BlkioEntry `json:"ioServicedRecursive,omitempty"`
IoQueuedRecursive []BlkioEntry `json:"ioQueueRecursive,omitempty"`
IoServiceTimeRecursive []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"`
IoWaitTimeRecursive []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"`
IoMergedRecursive []BlkioEntry `json:"ioMergedRecursive,omitempty"`
IoTimeRecursive []BlkioEntry `json:"ioTimeRecursive,omitempty"`
SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"`
}
type Pids struct {
Current uint64 `json:"current,omitempty"`
Limit uint64 `json:"limit,omitempty"`
}
type Throttling struct {
Periods uint64 `json:"periods,omitempty"`
ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
ThrottledTime uint64 `json:"throttledTime,omitempty"`
}
type CpuUsage struct {
// Units: nanoseconds.
Total uint64 `json:"total,omitempty"`
Percpu []uint64 `json:"percpu,omitempty"`
Kernel uint64 `json:"kernel"`
User uint64 `json:"user"`
}
type Cpu struct {
Usage CpuUsage `json:"usage,omitempty"`
Throttling Throttling `json:"throttling,omitempty"`
}
type MemoryEntry struct {
Limit uint64 `json:"limit"`
Usage uint64 `json:"usage,omitempty"`
Max uint64 `json:"max,omitempty"`
Failcnt uint64 `json:"failcnt"`
}
type Memory struct {
Cache uint64 `json:"cache,omitempty"`
Usage MemoryEntry `json:"usage,omitempty"`
Swap MemoryEntry `json:"swap,omitempty"`
Kernel MemoryEntry `json:"kernel,omitempty"`
KernelTCP MemoryEntry `json:"kernelTCP,omitempty"`
Raw map[string]uint64 `json:"raw,omitempty"`
}

165
vendor/github.com/containerd/go-runc/io.go generated vendored Normal file

@ -0,0 +1,165 @@
package runc
import (
"io"
"os"
"os/exec"
"golang.org/x/sys/unix"
)
type IO interface {
io.Closer
Stdin() io.WriteCloser
Stdout() io.ReadCloser
Stderr() io.ReadCloser
Set(*exec.Cmd)
}
type StartCloser interface {
CloseAfterStart() error
}
// NewPipeIO creates pipe pairs to be used with runc
func NewPipeIO(uid, gid int) (i IO, err error) {
var pipes []*pipe
// cleanup in case of an error
defer func() {
if err != nil {
for _, p := range pipes {
p.Close()
}
}
}()
stdin, err := newPipe(uid, gid)
if err != nil {
return nil, err
}
pipes = append(pipes, stdin)
stdout, err := newPipe(uid, gid)
if err != nil {
return nil, err
}
pipes = append(pipes, stdout)
stderr, err := newPipe(uid, gid)
if err != nil {
return nil, err
}
pipes = append(pipes, stderr)
return &pipeIO{
in: stdin,
out: stdout,
err: stderr,
}, nil
}
func newPipe(uid, gid int) (*pipe, error) {
r, w, err := os.Pipe()
if err != nil {
return nil, err
}
if err := unix.Fchown(int(r.Fd()), uid, gid); err != nil {
return nil, err
}
if err := unix.Fchown(int(w.Fd()), uid, gid); err != nil {
return nil, err
}
return &pipe{
r: r,
w: w,
}, nil
}
type pipe struct {
r *os.File
w *os.File
}
func (p *pipe) Close() error {
err := p.r.Close()
if werr := p.w.Close(); err == nil {
err = werr
}
return err
}
type pipeIO struct {
in *pipe
out *pipe
err *pipe
}
func (i *pipeIO) Stdin() io.WriteCloser {
return i.in.w
}
func (i *pipeIO) Stdout() io.ReadCloser {
return i.out.r
}
func (i *pipeIO) Stderr() io.ReadCloser {
return i.err.r
}
func (i *pipeIO) Close() error {
var err error
for _, v := range []*pipe{
i.in,
i.out,
i.err,
} {
if cerr := v.Close(); err == nil {
err = cerr
}
}
return err
}
func (i *pipeIO) CloseAfterStart() error {
for _, f := range []*os.File{
i.out.w,
i.err.w,
} {
f.Close()
}
return nil
}
// Set sets the io to the exec.Cmd
func (i *pipeIO) Set(cmd *exec.Cmd) {
cmd.Stdin = i.in.r
cmd.Stdout = i.out.w
cmd.Stderr = i.err.w
}
func NewSTDIO() (IO, error) {
return &stdio{}, nil
}
type stdio struct {
}
func (s *stdio) Close() error {
return nil
}
func (s *stdio) Set(cmd *exec.Cmd) {
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
func (s *stdio) Stdin() io.WriteCloser {
return os.Stdin
}
func (s *stdio) Stdout() io.ReadCloser {
return os.Stdout
}
func (s *stdio) Stderr() io.ReadCloser {
return os.Stderr
}
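
A sketch wiring NewPipeIO into Run (defined later in runc.go in this diff); the container id and bundle path are assumptions, and the container's output is simply streamed to the caller's stdout/stderr:

```go
package main

import (
	"context"
	"io"
	"os"

	runc "github.com/containerd/go-runc"
)

func main() {
	// Pipes are chowned to root:root here; pass other ids for rootless setups.
	pio, err := runc.NewPipeIO(0, 0)
	if err != nil {
		panic(err)
	}
	defer pio.Close()

	// Stream the container's output to our own stdout/stderr.
	go io.Copy(os.Stdout, pio.Stdout())
	go io.Copy(os.Stderr, pio.Stderr())

	r := &runc.Runc{}
	// Hypothetical id and bundle path.
	status, err := r.Run(context.Background(), "demo", "/run/bundle", &runc.CreateOpts{
		IO: pio,
	})
	if err != nil {
		panic(err)
	}
	os.Exit(status)
}
```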

50
vendor/github.com/containerd/go-runc/monitor.go generated vendored Normal file

@ -0,0 +1,50 @@
package runc
import (
"os/exec"
"syscall"
)
var Monitor ProcessMonitor = &defaultMonitor{}
// ProcessMonitor is an interface for process monitoring
//
// It allows daemons using go-runc to have a SIGCHLD handler
// to handle exits without introducing races between the handler
// and go's exec.Cmd
// These methods should match the methods exposed by exec.Cmd to provide
// a consistent experience for the caller
type ProcessMonitor interface {
Output(*exec.Cmd) ([]byte, error)
CombinedOutput(*exec.Cmd) ([]byte, error)
Run(*exec.Cmd) error
Start(*exec.Cmd) error
Wait(*exec.Cmd) (int, error)
}
type defaultMonitor struct {
}
func (m *defaultMonitor) Output(c *exec.Cmd) ([]byte, error) {
return c.Output()
}
func (m *defaultMonitor) CombinedOutput(c *exec.Cmd) ([]byte, error) {
return c.CombinedOutput()
}
func (m *defaultMonitor) Run(c *exec.Cmd) error {
return c.Run()
}
func (m *defaultMonitor) Start(c *exec.Cmd) error {
return c.Start()
}
func (m *defaultMonitor) Wait(c *exec.Cmd) (int, error) {
status, err := c.Process.Wait()
if err != nil {
return -1, err
}
return status.Sys().(syscall.WaitStatus).ExitStatus(), nil
}
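
The Monitor hook can be replaced process-wide. A hedged sketch of a drop-in ProcessMonitor that only logs each runc invocation before delegating to exec.Cmd; a real daemon would instead route Start/Wait through its own SIGCHLD reaper, as the interface comment suggests:

```go
package main

import (
	"log"
	"os/exec"
	"syscall"

	runc "github.com/containerd/go-runc"
)

// loggingMonitor delegates to exec.Cmd like the default monitor, but logs
// every runc command first.
type loggingMonitor struct{}

func (loggingMonitor) Output(c *exec.Cmd) ([]byte, error) {
	log.Println("runc:", c.Args)
	return c.Output()
}

func (loggingMonitor) CombinedOutput(c *exec.Cmd) ([]byte, error) {
	log.Println("runc:", c.Args)
	return c.CombinedOutput()
}

func (loggingMonitor) Run(c *exec.Cmd) error {
	log.Println("runc:", c.Args)
	return c.Run()
}

func (loggingMonitor) Start(c *exec.Cmd) error {
	log.Println("runc:", c.Args)
	return c.Start()
}

func (loggingMonitor) Wait(c *exec.Cmd) (int, error) {
	state, err := c.Process.Wait()
	if err != nil {
		return -1, err
	}
	return state.Sys().(syscall.WaitStatus).ExitStatus(), nil
}

func main() {
	// Installed process-wide; every Runc client in this process now uses it.
	runc.Monitor = loggingMonitor{}
}
```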

546
vendor/github.com/containerd/go-runc/runc.go generated vendored Normal file

@ -0,0 +1,546 @@
package runc
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
"time"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// Format is the type of log formatting options available
type Format string
const (
none Format = ""
JSON Format = "json"
Text Format = "text"
// DefaultCommand is the default command for Runc
DefaultCommand = "runc"
)
// Runc is the client to the runc cli
type Runc struct {
// If Command is empty, DefaultCommand is used
Command string
Root string
Debug bool
Log string
LogFormat Format
PdeathSignal syscall.Signal
Criu string
}
// List returns all containers created inside the provided runc root directory
func (r *Runc) List(context context.Context) ([]*Container, error) {
data, err := Monitor.Output(r.command(context, "list", "--format=json"))
if err != nil {
return nil, err
}
var out []*Container
if err := json.Unmarshal(data, &out); err != nil {
return nil, err
}
return out, nil
}
// State returns the state for the container provided by id
func (r *Runc) State(context context.Context, id string) (*Container, error) {
data, err := Monitor.CombinedOutput(r.command(context, "state", id))
if err != nil {
return nil, fmt.Errorf("%s: %s", err, data)
}
var c Container
if err := json.Unmarshal(data, &c); err != nil {
return nil, err
}
return &c, nil
}
type ConsoleSocket interface {
Path() string
}
type CreateOpts struct {
IO
// PidFile is a path to where a pid file should be created
PidFile string
ConsoleSocket ConsoleSocket
Detach bool
NoPivot bool
NoNewKeyring bool
ExtraFiles []*os.File
}
func (o *CreateOpts) args() (out []string, err error) {
if o.PidFile != "" {
abs, err := filepath.Abs(o.PidFile)
if err != nil {
return nil, err
}
out = append(out, "--pid-file", abs)
}
if o.ConsoleSocket != nil {
out = append(out, "--console-socket", o.ConsoleSocket.Path())
}
if o.NoPivot {
out = append(out, "--no-pivot")
}
if o.NoNewKeyring {
out = append(out, "--no-new-keyring")
}
if o.Detach {
out = append(out, "--detach")
}
if o.ExtraFiles != nil {
out = append(out, "--preserve-fds", strconv.Itoa(len(o.ExtraFiles)))
}
return out, nil
}
// Create creates a new container from the provided bundle; the pid of the
// created container is written to opts.PidFile when one is requested
func (r *Runc) Create(context context.Context, id, bundle string, opts *CreateOpts) error {
args := []string{"create", "--bundle", bundle}
if opts != nil {
oargs, err := opts.args()
if err != nil {
return err
}
args = append(args, oargs...)
}
cmd := r.command(context, append(args, id)...)
if opts != nil && opts.IO != nil {
opts.Set(cmd)
}
cmd.ExtraFiles = opts.ExtraFiles
if cmd.Stdout == nil && cmd.Stderr == nil {
data, err := Monitor.CombinedOutput(cmd)
if err != nil {
return fmt.Errorf("%s: %s", err, data)
}
return nil
}
if err := Monitor.Start(cmd); err != nil {
return err
}
if opts != nil && opts.IO != nil {
if c, ok := opts.IO.(StartCloser); ok {
if err := c.CloseAfterStart(); err != nil {
return err
}
}
}
_, err := Monitor.Wait(cmd)
return err
}
// Start will start an already created container
func (r *Runc) Start(context context.Context, id string) error {
return r.runOrError(r.command(context, "start", id))
}
type ExecOpts struct {
IO
PidFile string
ConsoleSocket ConsoleSocket
Detach bool
}
func (o *ExecOpts) args() (out []string, err error) {
if o.ConsoleSocket != nil {
out = append(out, "--console-socket", o.ConsoleSocket.Path())
}
if o.Detach {
out = append(out, "--detach")
}
if o.PidFile != "" {
abs, err := filepath.Abs(o.PidFile)
if err != nil {
return nil, err
}
out = append(out, "--pid-file", abs)
}
return out, nil
}
// Exec executes an additional process inside the container based on a full
// OCI Process specification
func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts *ExecOpts) error {
f, err := ioutil.TempFile("", "runc-process")
if err != nil {
return err
}
defer os.Remove(f.Name())
err = json.NewEncoder(f).Encode(spec)
f.Close()
if err != nil {
return err
}
args := []string{"exec", "--process", f.Name()}
if opts != nil {
oargs, err := opts.args()
if err != nil {
return err
}
args = append(args, oargs...)
}
cmd := r.command(context, append(args, id)...)
if opts != nil && opts.IO != nil {
opts.Set(cmd)
}
if cmd.Stdout == nil && cmd.Stderr == nil {
data, err := Monitor.CombinedOutput(cmd)
if err != nil {
return fmt.Errorf("%s: %s", err, data)
}
return nil
}
if err := Monitor.Start(cmd); err != nil {
return err
}
if opts != nil && opts.IO != nil {
if c, ok := opts.IO.(StartCloser); ok {
if err := c.CloseAfterStart(); err != nil {
return err
}
}
}
_, err = Monitor.Wait(cmd)
return err
}
// Run runs the create, start, delete lifecycle of the container
// and returns its exit status after it has exited
func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) (int, error) {
args := []string{"run", "--bundle", bundle}
if opts != nil {
oargs, err := opts.args()
if err != nil {
return -1, err
}
args = append(args, oargs...)
}
cmd := r.command(context, append(args, id)...)
if opts != nil {
opts.Set(cmd)
}
if err := Monitor.Start(cmd); err != nil {
return -1, err
}
return Monitor.Wait(cmd)
}
// Delete deletes the container
func (r *Runc) Delete(context context.Context, id string) error {
return r.runOrError(r.command(context, "delete", id))
}
// KillOpts specifies options for killing a container and its processes
type KillOpts struct {
All bool
}
func (o *KillOpts) args() (out []string) {
if o.All {
out = append(out, "--all")
}
return out
}
// Kill sends the specified signal to the container
func (r *Runc) Kill(context context.Context, id string, sig int, opts *KillOpts) error {
args := []string{
"kill",
}
if opts != nil {
args = append(args, opts.args()...)
}
return r.runOrError(r.command(context, append(args, id, strconv.Itoa(sig))...))
}
// Stats return the stats for a container like cpu, memory, and io
func (r *Runc) Stats(context context.Context, id string) (*Stats, error) {
cmd := r.command(context, "events", "--stats", id)
rd, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
defer func() {
rd.Close()
Monitor.Wait(cmd)
}()
if err := Monitor.Start(cmd); err != nil {
return nil, err
}
var e Event
if err := json.NewDecoder(rd).Decode(&e); err != nil {
return nil, err
}
return e.Stats, nil
}
// Events returns an event stream from runc for a container with stats and OOM notifications
func (r *Runc) Events(context context.Context, id string, interval time.Duration) (chan *Event, error) {
cmd := r.command(context, "events", fmt.Sprintf("--interval=%ds", int(interval.Seconds())), id)
rd, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
if err := Monitor.Start(cmd); err != nil {
rd.Close()
return nil, err
}
var (
dec = json.NewDecoder(rd)
c = make(chan *Event, 128)
)
go func() {
defer func() {
close(c)
rd.Close()
Monitor.Wait(cmd)
}()
for {
var e Event
if err := dec.Decode(&e); err != nil {
if err == io.EOF {
return
}
e = Event{
Type: "error",
Err: err,
}
}
c <- &e
}
}()
return c, nil
}
// Pause the container with the provided id
func (r *Runc) Pause(context context.Context, id string) error {
return r.runOrError(r.command(context, "pause", id))
}
// Resume the container with the provided id
func (r *Runc) Resume(context context.Context, id string) error {
return r.runOrError(r.command(context, "resume", id))
}
// Ps lists all the processes inside the container returning their pids
func (r *Runc) Ps(context context.Context, id string) ([]int, error) {
data, err := Monitor.CombinedOutput(r.command(context, "ps", "--format", "json", id))
if err != nil {
return nil, fmt.Errorf("%s: %s", err, data)
}
var pids []int
if err := json.Unmarshal(data, &pids); err != nil {
return nil, err
}
return pids, nil
}
type CheckpointOpts struct {
// ImagePath is the path for saving the criu image file
ImagePath string
// WorkDir is the working directory for criu
WorkDir string
// ParentPath is the path for previous image files from a pre-dump
ParentPath string
// AllowOpenTCP allows open tcp connections to be checkpointed
AllowOpenTCP bool
// AllowExternalUnixSockets allows external unix sockets to be checkpointed
AllowExternalUnixSockets bool
// AllowTerminal allows the terminal(pty) to be checkpointed with a container
AllowTerminal bool
// CriuPageServer is the address:port for the criu page server
CriuPageServer string
// FileLocks handle file locks held by the container
FileLocks bool
// Cgroups is the cgroup mode for how to handle the checkpoint of a container's cgroups
Cgroups CgroupMode
// EmptyNamespaces creates a namespace for the container but does not save its properties
// Provide the namespaces you wish to be checkpointed without their settings on restore
EmptyNamespaces []string
}
type CgroupMode string
const (
Soft CgroupMode = "soft"
Full CgroupMode = "full"
Strict CgroupMode = "strict"
)
func (o *CheckpointOpts) args() (out []string) {
if o.ImagePath != "" {
out = append(out, "--image-path", o.ImagePath)
}
if o.WorkDir != "" {
out = append(out, "--work-path", o.WorkDir)
}
if o.ParentPath != "" {
out = append(out, "--parent-path", o.ParentPath)
}
if o.AllowOpenTCP {
out = append(out, "--tcp-established")
}
if o.AllowExternalUnixSockets {
out = append(out, "--ext-unix-sk")
}
if o.AllowTerminal {
out = append(out, "--shell-job")
}
if o.CriuPageServer != "" {
out = append(out, "--page-server", o.CriuPageServer)
}
if o.FileLocks {
out = append(out, "--file-locks")
}
if string(o.Cgroups) != "" {
out = append(out, "--manage-cgroups-mode", string(o.Cgroups))
}
for _, ns := range o.EmptyNamespaces {
out = append(out, "--empty-ns", ns)
}
return out
}
type CheckpointAction func([]string) []string
// LeaveRunning keeps the container running after the checkpoint has been completed
func LeaveRunning(args []string) []string {
return append(args, "--leave-running")
}
// PreDump allows a pre-dump of the checkpoint to be made and completed later
func PreDump(args []string) []string {
return append(args, "--pre-dump")
}
// Checkpoint allows you to checkpoint a container using criu
func (r *Runc) Checkpoint(context context.Context, id string, opts *CheckpointOpts, actions ...CheckpointAction) error {
args := []string{"checkpoint"}
if opts != nil {
args = append(args, opts.args()...)
}
for _, a := range actions {
args = a(args)
}
return r.runOrError(r.command(context, append(args, id)...))
}
type RestoreOpts struct {
CheckpointOpts
IO
Detach bool
PidFile string
NoSubreaper bool
NoPivot bool
}
func (o *RestoreOpts) args() ([]string, error) {
out := o.CheckpointOpts.args()
if o.Detach {
out = append(out, "--detach")
}
if o.PidFile != "" {
abs, err := filepath.Abs(o.PidFile)
if err != nil {
return nil, err
}
out = append(out, "--pid-file", abs)
}
if o.NoPivot {
out = append(out, "--no-pivot")
}
if o.NoSubreaper {
out = append(out, "-no-subreaper")
}
return out, nil
}
// Restore restores a container with the provided id from an existing checkpoint
func (r *Runc) Restore(context context.Context, id, bundle string, opts *RestoreOpts) (int, error) {
args := []string{"restore"}
if opts != nil {
oargs, err := opts.args()
if err != nil {
return -1, err
}
args = append(args, oargs...)
}
args = append(args, "--bundle", bundle)
cmd := r.command(context, append(args, id)...)
if opts != nil {
opts.Set(cmd)
}
if err := Monitor.Start(cmd); err != nil {
return -1, err
}
if opts != nil && opts.IO != nil {
if c, ok := opts.IO.(StartCloser); ok {
if err := c.CloseAfterStart(); err != nil {
return -1, err
}
}
}
return Monitor.Wait(cmd)
}
// Update updates the current container with the provided resource spec
func (r *Runc) Update(context context.Context, id string, resources *specs.LinuxResources) error {
buf := bytes.NewBuffer(nil)
if err := json.NewEncoder(buf).Encode(resources); err != nil {
return err
}
args := []string{"update", "--resources", "-", id}
cmd := r.command(context, args...)
cmd.Stdin = buf
return r.runOrError(cmd)
}
func (r *Runc) args() (out []string) {
if r.Root != "" {
out = append(out, "--root", r.Root)
}
if r.Debug {
out = append(out, "--debug")
}
if r.Log != "" {
out = append(out, "--log", r.Log)
}
if r.LogFormat != none {
out = append(out, "--log-format", string(r.LogFormat))
}
if r.Criu != "" {
out = append(out, "--criu", r.Criu)
}
return out
}
// runOrError will run the provided command. If an error is
// encountered and neither Stdout nor Stderr was set, the error and the
// stderr of the command will be returned in the format of <error>:
// <stderr>
func (r *Runc) runOrError(cmd *exec.Cmd) error {
if cmd.Stdout != nil || cmd.Stderr != nil {
return Monitor.Run(cmd)
}
data, err := Monitor.CombinedOutput(cmd)
if err != nil {
return fmt.Errorf("%s: %s", err, data)
}
return nil
}
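
A consumption sketch for the Events channel; the container id "demo" and the 5-second interval are assumptions. Decode failures surface as events with Err set, per the comment on Event, and the channel is closed when the stream ends:

```go
package main

import (
	"context"
	"fmt"
	"time"

	runc "github.com/containerd/go-runc"
)

func main() {
	r := &runc.Runc{}
	// Hypothetical container id; stats are emitted every 5 seconds.
	events, err := r.Events(context.Background(), "demo", 5*time.Second)
	if err != nil {
		panic(err)
	}
	for e := range events {
		if e.Err != nil {
			fmt.Println("decode error:", e.Err)
			continue
		}
		if e.Stats == nil {
			// Non-stats events (such as OOM notifications) carry no data payload.
			fmt.Println("event:", e.Type)
			continue
		}
		fmt.Printf("cpu=%dns mem=%dB\n", e.Stats.Cpu.Usage.Total, e.Stats.Memory.Usage.Usage)
	}
}
```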

28
vendor/github.com/containerd/go-runc/utils.go generated vendored Normal file

@ -0,0 +1,28 @@
package runc
import (
"io/ioutil"
"strconv"
"syscall"
)
// ReadPidFile reads the pid file at the provided path and returns
// the pid or an error if the read and conversion is unsuccessful
func ReadPidFile(path string) (int, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return -1, err
}
return strconv.Atoi(string(data))
}
const exitSignalOffset = 128
// exitStatus returns the correct exit status for a process based on if it
// was signaled or exited cleanly
func exitStatus(status syscall.WaitStatus) int {
if status.Signaled() {
return exitSignalOffset + int(status.Signal())
}
return status.ExitStatus()
}

101
vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go generated vendored Normal file

@ -0,0 +1,101 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package sortkeys
import (
"sort"
)
func Strings(l []string) {
sort.Strings(l)
}
func Float64s(l []float64) {
sort.Float64s(l)
}
func Float32s(l []float32) {
sort.Sort(Float32Slice(l))
}
func Int64s(l []int64) {
sort.Sort(Int64Slice(l))
}
func Int32s(l []int32) {
sort.Sort(Int32Slice(l))
}
func Uint64s(l []uint64) {
sort.Sort(Uint64Slice(l))
}
func Uint32s(l []uint32) {
sort.Sort(Uint32Slice(l))
}
func Bools(l []bool) {
sort.Sort(BoolSlice(l))
}
type BoolSlice []bool
func (p BoolSlice) Len() int { return len(p) }
func (p BoolSlice) Less(i, j int) bool { return p[j] }
func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
type Int64Slice []int64
func (p Int64Slice) Len() int { return len(p) }
func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
type Int32Slice []int32
func (p Int32Slice) Len() int { return len(p) }
func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
type Uint64Slice []uint64
func (p Uint64Slice) Len() int { return len(p) }
func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
type Uint32Slice []uint32
func (p Uint32Slice) Len() int { return len(p) }
func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
type Float32Slice []float32
func (p Float32Slice) Len() int { return len(p) }
func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

135
vendor/github.com/gogo/protobuf/types/any.go generated vendored Normal file

@ -0,0 +1,135 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package types
// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.
import (
"fmt"
"reflect"
"strings"
"github.com/gogo/protobuf/proto"
)
const googleApis = "type.googleapis.com/"
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *Any) (string, error) {
slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return any.TypeUrl[slash+1:], nil
}
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*Any, error) {
value, err := proto.Marshal(pb)
if err != nil {
return nil, err
}
return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
proto.Message
}
// EmptyAny returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if the corresponding message
// type isn't linked in.
func EmptyAny(any *Any) (proto.Message, error) {
aname, err := AnyMessageName(any)
if err != nil {
return nil, err
}
t := proto.MessageType(aname)
if t == nil {
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
}
return reflect.New(t.Elem()).Interface().(proto.Message), nil
}
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *Any, pb proto.Message) error {
if d, ok := pb.(*DynamicAny); ok {
if d.Message == nil {
var err error
d.Message, err = EmptyAny(any)
if err != nil {
return err
}
}
return UnmarshalAny(any, d.Message)
}
aname, err := AnyMessageName(any)
if err != nil {
return err
}
mname := proto.MessageName(pb)
if aname != mname {
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
}
return proto.Unmarshal(any.Value, pb)
}
// Is returns true if any value contains a given message type.
func Is(any *Any, pb proto.Message) bool {
aname, err := AnyMessageName(any)
if err != nil {
return false
}
return aname == proto.MessageName(pb)
}
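
A small round-trip sketch using the helpers above (not part of the vendored file); it wraps an Any inside another Any only because Any is the one message type certain to be registered in this package, standing in for a real application message:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	// The inner Any plays the role of an arbitrary application message.
	inner := &types.Any{TypeUrl: "example/inner", Value: []byte("payload")}

	wrapped, err := types.MarshalAny(inner)
	if err != nil {
		panic(err)
	}
	fmt.Println(wrapped.TypeUrl)                 // type.googleapis.com/google.protobuf.Any
	fmt.Println(types.Is(wrapped, &types.Any{})) // true

	var out types.Any
	if err := types.UnmarshalAny(wrapped, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.TypeUrl, string(out.Value)) // example/inner payload
}
```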

666
vendor/github.com/gogo/protobuf/types/any.pb.go generated vendored Normal file

@ -0,0 +1,666 @@
// Code generated by protoc-gen-gogo.
// source: any.proto
// DO NOT EDIT!
/*
Package types is a generated protocol buffer package.
It is generated from these files:
any.proto
It has these top-level messages:
Any
*/
package types
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import bytes "bytes"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Any) Reset() { *m = Any{} }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptorAny, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
}
return ""
}
func (m *Any) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func (this *Any) Compare(that interface{}) int {
if that == nil {
if this == nil {
return 0
}
return 1
}
that1, ok := that.(*Any)
if !ok {
that2, ok := that.(Any)
if ok {
that1 = &that2
} else {
return 1
}
}
if that1 == nil {
if this == nil {
return 0
}
return 1
} else if this == nil {
return -1
}
if this.TypeUrl != that1.TypeUrl {
if this.TypeUrl < that1.TypeUrl {
return -1
}
return 1
}
if c := bytes.Compare(this.Value, that1.Value); c != 0 {
return c
}
return 0
}
func (this *Any) Equal(that interface{}) bool {
if that == nil {
if this == nil {
return true
}
return false
}
that1, ok := that.(*Any)
if !ok {
that2, ok := that.(Any)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
if this == nil {
return true
}
return false
} else if this == nil {
return false
}
if this.TypeUrl != that1.TypeUrl {
return false
}
if !bytes.Equal(this.Value, that1.Value) {
return false
}
return true
}
func (this *Any) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&types.Any{")
s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n")
s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringAny(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *Any) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Any) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.TypeUrl) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl)))
i += copy(dAtA[i:], m.TypeUrl)
}
if len(m.Value) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintAny(dAtA, i, uint64(len(m.Value)))
i += copy(dAtA[i:], m.Value)
}
return i, nil
}
func encodeFixed64Any(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Any(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintAny(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func NewPopulatedAny(r randyAny, easy bool) *Any {
this := &Any{}
this.TypeUrl = string(randStringAny(r))
v1 := r.Intn(100)
this.Value = make([]byte, v1)
for i := 0; i < v1; i++ {
this.Value[i] = byte(r.Intn(256))
}
if !easy && r.Intn(10) != 0 {
}
return this
}
type randyAny interface {
Float32() float32
Float64() float64
Int63() int64
Int31() int32
Uint32() uint32
Intn(n int) int
}
func randUTF8RuneAny(r randyAny) rune {
ru := r.Intn(62)
if ru < 10 {
return rune(ru + 48)
} else if ru < 36 {
return rune(ru + 55)
}
return rune(ru + 61)
}
func randStringAny(r randyAny) string {
v2 := r.Intn(100)
tmps := make([]rune, v2)
for i := 0; i < v2; i++ {
tmps[i] = randUTF8RuneAny(r)
}
return string(tmps)
}
func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) {
l := r.Intn(5)
for i := 0; i < l; i++ {
wire := r.Intn(4)
if wire == 3 {
wire = 5
}
fieldNumber := maxFieldNumber + r.Intn(100)
dAtA = randFieldAny(dAtA, r, fieldNumber, wire)
}
return dAtA
}
func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte {
key := uint32(fieldNumber)<<3 | uint32(wire)
switch wire {
case 0:
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
v3 := r.Int63()
if r.Intn(2) == 0 {
v3 *= -1
}
dAtA = encodeVarintPopulateAny(dAtA, uint64(v3))
case 1:
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
case 2:
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
ll := r.Intn(100)
dAtA = encodeVarintPopulateAny(dAtA, uint64(ll))
for j := 0; j < ll; j++ {
dAtA = append(dAtA, byte(r.Intn(256)))
}
default:
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
}
return dAtA
}
func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte {
for v >= 1<<7 {
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
v >>= 7
}
dAtA = append(dAtA, uint8(v))
return dAtA
}
func (m *Any) Size() (n int) {
var l int
_ = l
l = len(m.TypeUrl)
if l > 0 {
n += 1 + l + sovAny(uint64(l))
}
l = len(m.Value)
if l > 0 {
n += 1 + l + sovAny(uint64(l))
}
return n
}
func sovAny(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozAny(x uint64) (n int) {
return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Any) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Any{`,
`TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`,
`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
`}`,
}, "")
return s
}
func valueToStringAny(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Any) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAny
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Any: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAny
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAny
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TypeUrl = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAny
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAny
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
if m.Value == nil {
m.Value = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAny(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAny
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
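// skipAny returns the number of bytes occupied by the next field in dAtA
// (tag plus payload), handling varint, fixed64, length-delimited, group
// (recursively, up to the matching end-group tag), and fixed32 wire types.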
func skipAny(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAny
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAny
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAny
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthAny
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAny
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipAny(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowAny = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("any.proto", fileDescriptorAny) }
var fileDescriptorAny = []byte{
// 208 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0xcc, 0xab, 0xd4,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92,
0x4a, 0xd3, 0x94, 0xcc, 0xb8, 0x98, 0x1d, 0xf3, 0x2a, 0x85, 0x24, 0xb9, 0x38, 0x4a, 0x2a, 0x0b,
0x52, 0xe3, 0x4b, 0x8b, 0x72, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xd8, 0x41, 0xfc, 0xd0,
0xa2, 0x1c, 0x21, 0x11, 0x2e, 0xd6, 0xb2, 0xc4, 0x9c, 0xd2, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d,
0x9e, 0x20, 0x08, 0xc7, 0xa9, 0x89, 0xf1, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e,
0x3c, 0x94, 0x63, 0xfc, 0xf1, 0x50, 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c,
0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72,
0x0c, 0x1f, 0x40, 0xe2, 0x8f, 0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xec, 0x77,
0xe2, 0x70, 0xcc, 0xab, 0x0c, 0x00, 0x71, 0x02, 0x18, 0xa3, 0x58, 0x41, 0x56, 0x16, 0x2f, 0x60,
0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x3a, 0x00, 0xaa,
0x5a, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89, 0x0d,
0x6c, 0x8c, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x63, 0x5d, 0x2d, 0x27, 0xe1, 0x00, 0x00, 0x00,
}

Some files were not shown because too many files have changed in this diff