llb: improve llb generation

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>

branch docker-18.09
parent 168fbcc873
commit 728de510f3
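For orientation, here is a minimal sketch of how the reworked llb client API reads after this change, assembled from the test and example updates in this commit (image references and paths are only illustrative):

	alpine := llb.Image("docker.io/library/alpine:latest")
	busybox := llb.Image("docker.io/library/busybox:latest")

	// Run now takes RunOptions such as Shlex instead of a raw Meta struct.
	ls := alpine.Run(llb.Shlex("/bin/ls -l"))
	cp := ls.Run(llb.Shlex("/bin/cp -a /busybox/etc/passwd baz"))
	cp.AddMount("/busybox", busybox)

	dt, err := cp.Marshal() // dt is the serialized list of pb.Op messages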
@@ -115,9 +115,9 @@ func testBuildMultiMount(t *testing.T, address string) {
 	assert.Nil(t, err)
 
 	alpine := llb.Image("docker.io/library/alpine:latest")
-	ls := alpine.Run(llb.Meta{Args: []string{"/bin/ls", "-l"}, Cwd: "/"})
+	ls := alpine.Run(llb.Shlex("/bin/ls -l"))
 	busybox := llb.Image("docker.io/library/busybox:latest")
-	cp := ls.Run(llb.Meta{Args: []string{"/bin/cp", "-a", "/busybox/etc/passwd", "baz"}, Cwd: "/"})
+	cp := ls.Run(llb.Shlex("/bin/cp -a /busybox/etc/passwd baz"))
 	cp.AddMount("/busybox", busybox)
 
 	dt, err := cp.Marshal()
@@ -2,19 +2,18 @@ package llb
 
 import (
 	_ "crypto/sha256"
+	"fmt"
 	"sort"
 
 	"github.com/gogo/protobuf/proto"
+	"github.com/google/shlex"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/tonistiigi/buildkit_poc/solver/pb"
+	"github.com/tonistiigi/buildkit_poc/util/system"
 )
 
-type Op interface {
-	Validate() error
-	Marshal() ([][]byte, error)
-	Run(meta Meta) *ExecOp
-}
+type RunOption func(m Meta) Meta
 
 type SourceOp struct {
 	id string
@@ -38,47 +37,133 @@ type Mount struct {
 	mount  *Mount
 	src    *SourceOp
 	output bool
+	inputIndex int64
 }
 
-func Source(id string) *SourceOp {
-	return &SourceOp{id: id}
+func NewMeta(args ...string) Meta {
+	m := Meta{}
+	m = m.addEnv("PATH", system.DefaultPathEnv)
+	m = m.setArgs(args...)
+	m.Cwd = "/"
+	return m
 }
 
-func (so *SourceOp) Validate() error {
-	// TODO: basic identifier validation
-	if so.id == "" {
-		return errors.Errorf("source identifier can't be empty")
+func (m *Meta) ensurePrivate() {
+	m.Env = append([]string{}, m.Env...)
+	m.Args = append([]string{}, m.Args...)
+}
+
+func (m Meta) addEnv(k, v string) Meta {
+	(&m).ensurePrivate()
+	// TODO: flatten
+	m.Env = append(m.Env, k+"="+v)
+	return m
+}
+
+func (m Meta) setArgs(args ...string) Meta {
+	m.Args = append([]string{}, args...)
+	return m
+}
+
+func Shlex(str string, v ...string) RunOption {
+	return func(m Meta) Meta {
+		vi := make([]interface{}, 0, len(v))
+		for _, v := range v {
+			vi = append(vi, v)
+		}
+		sp, err := shlex.Split(fmt.Sprintf(str, vi...))
+		if err != nil {
+			panic(err) // TODO
+		}
+		(&m).ensurePrivate()
+		return m.setArgs(sp...)
+	}
+}
+
+type State struct {
+	src      *SourceOp
+	exec     *ExecOp
+	meta     Meta
+	mount    *Mount
+	metaNext Meta
+}
+
+type ExecState struct {
+	State
+}
+
+func (s *State) Validate() error {
+	if s.src != nil {
+		if err := s.src.Validate(); err != nil {
+			return err
+		}
+	}
+	if s.exec != nil {
+		if err := s.exec.Validate(); err != nil {
+			return err
+		}
 	}
 	return nil
 }
 
-func (so *SourceOp) Run(m Meta) *ExecOp {
-	return newExec(m, so, nil)
+func (s *State) Run(opts ...RunOption) *ExecState {
+	var es ExecState
+	meta := s.metaNext
+	for _, o := range opts {
+		meta = o(meta)
+	}
+	exec := newExec(meta, s.src, s.mount)
+	es.exec = exec
+	es.mount = exec.root
+	es.metaNext = meta
+	es.meta = meta
+	return &es
 }
 
-func (so *SourceOp) Marshal() ([][]byte, error) {
-	if err := so.Validate(); err != nil {
+func (s *State) AddEnv(k, v string) *State {
+	s.metaNext = s.metaNext.addEnv(k, v)
+	return s
+}
+func (s *State) Dir(wd string) *State {
+	s.metaNext.Cwd = wd
+	return s
+}
+
+func (s *State) Marshal() ([][]byte, error) {
+	if err := s.Validate(); err != nil {
 		return nil, err
 	}
 	cache := make(map[digest.Digest]struct{})
-	_, list, err := so.recursiveMarshal(nil, cache)
+	var list [][]byte
+	var err error
+	if s.src != nil { // TODO: fix repetition
+		_, list, err = s.src.recursiveMarshal(nil, cache)
+	} else if s.exec != nil {
+		_, list, err = s.exec.root.recursiveMarshal(nil, cache)
+	} else {
+		_, list, err = s.mount.recursiveMarshal(nil, cache)
+	}
 	return list, err
 }
 
-func (so *SourceOp) recursiveMarshal(list [][]byte, cache map[digest.Digest]struct{}) (digest.Digest, [][]byte, error) {
-	if err := so.Validate(); err != nil {
-		return "", nil, err
+func (s *ExecState) AddMount(dest string, mount *State) *State {
+	m := &Mount{
+		dest:   dest,
+		src:    mount.src,
+		mount:  mount.mount,
+		op:     s.exec,
+		output: true, // TODO: should be set only if something inherits
 	}
-	po := &pb.Op{
-		Op: &pb.Op_Source{
-			Source: &pb.SourceOp{Identifier: so.id},
-		},
-	}
-	return marshal(po, list, cache)
+	var newState State
+	newState.meta = s.meta
+	newState.metaNext = s.meta
+	newState.mount = m
+	s.exec.mounts = append(s.exec.mounts, m)
+	return &newState
 }
 
-func Image(ref string) *SourceOp {
-	return Source("docker-image://" + ref) // controversial
+func (s *ExecState) Root() *State {
+	return &s.State
 }
 
 func newExec(meta Meta, src *SourceOp, m *Mount) *ExecOp {
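A note on the Shlex helper introduced above: the string is first expanded with fmt.Sprintf using the trailing arguments and then split with shell-style quoting rules via the vendored github.com/google/shlex package, so a call like the sketch below (paths illustrative) ends up setting Args to []string{"cp", "-a", "/src", "/dst"}. A split error currently panics, as the TODO in the hunk notes.

	run := state.Run(llb.Shlex("cp -a %s %s", "/src", "/dst")) // state is an *llb.State, e.g. from llb.Image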
@@ -97,26 +182,35 @@ func newExec(meta Meta, src *SourceOp, m *Mount) *ExecOp {
 	return exec
 }
 
-func (eo *ExecOp) AddMount(dest string, src interface{}) *Mount {
-	var s *SourceOp
-	var m *Mount
-	switch v := src.(type) {
-	case *SourceOp:
-		s = v
-	case *Mount:
-		m = v
-	case *ExecOp:
-		m = v.root
-	default:
-		panic("invalid input")
+func Source(id string) *State {
+	return &State{
+		metaNext: NewMeta(),
+		src:      &SourceOp{id: id},
 	}
-	eo.mounts = append(eo.mounts, &Mount{
-		dest:   dest,
-		src:    s,
-		mount:  m,
-		output: true, // TODO: should be set only if something inherits
-	})
-	return m
+}
+
+func (so *SourceOp) Validate() error {
+	// TODO: basic identifier validation
+	if so.id == "" {
+		return errors.Errorf("source identifier can't be empty")
+	}
+	return nil
+}
+
+func (so *SourceOp) recursiveMarshal(list [][]byte, cache map[digest.Digest]struct{}) (digest.Digest, [][]byte, error) {
+	if err := so.Validate(); err != nil {
+		return "", nil, err
+	}
+	po := &pb.Op{
+		Op: &pb.Op_Source{
+			Source: &pb.SourceOp{Identifier: so.id},
+		},
+	}
+	return appendResult(po, list, cache)
+}
+
+func Image(ref string) *State {
+	return Source("docker-image://" + ref) // controversial
 }
 
 func (eo *ExecOp) Validate() error {
@@ -136,10 +230,6 @@ func (eo *ExecOp) Validate() error {
 	return nil
 }
 
-func (eo *ExecOp) Run(meta Meta) *ExecOp {
-	return newExec(meta, nil, eo.root)
-}
-
 func (eo *ExecOp) Marshal() ([][]byte, error) {
 	if err := eo.Validate(); err != nil {
 		return nil, err
@@ -173,7 +263,7 @@ func (eo *ExecOp) recursiveMarshal(list [][]byte, cache map[digest.Digest]struct{})
 	for _, m := range eo.mounts {
 		var dgst digest.Digest
 		var err error
-		var op Op
+		var op interface{}
 		if m.src != nil {
 			op = m.src
 		} else {
@@ -185,15 +275,19 @@ func (eo *ExecOp) recursiveMarshal(list [][]byte, cache map[digest.Digest]struct{})
 		}
 		inputIndex := len(pop.Inputs)
 		for i := range pop.Inputs {
-			if pop.Inputs[i].Digest == dgst.String() {
+			if pop.Inputs[i].Digest == dgst {
 				inputIndex = i
 				break
 			}
 		}
 		if inputIndex == len(pop.Inputs) {
+			var mountIndex int64
+			if m.mount != nil {
+				mountIndex = m.mount.inputIndex
+			}
 			pop.Inputs = append(pop.Inputs, &pb.Input{
-				Digest: dgst.String(),
-				Index:  0, // TODO
+				Digest: dgst,
+				Index:  mountIndex,
 			})
 		}
 
@@ -207,13 +301,36 @@ func (eo *ExecOp) recursiveMarshal(list [][]byte, cache map[digest.Digest]struct{})
 		} else {
 			pm.Output = -1
 		}
+		m.inputIndex = outputIndex - 1
 		peo.Mounts = append(peo.Mounts, pm)
 	}
 
-	return marshal(pop, list, cache)
+	return appendResult(pop, list, cache)
 }
 
-func marshal(p proto.Marshaler, list [][]byte, cache map[digest.Digest]struct{}) (dgst digest.Digest, out [][]byte, err error) {
+func (m *Mount) recursiveMarshal(list [][]byte, cache map[digest.Digest]struct{}) (digest.Digest, [][]byte, error) {
+	if m.op == nil {
+		return "", nil, errors.Errorf("invalid mount")
+	}
+	var dgst digest.Digest
+	dgst, list, err := m.op.recursiveMarshal(list, cache)
+	if err != nil {
+		return "", list, err
+	}
+	for _, m2 := range m.op.mounts {
+		if m2 == m {
+			po := &pb.Op{}
+			po.Inputs = append(po.Inputs, &pb.Input{
+				Digest: dgst,
+				Index:  int64(m.inputIndex),
+			})
+			return appendResult(po, list, cache)
+		}
+	}
+	return "", nil, errors.Errorf("invalid mount")
+}
+
+func appendResult(p proto.Marshaler, list [][]byte, cache map[digest.Digest]struct{}) (dgst digest.Digest, out [][]byte, err error) {
 	dt, err := p.Marshal()
 	if err != nil {
 		return "", nil, err
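The two hunks above are what make chained mounts addressable when marshaling: while an ExecOp is marshaled, every mount records the output slot it occupies (m.inputIndex = outputIndex - 1), and the new (*Mount).recursiveMarshal then lets a state derived from such a mount emit a pb.Op whose single input points back at the parent op's digest and that slot. Roughly (parentDigest is an illustrative name; see the real code above):

	po := &pb.Op{}
	po.Inputs = append(po.Inputs, &pb.Input{
		Digest: parentDigest,        // digest of the ExecOp that produced the mount
		Index:  int64(m.inputIndex), // which of that op's outputs the mount refers to
	})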
@@ -227,7 +344,7 @@ func marshal(p proto.Marshaler, list [][]byte, cache map[digest.Digest]struct{})
 	return dgst, list, nil
 }
 
-func recursiveMarshalAny(op Op, list [][]byte, cache map[digest.Digest]struct{}) (dgst digest.Digest, out [][]byte, err error) {
+func recursiveMarshalAny(op interface{}, list [][]byte, cache map[digest.Digest]struct{}) (dgst digest.Digest, out [][]byte, err error) {
 	switch op := op.(type) {
 	case *ExecOp:
 		return op.recursiveMarshal(list, cache)
@@ -1,24 +1,29 @@
 package main
 
 import (
+	"log"
 	"os"
 
 	"github.com/tonistiigi/buildkit_poc/client/llb"
 )
 
 func main() {
-	busybox := llb.Image("docker.io/library/redis:latest")
-	mod1 := busybox.Run(llb.Meta{Args: []string{"/bin/sleep", "1"}, Cwd: "/"})
-	mod2 := mod1.Run(llb.Meta{Args: []string{"/bin/sh", "-c", "echo foo > /bar"}, Cwd: "/"})
-	alpine := llb.Image("docker.io/library/alpine:latest")
-	mod3 := mod2.Run(llb.Meta{Args: []string{"/bin/cp", "-a", "/alpine/etc/passwd", "baz"}, Cwd: "/"})
-	mod3.AddMount("/alpine", alpine)
-	mod3.AddMount("/redis", busybox)
-	mod4 := mod3.Run(llb.Meta{Args: []string{"/bin/ls", "-l", "/"}, Cwd: "/"})
+	busybox := llb.Image("docker.io/library/busybox:latest")
+	img1 := busybox.
+		Run(llb.Shlex("sleep 1")).
+		Run(llb.Shlex("sh -c \"echo foo > /bar\""))
+
+	alpine := llb.Image("docker.io/library/alpine:latest")
+
+	copy := img1.Run(llb.Shlex("cp -a /alpine/etc/passwd /baz"))
+	copy.AddMount("/alpine", alpine)
+	copy.AddMount("/subroot", busybox)
 
-	res := mod4
+	res := copy.Run(llb.Shlex("ls -l /"))
+
 	dt, err := res.Marshal()
 	if err != nil {
+		log.Printf("%+v\n", err)
 		panic(err)
 	}
 	llb.WriteTo(dt, os.Stdout)
@@ -17,7 +17,7 @@ RUN apk add --no-cache btrfs-progs-dev
 ARG CONTAINERD_VERSION
 RUN git clone https://github.com/containerd/containerd.git "$GOPATH/src/github.com/containerd/containerd" \
 	&& cd "$GOPATH/src/github.com/containerd/containerd" \
-	&& git checkout -q "$RUNC_VERSION" \
+	&& git checkout -q "$CONTAINERD_VERSION" \
 	&& make bin/containerd
 
 FROM gobuild-base AS unit-tests
@@ -1,3 +1,3 @@
 package pb
 
-//go:generate protoc --gogoslick_out=. ops.proto
+//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto
solver/pb/ops.pb.go (1222 lines changed): generated file, diff suppressed because it is too large.
@@ -2,6 +2,8 @@ syntax = "proto3";
 
 package pb;
 
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
 message Op {
 	repeated Input inputs = 1;
 	oneof op {
@@ -12,7 +14,7 @@ message Op {
 }
 
 message Input {
-	string digest = 1;
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 	int64 index = 2;
 }
 
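The gogoproto.customtype option added above changes the generated Go code so that Input.Digest is a go-digest Digest value rather than a plain string, which is why the llb package can now compare and assign digests directly (pop.Inputs[i].Digest == dgst) instead of going through dgst.String(). As a sketch only (the authoritative result is the regenerated, suppressed ops.pb.go above), the struct comes out roughly as:

	type Input struct {
		Digest digest.Digest // was string; digest is github.com/opencontainers/go-digest (sketch, not the generated file)
		Index  int64
	}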
@@ -41,6 +41,10 @@ func (s *Solver) Solve(ctx context.Context, id string, g *opVertex) error {
 
 	pr, ctx, closeProgressWriter := progress.NewContext(ctx)
 
+	if len(g.inputs) > 0 { // TODO: detect op_return better
+		g = g.inputs[0]
+	}
+
 	_, err := s.jobs.new(ctx, id, g, pr)
 	if err != nil {
 		return err
@@ -158,7 +162,7 @@ func (g *opVertex) solve(ctx context.Context, opt Opt) (retErr error) {
 			return err
 		}
 	default:
-		return errors.Errorf("invalid op type")
+		return errors.Errorf("invalid op type %T", g.op.Op)
 	}
 	return nil
 }
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+// DefaultPathEnv is unix style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character .
+const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on Linux.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	return path, nil
+}
@@ -0,0 +1,37 @@
+// +build windows
+
+package system
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
+// the container. Docker has no context of what the default path should be.
+const DefaultPathEnv = ""
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates it to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be contatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C:      --> Fail
+// C:\     --> \
+// a       --> a
+// /a      --> \a
+// d:\     --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("No relative path specified in %q", path)
+	}
+	if !filepath.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}
@@ -0,0 +1,78 @@
+// +build windows
+
+package system
+
+import "testing"
+
+// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter
+func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
+	// Fails if not C drive.
+	_, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`)
+	if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") {
+		t.Fatalf("Expected error for d:")
+	}
+
+	// Single character is unchanged
+	if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil {
+		t.Fatalf("Single character should pass")
+	}
+	if path != "z" {
+		t.Fatalf("Single character should be unchanged")
+	}
+
+	// Two characters without colon is unchanged
+	if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil {
+		t.Fatalf("2 characters without colon should pass")
+	}
+	if path != "AB" {
+		t.Fatalf("2 characters without colon should be unchanged")
+	}
+
+	// Abs path without drive letter
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil {
+		t.Fatalf("abs path no drive letter should pass")
+	}
+	if path != `\l` {
+		t.Fatalf("abs path without drive letter should be unchanged")
+	}
+
+	// Abs path without drive letter, linux style
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil {
+		t.Fatalf("abs path no drive letter linux style should pass")
+	}
+	if path != `\l` {
+		t.Fatalf("abs path without drive letter linux failed %s", path)
+	}
+
+	// Drive-colon should be stripped
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil {
+		t.Fatalf("An absolute path should pass")
+	}
+	if path != `\` {
+		t.Fatalf(`An absolute path should have been shortened to \ %s`, path)
+	}
+
+	// Verify with a linux-style path
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil {
+		t.Fatalf("An absolute path should pass")
+	}
+	if path != `\` {
+		t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path)
+	}
+
+	// Failure on c:
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil {
+		t.Fatalf("c: should fail")
+	}
+	if err.Error() != `No relative path specified in "c:"` {
+		t.Fatalf(path, err)
+	}
+
+	// Failure on d:
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil {
+		t.Fatalf("c: should fail")
+	}
+	if err.Error() != `No relative path specified in "d:"` {
+		t.Fatalf(path, err)
+	}
+}
@@ -28,3 +28,4 @@ golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
 
 github.com/urfave/cli d70f47eeca3afd795160003bc6e28b001d60c67c
 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
+github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
@@ -0,0 +1,202 @@
New file contents suppressed: the standard Apache License, Version 2.0 text (202 lines), added verbatim for the newly vendored github.com/google/shlex package.
@@ -0,0 +1,2 @@
+go-shlex is a simple lexer for go that supports shell-style quoting,
+commenting, and escaping.
@@ -0,0 +1,417 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input in to tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+  shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+  l := NewLexer(os.Stdin)
+  for ; token, err := l.Next(); err != nil {
+  	// process token
+  }
+
+To access the raw token stream (which includes tokens for comments):
+
+  t := NewTokenizer(os.Stdin)
+  for ; token, err := t.Next(); err != nil {
+  	// process token
+  }
+
+*/
+package shlex
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// TokenType is a top-level token classification: A word, space, comment, unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexographical token.
+type Token struct {
+	tokenType TokenType
+	value     string
+}
+
+// Equal reports whether tokens a, and b, are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+	if a == nil || b == nil {
+		return false
+	}
+	if a.tokenType != b.tokenType {
+		return false
+	}
+	return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+	spaceRunes            = " \t\r\n"
+	escapingQuoteRunes    = `"`
+	nonEscapingQuoteRunes = "'"
+	escapeRunes           = `\`
+	commentRunes          = "#"
+)
+
+// Classes of rune token
+const (
+	unknownRuneClass runeTokenClass = iota
+	spaceRuneClass
+	escapingQuoteRuneClass
+	nonEscapingQuoteRuneClass
+	escapeRuneClass
+	commentRuneClass
+	eofRuneClass
+)
+
+// Classes of lexographic token
+const (
+	UnknownToken TokenType = iota
+	WordToken
+	SpaceToken
+	CommentToken
+)
+
+// Lexer state machine states
+const (
+	startState           lexerState = iota // no runes have been seen
+	inWordState                            // processing regular runes in a word
+	escapingState                          // we have just consumed an escape rune; the next rune is literal
+	escapingQuotedState                    // we have just consumed an escape rune within a quoted string
+	quotingEscapingState                   // we are within a quoted string that supports escaping ("...")
+	quotingState                           // we are within a string that does not support escaping ('...')
+	commentState                           // we are within a comment (everything following an unquoted or unescaped #
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+	for _, runeChar := range runes {
+		typeMap[runeChar] = tokenType
+	}
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+	t := tokenClassifier{}
+	t.addRuneClass(spaceRunes, spaceRuneClass)
+	t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+	t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+	t.addRuneClass(escapeRunes, escapeRuneClass)
+	t.addRuneClass(commentRunes, commentRuneClass)
+	return t
+}
+
+// ClassifyRune classifiees a rune
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+	return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+
+	return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+	for {
+		token, err := (*Tokenizer)(l).Next()
+		if err != nil {
+			return "", err
+		}
+		switch token.tokenType {
+		case WordToken:
+			return token.value, nil
+		case CommentToken:
+			// skip comments
+		default:
+			return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+		}
+	}
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens
+type Tokenizer struct {
+	input      bufio.Reader
+	classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+	input := bufio.NewReader(r)
+	classifier := newDefaultClassifier()
+	return &Tokenizer{
+		input:      *input,
+		classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It will panic if it encounters a rune which it does not know how to handle.
+func (t *Tokenizer) scanStream() (*Token, error) {
+	state := startState
+	var tokenType TokenType
+	var value []rune
+	var nextRune rune
+	var nextRuneType runeTokenClass
+	var err error
+
+	for {
+		nextRune, _, err = t.input.ReadRune()
+		nextRuneType = t.classifier.ClassifyRune(nextRune)
+
+		if err == io.EOF {
+			nextRuneType = eofRuneClass
+			err = nil
+		} else if err != nil {
+			return nil, err
+		}
+
+		switch state {
+		case startState: // no runes read yet
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						return nil, io.EOF
+					}
+				case spaceRuneClass:
+					{
+					}
+				case escapingQuoteRuneClass:
+					{
+						tokenType = WordToken
+						state = quotingEscapingState
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						tokenType = WordToken
+						state = quotingState
+					}
+				case escapeRuneClass:
+					{
+						tokenType = WordToken
+						state = escapingState
+					}
+				case commentRuneClass:
+					{
+						tokenType = CommentToken
+						state = commentState
+					}
+				default:
+					{
+						tokenType = WordToken
+						value = append(value, nextRune)
+						state = inWordState
+					}
+				}
+			}
+		case inWordState: // in a regular word
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case spaceRuneClass:
+					{
+						t.input.UnreadRune()
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case escapingQuoteRuneClass:
+					{
+						state = quotingEscapingState
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						state = quotingState
+					}
+				case escapeRuneClass:
+					{
+						state = escapingState
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case escapingState: // the rune after an escape character
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						err = fmt.Errorf("EOF found after escape character")
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				default:
+					{
+						state = inWordState
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case escapingQuotedState: // the next rune after an escape character, in double quotes
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						err = fmt.Errorf("EOF found after escape character")
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				default:
+					{
+						state = quotingEscapingState
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case quotingEscapingState: // in escaping double quotes
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						err = fmt.Errorf("EOF found when expecting closing quote")
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case escapingQuoteRuneClass:
+					{
+						state = inWordState
+					}
+				case escapeRuneClass:
+					{
+						state = escapingQuotedState
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case quotingState: // in non-escaping single quotes
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						err = fmt.Errorf("EOF found when expecting closing quote")
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						state = inWordState
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case commentState: // in a comment
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case spaceRuneClass:
+					{
+						if nextRune == '\n' {
+							state = startState
+							token := &Token{
+								tokenType: tokenType,
+								value:     string(value)}
+							return token, err
+						} else {
+							value = append(value, nextRune)
+						}
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		default:
+			{
+				return nil, fmt.Errorf("Unexpected state: %v", state)
+			}
+		}
+	}
+}
+
+// Next returns the next token in the stream.
+func (t *Tokenizer) Next() (*Token, error) {
+	return t.scanStream()
+}
+
+// Split partitions a string into a slice of strings.
+func Split(s string) ([]string, error) {
+	l := NewLexer(strings.NewReader(s))
+	subStrings := make([]string, 0)
+	for {
+		word, err := l.Next()
+		if err != nil {
+			if err == io.EOF {
+				return subStrings, nil
+			}
+			return subStrings, err
+		}
+		subStrings = append(subStrings, word)
+	}
+}
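The vendored lexer above is what llb.Shlex delegates to; for example (illustrative), shell-style double quoting survives the split, which is what the updated example in this commit relies on:

	args, err := shlex.Split(`sh -c "echo foo > /bar"`)
	// args == []string{"sh", "-c", "echo foo > /bar"}, err == nil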
@@ -13,7 +13,7 @@ import (
 // Ideally we don't have to import whole containerd just for the default spec
 
 func GenerateSpec(ctx context.Context, meta worker.Meta, mounts map[string]cache.Mountable) (*specs.Spec, error) {
-	s, err := containerd.GenerateSpec()
+	s, err := containerd.GenerateSpec(containerd.WithHostNamespace(specs.NetworkNamespace))
 	if err != nil {
 		return nil, err
 	}