package solver

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/progress"
	"github.com/moby/buildkit/util/tracing"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// ResolveOpFunc finds an Op implementation for a Vertex
type ResolveOpFunc func(Vertex, Builder) (Op, error)

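// Builder is passed to ResolveOpFunc so that an op being resolved can trigger
// nested builds, attach its work to the vertex's progress output, and iterate
// over values stored on the jobs it runs under.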
type Builder interface {
	Build(ctx context.Context, e Edge) (CachedResult, error)
	Context(ctx context.Context) context.Context
	EachValue(ctx context.Context, key string, fn func(interface{}) error) error
}

// Solver provides a shared graph of all the vertexes currently being
// processed. Every vertex that is being solved needs to be loaded into a job
// first. Vertex operations are invoked and progress tracking happens through
// jobs.
type Solver struct {
	mu      sync.RWMutex
	jobs    map[string]*Job
	actives map[digest.Digest]*state
	opts    SolverOpt

	updateCond *sync.Cond
	s          *scheduler
	index      *edgeIndex
}

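// state is the per-vertex bookkeeping shared by every job and parent vertex
// that references the vertex: the loaded Vertex itself, its edges, the
// progress writers attached to it, and the cache managers used to solve it.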
type state struct {
	jobs     map[*Job]struct{}
	parents  map[digest.Digest]struct{}
	childVtx map[digest.Digest]struct{}

	mpw   *progress.MultiWriter
	allPw map[progress.Writer]struct{}

	vtx          Vertex
	clientVertex client.Vertex

	mu    sync.Mutex
	op    *sharedOp
	edges map[Index]*edge
	opts  SolverOpt
	index *edgeIndex

	cache     map[string]CacheManager
	mainCache CacheManager
	solver    *Solver
}

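// getSessionID returns the session ID of any job attached to this state,
// falling back to parent states when none of the directly attached jobs
// carries one.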
func (s *state) getSessionID() string {
	// TODO: connect with sessionmanager to avoid getting dropped sessions
	s.mu.Lock()
	for j := range s.jobs {
		if j.SessionID != "" {
			s.mu.Unlock()
			return j.SessionID
		}
	}
	parents := map[digest.Digest]struct{}{}
	for p := range s.parents {
		parents[p] = struct{}{}
	}
	s.mu.Unlock()

	for p := range parents {
		s.solver.mu.Lock()
		pst, ok := s.solver.actives[p]
		s.solver.mu.Unlock()
		if ok {
			if sessionID := pst.getSessionID(); sessionID != "" {
				return sessionID
			}
		}
	}
	return ""
}

func (s *state) builder() *subBuilder {
	return &subBuilder{state: s}
}

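// getEdge returns the edge for the given output index, lazily creating the
// shared op and the edge on first use.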
func (s *state) getEdge(index Index) *edge {
	s.mu.Lock()
	defer s.mu.Unlock()
	if e, ok := s.edges[index]; ok {
		return e
	}

	if s.op == nil {
		s.op = newSharedOp(s.opts.ResolveOpFunc, s.opts.DefaultCache, s)
	}

	e := newEdge(Edge{Index: index, Vertex: s.vtx}, s.op, s.index)
	s.edges[index] = e
	return e
}

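// setEdge replaces the edge registered for index with newEdge, releasing the
// previously stored edge and taking a reference on the new one.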
func (s *state) setEdge(index Index, newEdge *edge) {
	s.mu.Lock()
	defer s.mu.Unlock()
	e, ok := s.edges[index]
	if ok {
		if e == newEdge {
			return
		}
		e.release()
	}

	newEdge.incrementReferenceCount()
	s.edges[index] = newEdge
}

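// combinedCacheManager merges the main cache manager with any imported cache
// sources; when there are no extra sources the main cache is returned as-is.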
func (s *state) combinedCacheManager() CacheManager {
	s.mu.Lock()
	cms := make([]CacheManager, 0, len(s.cache)+1)
	cms = append(cms, s.mainCache)
	for _, cm := range s.cache {
		cms = append(cms, cm)
	}
	s.mu.Unlock()

	if len(cms) == 1 {
		return s.mainCache
	}

	return newCombinedCacheManager(cms, s.mainCache)
}

func (s *state) Release() {
	for _, e := range s.edges {
		e.release()
	}
	if s.op != nil {
		s.op.release()
	}
}

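// subBuilder implements Builder for builds triggered from inside an op. Cache
// keys of the nested results are collected as exporters so they can be
// attached to the vertex's own result.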
type subBuilder struct {
	*state
	mu        sync.Mutex
	exporters []ExportableCacheKey
}

func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResult, error) {
	res, err := sb.solver.subBuild(ctx, e, sb.vtx)
	if err != nil {
		return nil, err
	}
	sb.mu.Lock()
	sb.exporters = append(sb.exporters, res.CacheKeys()[0]) // all keys already have full export chain
	sb.mu.Unlock()
	return res, nil
}

func (sb *subBuilder) Context(ctx context.Context) context.Context {
	return progress.WithProgress(ctx, sb.mpw)
}

func (sb *subBuilder) EachValue(ctx context.Context, key string, fn func(interface{}) error) error {
	sb.mu.Lock()
	defer sb.mu.Unlock()
	for j := range sb.jobs {
		if err := j.EachValue(ctx, key, fn); err != nil {
			return err
		}
	}
	return nil
}

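// Job tracks a single build request: the progress reader and writer scoped to
// it, the session ID it runs under, and arbitrary values set by the client.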
type Job struct {
	list   *Solver
	pr     *progress.MultiReader
	pw     progress.Writer
	values sync.Map

	progressCloser func()
	SessionID      string
}

type SolverOpt struct {
	ResolveOpFunc ResolveOpFunc
	DefaultCache  CacheManager
}

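// NewSolver creates a Solver with the given options, wiring up its scheduler
// and edge index. A minimal usage sketch (illustrative only; the resolver, job
// ID and edge below are placeholders, not part of this package):
//
//	s := NewSolver(SolverOpt{ResolveOpFunc: myResolver})
//	j, err := s.NewJob("build-1")
//	if err != nil {
//		return err
//	}
//	defer j.Discard()
//	res, err := j.Build(ctx, someEdge)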
func NewSolver(opts SolverOpt) *Solver {
	if opts.DefaultCache == nil {
		opts.DefaultCache = NewInMemoryCacheManager()
	}
	jl := &Solver{
		jobs:    make(map[string]*Job),
		actives: make(map[digest.Digest]*state),
		opts:    opts,
		index:   newEdgeIndex(),
	}
	jl.s = newScheduler(jl)
	jl.updateCond = sync.NewCond(jl.mu.RLocker())
	return jl
}

func (jl *Solver) setEdge(e Edge, newEdge *edge) {
	jl.mu.RLock()
	defer jl.mu.RUnlock()

	st, ok := jl.actives[e.Vertex.Digest()]
	if !ok {
		return
	}

	st.setEdge(e.Index, newEdge)
}

func (jl *Solver) getEdge(e Edge) *edge {
	jl.mu.RLock()
	defer jl.mu.RUnlock()

	st, ok := jl.actives[e.Vertex.Digest()]
	if !ok {
		return nil
	}
	return st.getEdge(e.Index)
}

func (jl *Solver) subBuild(ctx context.Context, e Edge, parent Vertex) (CachedResult, error) {
	v, err := jl.load(e.Vertex, parent, nil)
	if err != nil {
		return nil, err
	}
	e.Vertex = v
	return jl.s.build(ctx, e)
}

func (jl *Solver) Close() {
	jl.s.Stop()
}

func (jl *Solver) load(v, parent Vertex, j *Job) (Vertex, error) {
	jl.mu.Lock()
	defer jl.mu.Unlock()

	cache := map[Vertex]Vertex{}

	return jl.loadUnlocked(v, parent, j, cache)
}

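// loadUnlocked registers v and, recursively, its inputs in jl.actives,
// deduplicating against vertexes that are already active. A vertex built with
// IgnoreCache is tracked under a separate "-ignorecache" digest so that it
// does not merge into a cache-using load of the same vertex.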
func (jl *Solver) loadUnlocked(v, parent Vertex, j *Job, cache map[Vertex]Vertex) (Vertex, error) {
	if v, ok := cache[v]; ok {
		return v, nil
	}
	origVtx := v

	inputs := make([]Edge, len(v.Inputs()))
	for i, e := range v.Inputs() {
		v, err := jl.loadUnlocked(e.Vertex, parent, j, cache)
		if err != nil {
			return nil, err
		}
		inputs[i] = Edge{Index: e.Index, Vertex: v}
	}

	dgst := v.Digest()

	dgstWithoutCache := digest.FromBytes([]byte(fmt.Sprintf("%s-ignorecache", dgst)))

	// if same vertex is already loaded without cache just use that
	st, ok := jl.actives[dgstWithoutCache]

	if !ok {
		st, ok = jl.actives[dgst]

		// !ignorecache merges with ignorecache but ignorecache doesn't merge with !ignorecache
		if ok && !st.vtx.Options().IgnoreCache && v.Options().IgnoreCache {
			dgst = dgstWithoutCache
		}

		v = &vertexWithCacheOptions{
			Vertex: v,
			dgst:   dgst,
			inputs: inputs,
		}

		st, ok = jl.actives[dgst]
	}

	if !ok {
		st = &state{
			opts:         jl.opts,
			jobs:         map[*Job]struct{}{},
			parents:      map[digest.Digest]struct{}{},
			childVtx:     map[digest.Digest]struct{}{},
			allPw:        map[progress.Writer]struct{}{},
			mpw:          progress.NewMultiWriter(progress.WithMetadata("vertex", dgst)),
			vtx:          v,
			clientVertex: initClientVertex(v),
			edges:        map[Index]*edge{},
			index:        jl.index,
			mainCache:    jl.opts.DefaultCache,
			cache:        map[string]CacheManager{},
			solver:       jl,
		}
		jl.actives[dgst] = st
	}

	st.mu.Lock()
	for _, cache := range v.Options().CacheSources {
		if cache.ID() != st.mainCache.ID() {
			if _, ok := st.cache[cache.ID()]; !ok {
				st.cache[cache.ID()] = cache
			}
		}
	}

	if j != nil {
		if _, ok := st.jobs[j]; !ok {
			st.jobs[j] = struct{}{}
		}
	}
	st.mu.Unlock()

	if parent != nil {
		if _, ok := st.parents[parent.Digest()]; !ok {
			st.parents[parent.Digest()] = struct{}{}
			parentState, ok := jl.actives[parent.Digest()]
			if !ok {
				return nil, errors.Errorf("inactive parent %s", parent.Digest())
			}
			parentState.childVtx[dgst] = struct{}{}

			for id, c := range parentState.cache {
				st.cache[id] = c
			}
		}
	}

	jl.connectProgressFromState(st, st)
	cache[origVtx] = v
	return v, nil
}

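// connectProgressFromState attaches the progress writer of every job that
// references src, directly or through src's parents, to target's multi-writer
// and emits target's client vertex on each newly added writer.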
func (jl *Solver) connectProgressFromState(target, src *state) {
	for j := range src.jobs {
		if _, ok := target.allPw[j.pw]; !ok {
			target.mpw.Add(j.pw)
			target.allPw[j.pw] = struct{}{}
			j.pw.Write(target.clientVertex.Digest.String(), target.clientVertex)
		}
	}
	for p := range src.parents {
		jl.connectProgressFromState(target, jl.actives[p])
	}
}

func (jl *Solver) NewJob(id string) (*Job, error) {
	jl.mu.Lock()
	defer jl.mu.Unlock()

	if _, ok := jl.jobs[id]; ok {
		return nil, errors.Errorf("job ID %s exists", id)
	}

	pr, ctx, progressCloser := progress.NewContext(context.Background())
	pw, _, _ := progress.FromContext(ctx) // TODO: expose progress.Pipe()

	j := &Job{
		list:           jl,
		pr:             progress.NewMultiReader(pr),
		pw:             pw,
		progressCloser: progressCloser,
	}
	jl.jobs[id] = j

	jl.updateCond.Broadcast()

	return j, nil
}

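// Get returns the job registered under id, waiting up to three seconds for it
// to appear before giving up with an error.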
func (jl *Solver) Get(id string) (*Job, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	go func() {
		<-ctx.Done()
		jl.updateCond.Broadcast()
	}()

	jl.mu.RLock()
	defer jl.mu.RUnlock()
	for {
		select {
		case <-ctx.Done():
			return nil, errors.Errorf("no such job %s", id)
		default:
		}
		j, ok := jl.jobs[id]
		if !ok {
			jl.updateCond.Wait()
			continue
		}
		return j, nil
	}
}

// called with solver lock
func (jl *Solver) deleteIfUnreferenced(k digest.Digest, st *state) {
	if len(st.jobs) == 0 && len(st.parents) == 0 {
		for chKey := range st.childVtx {
			chState := jl.actives[chKey]
			delete(chState.parents, k)
			jl.deleteIfUnreferenced(chKey, chState)
		}
		st.Release()
		delete(jl.actives, k)
	}
}

func (j *Job) Build(ctx context.Context, e Edge) (CachedResult, error) {
	v, err := j.list.load(e.Vertex, nil, j)
	if err != nil {
		return nil, err
	}
	e.Vertex = v
	return j.list.s.build(ctx, e)
}

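// Discard detaches the job from every active state, releasing states that are
// no longer referenced by any job or parent, and closes the job's progress
// writer.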
func (j *Job) Discard() error {
	defer j.progressCloser()

	j.list.mu.Lock()
	defer j.list.mu.Unlock()

	j.pw.Close()

	for k, st := range j.list.actives {
		st.mu.Lock()
		if _, ok := st.jobs[j]; ok {
			delete(st.jobs, j)
			j.list.deleteIfUnreferenced(k, st)
		}
		if _, ok := st.allPw[j.pw]; ok {
			delete(st.allPw, j.pw)
		}
		st.mu.Unlock()
	}
	return nil
}

func (j *Job) Context(ctx context.Context) context.Context {
	return progress.WithProgress(ctx, j.pw)
}

func (j *Job) SetValue(key string, v interface{}) {
	j.values.Store(key, v)
}

func (j *Job) EachValue(ctx context.Context, key string, fn func(interface{}) error) error {
	v, ok := j.values.Load(key)
	if ok {
		return fn(v)
	}
	return nil
}

type cacheMapResp struct {
	*CacheMap
	complete bool
}

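// activeOp is the interface edges use to drive a vertex's operation: cache
// map computation, cache loading, content-based (slow) cache keys and
// execution.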
type activeOp interface {
	CacheMap(context.Context, int) (*cacheMapResp, error)
	LoadCache(ctx context.Context, rec *CacheRecord) (Result, error)
	Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error)
	IgnoreCache() bool
	Cache() CacheManager
	CalcSlowCache(context.Context, Index, ResultBasedCacheFunc, Result) (digest.Digest, error)
}

func newSharedOp(resolver ResolveOpFunc, cacheManager CacheManager, st *state) *sharedOp {
	so := &sharedOp{
		resolver:     resolver,
		st:           st,
		slowCacheRes: map[Index]digest.Digest{},
		slowCacheErr: map[Index]error{},
	}
	return so
}

type execRes struct {
	execRes       []*SharedResult
	execExporters []ExportableCacheKey
}

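// sharedOp implements activeOp for a single state and is shared by all of its
// edges: op resolution, cache map computation, slow cache keys and execution
// each run once and are memoized, with flightcontrol collapsing concurrent
// callers.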
type sharedOp struct {
	resolver ResolveOpFunc
	st       *state
	g        flightcontrol.Group

	opOnce     sync.Once
	op         Op
	subBuilder *subBuilder
	err        error

	execRes *execRes
	execErr error

	cacheRes  []*CacheMap
	cacheDone bool
	cacheErr  error

	slowMu       sync.Mutex
	slowCacheRes map[Index]digest.Digest
	slowCacheErr map[Index]error
}

func (s *sharedOp) IgnoreCache() bool {
	return s.st.vtx.Options().IgnoreCache
}

func (s *sharedOp) Cache() CacheManager {
	return s.st.combinedCacheManager()
}

func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) (Result, error) {
	ctx = progress.WithProgress(ctx, s.st.mpw)
	// no cache hit. start evaluating the node
	span, ctx := tracing.StartSpan(ctx, "load cache: "+s.st.vtx.Name())
	notifyStarted(ctx, &s.st.clientVertex, true)
	res, err := s.Cache().Load(ctx, rec)
	tracing.FinishWithError(span, err)
	notifyCompleted(ctx, &s.st.clientVertex, err, true)
	return res, err
}

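// CalcSlowCache computes a content-based cache key for the input at index by
// running f on its result, memoizing the key (or the error) unless the failure
// was caused by context cancellation.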
func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, f ResultBasedCacheFunc, res Result) (digest.Digest, error) {
	key, err := s.g.Do(ctx, fmt.Sprintf("slow-compute-%d", index), func(ctx context.Context) (interface{}, error) {
		s.slowMu.Lock()
		// TODO: add helpers for these stored values
		if res := s.slowCacheRes[index]; res != "" {
			s.slowMu.Unlock()
			return res, nil
		}
		if err := s.slowCacheErr[index]; err != nil {
			s.slowMu.Unlock()
			return nil, err
		}
		s.slowMu.Unlock()
		ctx = progress.WithProgress(ctx, s.st.mpw)
		key, err := f(ctx, res)
		complete := true
		if err != nil {
			select {
			case <-ctx.Done():
				if strings.Contains(err.Error(), context.Canceled.Error()) {
					complete = false
					err = errors.Wrap(ctx.Err(), err.Error())
				}
			default:
			}
		}
		s.slowMu.Lock()
		defer s.slowMu.Unlock()
		if complete {
			if err == nil {
				s.slowCacheRes[index] = key
			}
			s.slowCacheErr[index] = err
		}
		return key, err
	})
	if err != nil {
		ctx = progress.WithProgress(ctx, s.st.mpw)
		notifyStarted(ctx, &s.st.clientVertex, false)
		notifyCompleted(ctx, &s.st.clientVertex, err, false)
		return "", err
	}
	return key.(digest.Digest), nil
}

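// CacheMap returns the cache map computed for the given index, invoking
// op.CacheMap as many times as needed and memoizing the results; errors caused
// by context cancellation are not recorded as permanent.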
func (s *sharedOp) CacheMap(ctx context.Context, index int) (*cacheMapResp, error) {
	op, err := s.getOp()
	if err != nil {
		return nil, err
	}
	res, err := s.g.Do(ctx, "cachemap", func(ctx context.Context) (ret interface{}, retErr error) {
		if s.cacheRes != nil && s.cacheDone || index < len(s.cacheRes) {
			return s.cacheRes, nil
		}
		if s.cacheErr != nil {
			return nil, s.cacheErr
		}
		ctx = progress.WithProgress(ctx, s.st.mpw)
		ctx = session.NewContext(ctx, s.st.getSessionID())
		if len(s.st.vtx.Inputs()) == 0 {
			// no cache hit. start evaluating the node
			span, ctx := tracing.StartSpan(ctx, "cache request: "+s.st.vtx.Name())
			notifyStarted(ctx, &s.st.clientVertex, false)
			defer func() {
				tracing.FinishWithError(span, retErr)
				notifyCompleted(ctx, &s.st.clientVertex, retErr, false)
			}()
		}
		res, done, err := op.CacheMap(ctx, len(s.cacheRes))
		complete := true
		if err != nil {
			select {
			case <-ctx.Done():
				if strings.Contains(err.Error(), context.Canceled.Error()) {
					complete = false
					err = errors.Wrap(ctx.Err(), err.Error())
				}
			default:
			}
		}
		if complete {
			if err == nil {
				s.cacheRes = append(s.cacheRes, res)
				s.cacheDone = done
			}
			s.cacheErr = err
		}
		return s.cacheRes, err
	})
	if err != nil {
		return nil, err
	}

	if len(res.([]*CacheMap)) <= index {
		return s.CacheMap(ctx, index)
	}

	return &cacheMapResp{CacheMap: res.([]*CacheMap)[index], complete: s.cacheDone}, nil
}

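// Exec runs the op at most once for all edges sharing the vertex, memoizing
// the outputs together with any cache exporters collected from nested builds.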
func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error) {
	op, err := s.getOp()
	if err != nil {
		return nil, nil, err
	}
	res, err := s.g.Do(ctx, "exec", func(ctx context.Context) (ret interface{}, retErr error) {
		if s.execRes != nil || s.execErr != nil {
			return s.execRes, s.execErr
		}

		ctx = progress.WithProgress(ctx, s.st.mpw)
		ctx = session.NewContext(ctx, s.st.getSessionID())

		// no cache hit. start evaluating the node
		span, ctx := tracing.StartSpan(ctx, s.st.vtx.Name())
		notifyStarted(ctx, &s.st.clientVertex, false)
		defer func() {
			tracing.FinishWithError(span, retErr)
			notifyCompleted(ctx, &s.st.clientVertex, retErr, false)
		}()

		res, err := op.Exec(ctx, inputs)
		complete := true
		if err != nil {
			select {
			case <-ctx.Done():
				if strings.Contains(err.Error(), context.Canceled.Error()) {
					complete = false
					err = errors.Wrap(ctx.Err(), err.Error())
				}
			default:
			}
		}
		if complete {
			if res != nil {
				var subExporters []ExportableCacheKey
				s.subBuilder.mu.Lock()
				if len(s.subBuilder.exporters) > 0 {
					subExporters = append(subExporters, s.subBuilder.exporters...)
				}
				s.subBuilder.mu.Unlock()

				s.execRes = &execRes{execRes: wrapShared(res), execExporters: subExporters}
			}
			s.execErr = err
		}
		return s.execRes, err
	})
	if err != nil {
		return nil, nil, err
	}
	r := res.(*execRes)
	return unwrapShared(r.execRes), r.execExporters, nil
}

func (s *sharedOp) getOp() (Op, error) {
	s.opOnce.Do(func() {
		s.subBuilder = s.st.builder()
		s.op, s.err = s.resolver(s.st.vtx, s.subBuilder)
	})
	if s.err != nil {
		return nil, s.err
	}
	return s.op, nil
}

func (s *sharedOp) release() {
	if s.execRes != nil {
		for _, r := range s.execRes.execRes {
			go r.Release(context.TODO())
		}
	}
}

func initClientVertex(v Vertex) client.Vertex {
	inputDigests := make([]digest.Digest, 0, len(v.Inputs()))
	for _, inp := range v.Inputs() {
		inputDigests = append(inputDigests, inp.Vertex.Digest())
	}
	return client.Vertex{
		Inputs: inputDigests,
		Name:   v.Name(),
		Digest: v.Digest(),
	}
}

func wrapShared(inp []Result) []*SharedResult {
	out := make([]*SharedResult, len(inp))
	for i, r := range inp {
		out[i] = NewSharedResult(r)
	}
	return out
}

func unwrapShared(inp []*SharedResult) []Result {
	out := make([]Result, len(inp))
	for i, r := range inp {
		out[i] = r.Clone()
	}
	return out
}

type vertexWithCacheOptions struct {
	Vertex
	inputs []Edge
	dgst   digest.Digest
}

func (v *vertexWithCacheOptions) Digest() digest.Digest {
	return v.dgst
}

func (v *vertexWithCacheOptions) Inputs() []Edge {
	return v.inputs
}

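// notifyStarted and notifyCompleted emit vertex status updates on the progress
// writer carried by ctx so clients can observe when a vertex starts, finishes
// and whether it was served from cache.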
func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) {
	pw, _, _ := progress.FromContext(ctx)
	defer pw.Close()
	now := time.Now()
	v.Started = &now
	v.Completed = nil
	v.Cached = cached
	pw.Write(v.Digest.String(), *v)
}

func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bool) {
	pw, _, _ := progress.FromContext(ctx)
	defer pw.Close()
	now := time.Now()
	if v.Started == nil {
		v.Started = &now
	}
	v.Completed = &now
	v.Cached = cached
	if err != nil {
		v.Error = err.Error()
	}
	pw.Write(v.Digest.String(), *v)
}