2017-06-14 00:15:55 +00:00
|
|
|
package solver
|
|
|
|
|
|
|
|
import (
|
2018-01-16 22:30:10 +00:00
|
|
|
"context"
|
2017-06-16 22:49:03 +00:00
|
|
|
"io"
|
2017-06-14 00:15:55 +00:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2017-12-13 05:55:16 +00:00
|
|
|
"github.com/moby/buildkit/cache/instructioncache"
|
2017-06-22 20:15:46 +00:00
|
|
|
"github.com/moby/buildkit/client"
|
2017-08-10 01:20:33 +00:00
|
|
|
"github.com/moby/buildkit/session"
|
2017-09-26 03:57:38 +00:00
|
|
|
"github.com/moby/buildkit/solver/pb"
|
2017-06-22 20:15:46 +00:00
|
|
|
"github.com/moby/buildkit/util/progress"
|
2017-06-14 00:15:55 +00:00
|
|
|
digest "github.com/opencontainers/go-digest"
|
2018-01-06 16:54:10 +00:00
|
|
|
opentracing "github.com/opentracing/opentracing-go"
|
2017-06-14 00:15:55 +00:00
|
|
|
"github.com/pkg/errors"
|
2017-07-19 01:05:19 +00:00
|
|
|
"github.com/sirupsen/logrus"
|
2017-06-14 00:15:55 +00:00
|
|
|
)
|
|
|
|
|
2017-07-21 17:58:24 +00:00
|
|
|
// jobKeyT is a private context-key type; using a distinct unexported type
// prevents collisions with context values set by other packages.
type jobKeyT string

// jobKey is the context key under which the active *job is stored by
// jobList.new and retrieved by callers of context.Value.
var jobKey = jobKeyT("buildkit/solver/job")
|
|
|
|
|
2017-06-14 00:15:55 +00:00
|
|
|
// jobList tracks all in-flight jobs and the shared per-vertex solve states
// they reference. A single jobList is shared across the solver.
type jobList struct {
	mu         sync.RWMutex
	refs       map[string]*job // active jobs keyed by build id
	updateCond *sync.Cond      // broadcast whenever refs changes; get() waits on it
	actives    map[digest.Digest]*state // vertex states shared between jobs, keyed by vertex digest
}
|
|
|
|
|
|
|
|
// state is the shared solve state for one vertex digest. Multiple jobs that
// load the same vertex share a single state (one solver, one fan-out
// progress writer), each with its own *vertex instance in jobs.
type state struct {
	jobs   map[*job]*vertex // jobs currently referencing this vertex
	solver VertexSolver     // shared solver for the vertex
	mpw    *progress.MultiWriter // fan-out progress writer; each job's pw is added to it
}
|
|
|
|
|
|
|
|
func newJobList() *jobList {
|
|
|
|
jl := &jobList{
|
2017-08-10 01:20:33 +00:00
|
|
|
refs: make(map[string]*job),
|
|
|
|
actives: make(map[digest.Digest]*state),
|
2017-06-14 00:15:55 +00:00
|
|
|
}
|
|
|
|
jl.updateCond = sync.NewCond(jl.mu.RLocker())
|
|
|
|
return jl
|
|
|
|
}
|
|
|
|
|
2017-11-06 08:24:42 +00:00
|
|
|
// jobInstructionCache implements InstructionCache.
// jobInstructionCache is instantiated for each of job instances rather than jobList or solver instances.
// Lookup for objects with IgnoreCache fail until Set is called.
type jobInstructionCache struct {
	mu sync.RWMutex // guards ignoreCache and setInThisJob
	instructioncache.InstructionCache
	ignoreCache  map[digest.Digest]struct{} // keys whose cached results must be ignored for this job
	setInThisJob map[digest.Digest]struct{} // keys freshly written during this job; overrides ignoreCache
}
|
|
|
|
|
|
|
|
// Probe implements InstructionCache
|
|
|
|
func (jic *jobInstructionCache) Probe(ctx context.Context, key digest.Digest) (bool, error) {
|
|
|
|
jic.mu.RLock()
|
|
|
|
defer jic.mu.RUnlock()
|
|
|
|
_, ignoreCache := jic.ignoreCache[key]
|
|
|
|
_, setInThisJob := jic.setInThisJob[key]
|
|
|
|
if ignoreCache && !setInThisJob {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
return jic.InstructionCache.Probe(ctx, key)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup implements InstructionCache
|
|
|
|
func (jic *jobInstructionCache) Lookup(ctx context.Context, key digest.Digest, msg string) (interface{}, error) {
|
|
|
|
jic.mu.RLock()
|
|
|
|
defer jic.mu.RUnlock()
|
|
|
|
_, ignoreCache := jic.ignoreCache[key]
|
|
|
|
_, setInThisJob := jic.setInThisJob[key]
|
|
|
|
if ignoreCache && !setInThisJob {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
return jic.InstructionCache.Lookup(ctx, key, msg)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set implements InstructionCache
|
|
|
|
func (jic *jobInstructionCache) Set(key digest.Digest, ref interface{}) error {
|
|
|
|
jic.mu.Lock()
|
|
|
|
defer jic.mu.Unlock()
|
|
|
|
jic.setInThisJob[key] = struct{}{}
|
|
|
|
return jic.InstructionCache.Set(key, ref)
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetIgnoreCache is jobInstructionCache-specific extension
|
|
|
|
func (jic *jobInstructionCache) SetIgnoreCache(key digest.Digest) {
|
|
|
|
jic.mu.Lock()
|
|
|
|
defer jic.mu.Unlock()
|
|
|
|
jic.ignoreCache[key] = struct{}{}
|
|
|
|
}
|
|
|
|
|
2017-12-13 05:55:16 +00:00
|
|
|
func newJobInstructionCache(base instructioncache.InstructionCache) *jobInstructionCache {
|
2017-11-06 08:24:42 +00:00
|
|
|
return &jobInstructionCache{
|
|
|
|
InstructionCache: base,
|
|
|
|
ignoreCache: make(map[digest.Digest]struct{}),
|
|
|
|
setInThisJob: make(map[digest.Digest]struct{}),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-13 05:55:16 +00:00
|
|
|
// new registers a job under id and returns a derived context carrying the
// job (stored under jobKey). The job is removed from the list automatically
// when ctx is done. Fails if a job with the same id already exists.
func (jl *jobList) new(ctx context.Context, id string, pr progress.Reader, cache instructioncache.InstructionCache) (context.Context, *job, error) {
	jl.mu.Lock()
	defer jl.mu.Unlock()

	if _, ok := jl.refs[id]; ok {
		return nil, nil, errors.Errorf("id %s exists", id)
	}

	pw, _, _ := progress.FromContext(ctx) // TODO: remove this
	sid := session.FromContext(ctx)
	span := opentracing.SpanFromContext(ctx)

	// TODO(AkihiroSuda): find a way to integrate map[string]*cacheRecord to jobInstructionCache?
	j := &job{l: jl, pr: progress.NewMultiReader(pr), pw: pw, session: sid, cache: newJobInstructionCache(cache), cached: map[string]*cacheRecord{}, traceSpan: span}
	jl.refs[id] = j
	// Wake any get() callers currently waiting for this id to appear.
	jl.updateCond.Broadcast()
	// Unregister the job once the caller's context is cancelled or completed.
	go func() {
		<-ctx.Done()
		jl.mu.Lock()
		defer jl.mu.Unlock()
		delete(jl.refs, id)
	}()

	return context.WithValue(ctx, jobKey, jl.refs[id]), jl.refs[id], nil
}
|
|
|
|
|
|
|
|
// get returns the job registered under id, waiting up to 3 seconds for it to
// appear (it may be registered concurrently by new()). Returns an error if
// the job does not show up within the timeout.
func (jl *jobList) get(id string) (*job, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// sync.Cond.Wait cannot be interrupted directly, so broadcast on timeout
	// to wake the waiting loop below and let it observe ctx.Done().
	go func() {
		<-ctx.Done()
		jl.updateCond.Broadcast()
	}()

	jl.mu.RLock()
	defer jl.mu.RUnlock()
	for {
		// Check for timeout before each lookup; the broadcast goroutine above
		// guarantees we are woken when the deadline passes.
		select {
		case <-ctx.Done():
			return nil, errors.Errorf("no such job %s", id)
		default:
		}
		j, ok := jl.refs[id]
		if !ok {
			// Wait releases the read lock while blocked (updateCond was built
			// on mu.RLocker in newJobList) and reacquires it on wakeup.
			jl.updateCond.Wait()
			continue
		}
		return j, nil
	}
}
|
|
|
|
|
|
|
|
// job is a single in-flight build request. It owns per-job progress plumbing,
// a per-job instruction-cache wrapper, and the cache records kept alive for
// later export.
type job struct {
	l       *jobList
	pr      *progress.MultiReader // fan-out reader used by pipe() to stream status to clients
	pw      progress.Writer       // writer attached to each active vertex's MultiWriter
	session string                // session id captured from the incoming context in jobList.new
	cache   *jobInstructionCache
	cached  map[string]*cacheRecord // immutable-ref ID -> record, populated by keepCacheRef

	traceSpan opentracing.Span // TODO(tonistiigi): temporary until shared tracers support. Do not change until solver refactoring in merged.
}
|
|
|
|
|
|
|
|
// cacheRecord ties a solved output reference (ref at index) back to the
// solver that produced it, so a CacheExporter can be created later
// (see job.cacheExporter).
type cacheRecord struct {
	VertexSolver
	index Index
	ref   Ref
}
|
|
|
|
|
2017-12-15 08:06:54 +00:00
|
|
|
// load builds the vertex graph for def and attaches it to this job.
// It is a locking wrapper around loadInternal: the jobList mutex protects
// jl.actives, which loadInternal reads and mutates.
func (j *job) load(def *pb.Definition, resolveOp ResolveOpFunc) (*Input, error) {
	j.l.mu.Lock()
	defer j.l.mu.Unlock()

	return j.loadInternal(def, resolveOp)
}
|
|
|
|
|
2017-12-15 08:06:54 +00:00
|
|
|
// loadInternal walks the LLB definition, creating (or reusing) a shared
// per-digest state for every vertex and attaching this job to each state.
// Must be called with j.l.mu held (see load).
func (j *job) loadInternal(def *pb.Definition, resolveOp ResolveOpFunc) (*Input, error) {
	vtx, idx, err := loadLLB(def, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (interface{}, error)) (interface{}, error) {
		// Fast path: this job already loaded this vertex.
		if st, ok := j.l.actives[dgst]; ok {
			if vtx, ok := st.jobs[j]; ok {
				return vtx, nil
			}
		}
		opMetadata := def.Metadata[dgst]
		vtx, err := newVertex(dgst, pbOp, &opMetadata, load)
		if err != nil {
			return nil, err
		}

		st, ok := j.l.actives[dgst]
		if !ok {
			// First job to reference this digest: create the shared state and
			// its solver.
			st = &state{
				jobs: map[*job]*vertex{},
				mpw:  progress.NewMultiWriter(progress.WithMetadata("vertex", dgst)),
			}
			op, err := resolveOp(vtx)
			if err != nil {
				return nil, err
			}
			// The solver outlives any single request, so it runs on a
			// background context carrying only progress/session/trace info.
			ctx := progress.WithProgress(context.Background(), st.mpw)
			ctx = session.NewContext(ctx, j.session) // TODO: support multiple

			if j.traceSpan != nil {
				ctx = opentracing.ContextWithSpan(ctx, j.traceSpan)
			}

			s, err := newVertexSolver(ctx, vtx, op, j.cache, j.getSolver)
			if err != nil {
				return nil, err
			}
			// Propagate per-input IgnoreCache metadata into the job's
			// instruction cache so lookups for those keys miss.
			for i, input := range pbOp.Inputs {
				if inputMetadata := def.Metadata[input.Digest]; inputMetadata.IgnoreCache {
					k, err := s.CacheKey(ctx, Index(i))
					if err != nil {
						return nil, err
					}
					j.cache.SetIgnoreCache(k)
				}
			}
			st.solver = s

			j.l.actives[dgst] = st
		}
		// Attach this job to the shared state (idempotent per job): emit the
		// initial vertex record and join the state's progress fan-out.
		if _, ok := st.jobs[j]; !ok {
			j.pw.Write(vtx.Digest().String(), vtx.clientVertex)
			st.mpw.Add(j.pw)
			st.jobs[j] = vtx
		}
		return vtx, nil
	})
	if err != nil {
		return nil, err
	}
	return &Input{Vertex: vtx.(*vertex), Index: Index(idx)}, nil
}
|
|
|
|
|
|
|
|
func (j *job) discard() {
|
|
|
|
j.l.mu.Lock()
|
|
|
|
defer j.l.mu.Unlock()
|
|
|
|
|
|
|
|
j.pw.Close()
|
|
|
|
|
|
|
|
for k, st := range j.l.actives {
|
|
|
|
if _, ok := st.jobs[j]; ok {
|
|
|
|
delete(st.jobs, j)
|
|
|
|
}
|
|
|
|
if len(st.jobs) == 0 {
|
|
|
|
go st.solver.Release()
|
|
|
|
delete(j.l.actives, k)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (j *job) getSolver(dgst digest.Digest) (VertexSolver, error) {
|
|
|
|
st, ok := j.l.actives[dgst]
|
|
|
|
if !ok {
|
|
|
|
return nil, errors.Errorf("vertex %v not found", dgst)
|
|
|
|
}
|
|
|
|
return st.solver, nil
|
|
|
|
}
|
|
|
|
|
2017-12-15 08:06:54 +00:00
|
|
|
func (j *job) getRef(ctx context.Context, cv client.Vertex, index Index) (Ref, error) {
|
2017-12-13 07:39:37 +00:00
|
|
|
s, err := j.getSolver(cv.Digest)
|
2017-08-10 01:20:33 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-12-21 05:32:54 +00:00
|
|
|
ctx = progress.WithProgress(ctx, j.pw)
|
2017-12-13 07:39:37 +00:00
|
|
|
ref, err := getRef(ctx, s, cv, index, j.cache)
|
2017-10-12 18:23:42 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
j.keepCacheRef(s, index, ref)
|
|
|
|
return ref, nil
|
|
|
|
}
|
|
|
|
|
2017-12-15 08:06:54 +00:00
|
|
|
func (j *job) keepCacheRef(s VertexSolver, index Index, ref Ref) {
|
|
|
|
immutable, ok := ToImmutableRef(ref)
|
2017-10-12 18:23:42 +00:00
|
|
|
if ok {
|
2017-10-13 22:35:35 +00:00
|
|
|
j.cached[immutable.ID()] = &cacheRecord{s, index, ref}
|
2017-10-12 18:23:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-15 08:06:54 +00:00
|
|
|
func (j *job) cacheExporter(ref Ref) (CacheExporter, error) {
|
|
|
|
immutable, ok := ToImmutableRef(ref)
|
2017-10-12 18:23:42 +00:00
|
|
|
if !ok {
|
|
|
|
return nil, errors.Errorf("invalid reference")
|
|
|
|
}
|
|
|
|
cr, ok := j.cached[immutable.ID()]
|
|
|
|
if !ok {
|
|
|
|
return nil, errors.Errorf("invalid cache exporter")
|
|
|
|
}
|
2017-10-13 22:35:35 +00:00
|
|
|
return cr.Cache(cr.index, cr.ref), nil
|
2017-08-10 01:20:33 +00:00
|
|
|
}
|
|
|
|
|
2017-12-15 08:06:54 +00:00
|
|
|
// getRef resolves the reference for the output at index of the vertex solved
// by s. It first consults the instruction cache by the solver's cache key;
// on a miss it evaluates the vertex, retrying the cache lookup whenever the
// evaluator surfaces an intermediate cache key. Cache hits mark the vertex
// as cached in the progress stream.
func getRef(ctx context.Context, s VertexSolver, cv client.Vertex, index Index, cache instructioncache.InstructionCache) (Ref, error) {
	k, err := s.CacheKey(ctx, index)
	if err != nil {
		return nil, err
	}
	ref, err := cache.Lookup(ctx, k, s.(*vertexSolver).v.Name())
	if err != nil {
		return nil, err
	}
	if ref != nil {
		markCached(ctx, cv)
		return ref.(Ref), nil
	}

	ev, err := s.OutputEvaluator(index)
	if err != nil {
		return nil, err
	}
	defer ev.Cancel()

	for {
		r, err := ev.Next(ctx)
		if err != nil {
			return nil, err
		}
		if r.CacheKey != "" {
			// The evaluator produced a (presumably content-based) cache key
			// before computing the full result; retry the lookup with it.
			ref, err := cache.Lookup(ctx, r.CacheKey, s.(*vertexSolver).v.Name())
			if err != nil {
				return nil, err
			}
			if ref != nil {
				markCached(ctx, cv)
				return ref.(Ref), nil
			}
			// Miss on the intermediate key: keep evaluating.
			continue
		}
		return r.Reference, nil
	}
}
|
|
|
|
|
|
|
|
func (j *job) pipe(ctx context.Context, ch chan *client.SolveStatus) error {
|
2017-09-26 03:57:38 +00:00
|
|
|
vs := &vertexStream{cache: map[digest.Digest]*client.Vertex{}}
|
2017-08-10 01:20:33 +00:00
|
|
|
pr := j.pr.Reader(ctx)
|
2017-09-26 03:57:38 +00:00
|
|
|
defer func() {
|
|
|
|
if enc := vs.encore(); len(enc) > 0 {
|
|
|
|
ch <- &client.SolveStatus{Vertexes: enc}
|
|
|
|
}
|
|
|
|
}()
|
2017-06-14 00:15:55 +00:00
|
|
|
for {
|
2017-06-15 23:08:20 +00:00
|
|
|
p, err := pr.Read(ctx)
|
2017-06-14 00:15:55 +00:00
|
|
|
if err != nil {
|
2017-06-16 22:49:03 +00:00
|
|
|
if err == io.EOF {
|
|
|
|
return nil
|
|
|
|
}
|
2017-06-14 00:15:55 +00:00
|
|
|
return err
|
|
|
|
}
|
2017-06-16 22:49:03 +00:00
|
|
|
ss := &client.SolveStatus{}
|
|
|
|
for _, p := range p {
|
|
|
|
switch v := p.Sys.(type) {
|
|
|
|
case client.Vertex:
|
2017-09-26 03:57:38 +00:00
|
|
|
ss.Vertexes = append(ss.Vertexes, vs.append(v)...)
|
2017-06-16 22:49:03 +00:00
|
|
|
|
|
|
|
case progress.Status:
|
2017-06-17 05:12:27 +00:00
|
|
|
vtx, ok := p.Meta("vertex")
|
|
|
|
if !ok {
|
|
|
|
logrus.Warnf("progress %s status without vertex info", p.ID)
|
|
|
|
continue
|
|
|
|
}
|
2017-06-16 22:49:03 +00:00
|
|
|
vs := &client.VertexStatus{
|
2017-06-17 05:12:27 +00:00
|
|
|
ID: p.ID,
|
|
|
|
Vertex: vtx.(digest.Digest),
|
2017-06-16 22:49:03 +00:00
|
|
|
Name: v.Action,
|
|
|
|
Total: int64(v.Total),
|
|
|
|
Current: int64(v.Current),
|
|
|
|
Timestamp: p.Timestamp,
|
|
|
|
Started: v.Started,
|
|
|
|
Completed: v.Completed,
|
|
|
|
}
|
|
|
|
ss.Statuses = append(ss.Statuses, vs)
|
2017-06-19 05:24:41 +00:00
|
|
|
case client.VertexLog:
|
|
|
|
vtx, ok := p.Meta("vertex")
|
|
|
|
if !ok {
|
2017-06-30 03:34:29 +00:00
|
|
|
logrus.Warnf("progress %s log without vertex info", p.ID)
|
2017-06-19 05:24:41 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
v.Vertex = vtx.(digest.Digest)
|
|
|
|
v.Timestamp = p.Timestamp
|
|
|
|
ss.Logs = append(ss.Logs, &v)
|
2017-06-15 23:08:20 +00:00
|
|
|
}
|
2017-06-14 00:15:55 +00:00
|
|
|
}
|
2017-06-16 22:49:03 +00:00
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return ctx.Err()
|
|
|
|
case ch <- ss:
|
|
|
|
}
|
2017-06-14 00:15:55 +00:00
|
|
|
}
|
|
|
|
}
|
2017-09-26 03:57:38 +00:00
|
|
|
|
|
|
|
// vertexStream tracks the latest client.Vertex record seen per digest so
// that cache hits can back-fill their inputs (append) and unfinished
// vertexes can be closed out on stream shutdown (encore).
type vertexStream struct {
	cache map[digest.Digest]*client.Vertex
}
|
|
|
|
|
|
|
|
// append records v in the stream cache and returns the vertex updates to
// emit. When v completed from cache, any of its inputs that never ran are
// back-filled as cached with zero duration (Started == Completed == v's
// completion time) and emitted recursively before v itself.
func (vs *vertexStream) append(v client.Vertex) []*client.Vertex {
	var out []*client.Vertex
	vs.cache[v.Digest] = &v
	if v.Cached {
		for _, inp := range v.Inputs {
			if inpv, ok := vs.cache[inp]; ok {
				if !inpv.Cached && inpv.Completed == nil {
					inpv.Cached = true
					inpv.Started = v.Completed
					inpv.Completed = v.Completed
				}
				// Input is removed before recursing — presumably to avoid
				// re-emitting the same back-fill through another parent;
				// note the recursive append re-inserts it. TODO confirm.
				delete(vs.cache, inp)
				out = append(out, vs.append(*inpv)...)
			}
		}
	}
	// Copy v so the returned pointer is not aliased by later cache mutation.
	vcopy := v
	return append(out, &vcopy)
}
|
|
|
|
|
|
|
|
func (vs *vertexStream) encore() []*client.Vertex {
|
|
|
|
var out []*client.Vertex
|
|
|
|
for _, v := range vs.cache {
|
|
|
|
if v.Started != nil && v.Completed == nil {
|
|
|
|
now := time.Now()
|
|
|
|
v.Completed = &now
|
|
|
|
v.Error = context.Canceled.Error()
|
|
|
|
out = append(out, v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return out
|
|
|
|
}
|