Merge pull request #590 from tonistiigi/fix-panic

solver: fix panic on creating input requests

Branch: docker-18.09
Commit: 7e971435a4
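In short: input requests could be created for an edge whose cache map had not been computed yet, and code paths that dereference e.cacheMap panicked on nil. The fix guards those paths with e.cacheMap != nil checks, moves several emptiness checks from dep.keys to dep.keyMap, threads a matching flag through allDepsHaveKeys, makes the scheduler debug flag settable through an environment variable, and adds a regression test. The hunks below touch solver/edge.go, solver/scheduler.go, and solver/scheduler_test.go. A minimal sketch of the failure mode, using simplified stand-in types rather than the solver's real ones:

package main

import "fmt"

// cacheMap and edge are hypothetical stand-ins for the solver's types.
type cacheMap struct {
	Deps []struct{ ComputeDigestFunc func() string }
}

type edge struct {
	cacheMap *cacheMap // stays nil until the vertex's cache map arrives
}

// Without the nil guard, e.cacheMap.Deps panics with a nil pointer
// dereference whenever this runs ahead of the cache map being resolved.
func (e *edge) slowCacheFunc(index int) func() string {
	if e.cacheMap == nil { // the kind of guard this PR adds at call sites
		return nil
	}
	return e.cacheMap.Deps[index].ComputeDigestFunc
}

func main() {
	e := &edge{} // cache map not resolved yet
	fmt.Println(e.slowCacheFunc(0) == nil) // prints "true" instead of panicking
}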
--- a/solver/edge.go
+++ b/solver/edge.go
@@ -205,7 +205,7 @@ func (e *edge) probeCache(d *dep, depKeys []CacheKeyWithSelector) bool {
 // checkDepMatchPossible checks if any cache matches are possible past this point
 func (e *edge) checkDepMatchPossible(dep *dep) {
 	depHasSlowCache := e.cacheMap.Deps[dep.index].ComputeDigestFunc != nil
-	if !e.noCacheMatchPossible && (((!dep.slowCacheFoundKey && dep.slowCacheComplete && depHasSlowCache) || (!depHasSlowCache && dep.state >= edgeStatusCacheSlow)) && len(dep.keys) == 0) {
+	if !e.noCacheMatchPossible && (((!dep.slowCacheFoundKey && dep.slowCacheComplete && depHasSlowCache) || (!depHasSlowCache && dep.state >= edgeStatusCacheSlow)) && len(dep.keyMap) == 0) {
 		e.noCacheMatchPossible = true
 	}
 }
@@ -220,12 +220,16 @@ func (e *edge) slowCacheFunc(dep *dep) ResultBasedCacheFunc {
 
 // allDepsHaveKeys checks if all dependencies have at least one key. used for
 // determining if there is enough data for combining cache key for edge
-func (e *edge) allDepsHaveKeys() bool {
+func (e *edge) allDepsHaveKeys(matching bool) bool {
 	if e.cacheMap == nil {
 		return false
 	}
 	for _, d := range e.deps {
-		if len(d.keys) == 0 && d.slowCacheKey == nil && d.result == nil {
+		cond := len(d.keys) == 0
+		if matching {
+			cond = len(d.keyMap) == 0
+		}
+		if cond && d.slowCacheKey == nil && d.result == nil {
 			return false
 		}
 	}
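The checks that switch from dep.keys to dep.keyMap change which keys count: dep.keys appears to hold every cache key the input edge has reported, while dep.keyMap (judging by the new matching parameter name; the field itself predates this diff) holds only the keys that produced possible cache matches for this edge. A hedged sketch of the new contract, with stand-in types and field semantics inferred from the diff rather than confirmed:

package main

import "fmt"

type dep struct {
	keys   []string          // all keys reported by the input edge (assumed)
	keyMap map[string]string // keys with possible cache matches (assumed)
}

// Mirrors the diff's logic, minus the slowCacheKey/result escape hatches.
func allDepsHaveKeys(deps []*dep, matching bool) bool {
	for _, d := range deps {
		cond := len(d.keys) == 0
		if matching {
			cond = len(d.keyMap) == 0
		}
		if cond {
			return false
		}
	}
	return true
}

func main() {
	d := &dep{keys: []string{"k1"}, keyMap: map[string]string{}}
	fmt.Println(allDepsHaveKeys([]*dep{d}, false)) // true: a key was reported
	fmt.Println(allDepsHaveKeys([]*dep{d}, true))  // false: no cache match yet
}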
@@ -387,7 +391,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
			}
			e.state = edgeStatusCacheSlow
		}
-		if e.allDepsHaveKeys() {
+		if e.allDepsHaveKeys(false) {
			e.keysDidChange = true
		}
		// probe keys that were loaded before cache map
@@ -432,7 +436,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
			if e.cacheMap != nil {
				e.probeCache(dep, withSelector(newKeys, e.cacheMap.Deps[dep.index].Selector))
				dep.edgeState.keys = state.keys
-				if e.allDepsHaveKeys() {
+				if e.allDepsHaveKeys(false) {
					e.keysDidChange = true
				}
			}
@@ -580,7 +584,7 @@ func (e *edge) recalcCurrentState() {
		if isSlowIncomplete || dep.state < edgeStatusCacheSlow {
			allDepsCompletedCacheSlow = false
		}
-		if dep.state < edgeStatusCacheSlow && len(dep.keys) == 0 {
+		if dep.state < edgeStatusCacheSlow && len(dep.keyMap) == 0 {
			allDepsStateCacheSlow = false
		}
	}
@@ -702,20 +706,20 @@ func (e *edge) createInputRequests(desiredState edgeStatusType, f *pipeFactory)
			desiredStateDep = edgeStatusCacheFast
		} else if dep.state == edgeStatusCacheFast && desiredState > dep.state {
			// wait all deps to complete cache fast before continuing with slow cache
-			if (e.allDepsCompletedCacheFast && len(e.keys) == 0) || len(dep.keys) == 0 || e.allDepsHaveKeys() {
-				if !e.skipPhase2FastCache(dep) {
+			if (e.allDepsCompletedCacheFast && len(e.keys) == 0) || len(dep.keyMap) == 0 || e.allDepsHaveKeys(true) {
+				if !e.skipPhase2FastCache(dep) && e.cacheMap != nil {
					desiredStateDep = edgeStatusCacheSlow
				}
			}
-		} else if dep.state == edgeStatusCacheSlow && desiredState == edgeStatusComplete {
+		} else if e.cacheMap != nil && dep.state == edgeStatusCacheSlow && desiredState == edgeStatusComplete {
			// if all deps have completed cache-slow or content based cache for input is available
-			if (len(dep.keys) == 0 || e.allDepsCompletedCacheSlow || (!e.skipPhase2FastCache(dep) && e.slowCacheFunc(dep) != nil)) && (len(e.cacheRecords) == 0) {
-				if len(dep.keys) == 0 || !e.skipPhase2SlowCache(dep) && e.allDepsStateCacheSlow {
+			if (len(dep.keyMap) == 0 || e.allDepsCompletedCacheSlow || (!e.skipPhase2FastCache(dep) && e.slowCacheFunc(dep) != nil)) && (len(e.cacheRecords) == 0) {
+				if len(dep.keyMap) == 0 || !e.skipPhase2SlowCache(dep) {
					desiredStateDep = edgeStatusComplete
				}
			}
-		} else if dep.state == edgeStatusCacheSlow && e.slowCacheFunc(dep) != nil && desiredState == edgeStatusCacheSlow {
-			if len(dep.keys) == 0 || !e.skipPhase2SlowCache(dep) && e.allDepsStateCacheSlow {
+		} else if e.cacheMap != nil && dep.state == edgeStatusCacheSlow && e.slowCacheFunc(dep) != nil && desiredState == edgeStatusCacheSlow {
+			if len(dep.keyMap) == 0 || !e.skipPhase2SlowCache(dep) {
				desiredStateDep = edgeStatusComplete
			}
		}
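Beyond the keyMap switch, this hunk makes two behavioral changes. First, the added e.cacheMap != nil guards keep the slow-cache branches from running before the edge's cache map has been resolved; judging by the earlier hunks, those branches reach e.cacheMap.Deps[dep.index] (via slowCacheFunc and ComputeDigestFunc), which is the nil dereference behind the panic in the title. Second, the "&& e.allDepsStateCacheSlow" conjuncts are dropped from the inner conditions, so a dependency with no matching keys can be driven to edgeStatusComplete without waiting for every sibling to reach the slow-cache state; the TestInputRequestDeadlock test below exercises the scheduling pattern this affects.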
--- a/solver/scheduler.go
+++ b/solver/scheduler.go
@@ -2,6 +2,7 @@ package solver
 
 import (
 	"context"
+	"os"
 	"sync"
 
 	"github.com/moby/buildkit/solver/internal/pipe"
@@ -10,7 +11,13 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-const debugScheduler = false // TODO: replace with logs in build trace
+var debugScheduler = false // TODO: replace with logs in build trace
+
+func init() {
+	if os.Getenv("BUILDKIT_SCHEDULER_DEBUG") == "1" {
+		debugScheduler = true
+	}
+}
 
 func newScheduler(ef edgeFactory) *scheduler {
 	s := &scheduler{
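Turning debugScheduler from a const into a var, together with the init hook, means the scheduler's debug output can be enabled at daemon startup by setting BUILDKIT_SCHEDULER_DEBUG=1 in the environment, with no rebuild required. The default remains off, and the TODO about moving this into build-trace logs is carried over unchanged.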
--- a/solver/scheduler_test.go
+++ b/solver/scheduler_test.go
@@ -2947,6 +2947,133 @@ func TestMergedEdgesLookup(t *testing.T) {
 	}
 }
 
+func TestInputRequestDeadlock(t *testing.T) {
+	t.Parallel()
+	ctx := context.TODO()
+
+	s := NewSolver(SolverOpt{
+		ResolveOpFunc: testOpResolver,
+	})
+	defer s.Close()
+
+	j0, err := s.NewJob("job0")
+	require.NoError(t, err)
+
+	defer func() {
+		if j0 != nil {
+			j0.Discard()
+		}
+	}()
+
+	g0 := Edge{
+		Vertex: vtx(vtxOpt{
+			name:         "v0",
+			cacheKeySeed: "seed0",
+			value:        "result0",
+			inputs: []Edge{
+				{
+					Vertex: vtx(vtxOpt{
+						name:         "v1",
+						cacheKeySeed: "seed1",
+						value:        "result1",
+					}),
+				},
+				{
+					Vertex: vtx(vtxOpt{
+						name:         "v2",
+						cacheKeySeed: "seed2",
+						value:        "result2",
+					}),
+				},
+			},
+		}),
+	}
+
+	_, err = j0.Build(ctx, g0)
+	require.NoError(t, err)
+	require.NoError(t, j0.Discard())
+	j0 = nil
+
+	j1, err := s.NewJob("job1")
+	require.NoError(t, err)
+
+	defer func() {
+		if j1 != nil {
+			j1.Discard()
+		}
+	}()
+
+	g1 := Edge{
+		Vertex: vtx(vtxOpt{
+			name:         "v0",
+			cacheKeySeed: "seed0-1",
+			value:        "result0",
+			inputs: []Edge{
+				{
+					Vertex: vtx(vtxOpt{
+						name:         "v1",
+						cacheKeySeed: "seed1-1",
+						value:        "result1",
+					}),
+				},
+				{
+					Vertex: vtx(vtxOpt{
+						name:         "v2",
+						cacheKeySeed: "seed2-1",
+						value:        "result2",
+					}),
+				},
+			},
+		}),
+	}
+
+	_, err = j1.Build(ctx, g1)
+	require.NoError(t, err)
+	require.NoError(t, j1.Discard())
+	j1 = nil
+
+	j2, err := s.NewJob("job2")
+	require.NoError(t, err)
+
+	defer func() {
+		if j2 != nil {
+			j2.Discard()
+		}
+	}()
+
+	g2 := Edge{
+		Vertex: vtx(vtxOpt{
+			name:         "v0",
+			cacheKeySeed: "seed0-1",
+			value:        "result0",
+			inputs: []Edge{
+				{
+					Vertex: vtx(vtxOpt{
+						name:         "v1",
+						cacheKeySeed: "seed1",
+						value:        "result1",
+					}),
+				},
+				{
+					Vertex: vtx(vtxOpt{
+						name:         "v2",
+						cacheKeySeed: "seed2-1",
+						value:        "result2",
+					}),
+				},
+			},
+			slowCacheCompute: map[int]ResultBasedCacheFunc{
+				1: digestFromResult,
+			},
+		}),
+	}
+
+	_, err = j2.Build(ctx, g2)
+	require.NoError(t, err)
+	require.NoError(t, j2.Discard())
+	j2 = nil
+}
+
 func generateSubGraph(nodes int) (Edge, int) {
 	if nodes == 1 {
 		value := rand.Int() % 500
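The regression test replays the failing pattern with three consecutive jobs over the same v0→(v1, v2) graph: job1 changes every cache seed, and job2 mixes seeds from both earlier builds (v0 and v2 from job1, v1 from job0) while adding a content-based slowCacheCompute function for input index 1 (v2). The third build therefore combines fast-cache hits with a slow-cache dependency, which is the state that previously panicked (and, per the test name, could deadlock) in createInputRequests. It should be runnable in isolation with something like go test ./solver -run TestInputRequestDeadlock.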