Merge pull request #535 from tonistiigi/merge-fix

solver: fix edge merge skipping issue

commit 89eee2126e (branch: docker-18.09)
@@ -417,7 +417,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
     }

     // response for requests to dependencies
-    if dep, ok := e.depRequests[upt]; ok { // TODO: ignore canceled
+    if dep, ok := e.depRequests[upt]; ok {
         if err := upt.Status().Err; !upt.Status().Canceled && upt.Status().Completed && err != nil {
             if e.err == nil {
                 e.err = err
@@ -431,6 +431,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
             newKeys := state.keys[len(dep.keys):]
             if e.cacheMap != nil {
                 e.probeCache(dep, withSelector(newKeys, e.cacheMap.Deps[dep.index].Selector))
+                dep.edgeState.keys = state.keys
                 if e.allDepsHaveKeys() {
                     e.keysDidChange = true
                 }
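A note on the hunk above, reconstructed only from the lines shown here rather than from any stated rationale: the added `dep.edgeState.keys = state.keys` copies the dependency's freshly received keys into its edge state before `e.allDepsHaveKeys()` runs, so the completeness check no longer evaluates against a stale key list. Below is a minimal, self-contained Go sketch of that ordering problem; the `dep` type and `allDepsHaveKeys` helper are hypothetical stand-ins, not buildkit code.

package main

import "fmt"

// dep mirrors, in miniature, a dependency that records the cache keys received so far.
type dep struct{ keys []string }

// allDepsHaveKeys reports whether every dependency has produced at least one key.
func allDepsHaveKeys(deps []*dep) bool {
    for _, d := range deps {
        if len(d.keys) == 0 {
            return false
        }
    }
    return true
}

func main() {
    d := &dep{}
    incoming := []string{"k1"} // keys that just arrived for d

    // If the check runs before the new keys are copied into d, it is stale.
    fmt.Println(allDepsHaveKeys([]*dep{d})) // false

    // Copying first (analogous to dep.edgeState.keys = state.keys above) fixes the ordering.
    d.keys = incoming
    fmt.Println(allDepsHaveKeys([]*dep{d})) // true
}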
@@ -2892,6 +2892,61 @@ func TestCacheExportingMergedKey(t *testing.T) {
     require.Equal(t, len(expTarget.records), 5)
 }

+// moby/buildkit#434
+func TestMergedEdgesLookup(t *testing.T) {
+    t.Parallel()
+
+    rand.Seed(time.Now().UnixNano())
+
+    // this test requires multiple runs to trigger the race
+    for i := 0; i < 20; i++ {
+        func() {
+            ctx := context.TODO()
+
+            cacheManager := newTrackingCacheManager(NewInMemoryCacheManager())
+
+            l := NewSolver(SolverOpt{
+                ResolveOpFunc: testOpResolver,
+                DefaultCache: cacheManager,
+            })
+            defer l.Close()
+
+            j0, err := l.NewJob("j0")
+            require.NoError(t, err)
+
+            defer func() {
+                if j0 != nil {
+                    j0.Discard()
+                }
+            }()
+
+            g := Edge{
+                Vertex: vtxSum(3, vtxOpt{inputs: []Edge{
+                    {Vertex: vtxSum(0, vtxOpt{inputs: []Edge{
+                        {Vertex: vtxSum(2, vtxOpt{inputs: []Edge{
+                            {Vertex: vtxConst(2, vtxOpt{})},
+                        }})},
+                        {Vertex: vtxConst(0, vtxOpt{})},
+                    }})},
+                    {Vertex: vtxSum(2, vtxOpt{inputs: []Edge{
+                        {Vertex: vtxConst(2, vtxOpt{})},
+                    }})},
+                }}),
+            }
+            g.Vertex.(*vertexSum).setupCallCounters()
+
+            res, err := j0.Build(ctx, g)
+            require.NoError(t, err)
+            require.Equal(t, unwrapInt(res), 11)
+            require.Equal(t, int64(7), *g.Vertex.(*vertexSum).cacheCallCount)
+            require.Equal(t, int64(0), cacheManager.loadCounter)
+
+            require.NoError(t, j0.Discard())
+            j0 = nil
+        }()
+    }
+}
+
 func generateSubGraph(nodes int) (Edge, int) {
     if nodes == 1 {
         value := rand.Int() % 500
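The new test loops 20 times because, per its own comment, the race only surfaces across repeated runs. One way to stress it further locally might be the command below; the flags are an assumption on my part and not part of this change, and the package pattern should be narrowed to wherever the test actually lives.

go test -race -count=50 -run TestMergedEdgesLookup ./...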