solver: optimize cache storage interface

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
docker-18.09
Tonis Tiigi 2018-03-16 13:34:06 -07:00
parent 513018806b
commit 5593bb9f48
5 changed files with 333 additions and 253 deletions

View File

@ -10,7 +10,6 @@ import (
"github.com/moby/buildkit/identity"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
@ -52,21 +51,6 @@ type inMemoryCacheKey struct {
}
// Deps returns the dependency keys for this cache key, lazily
// materializing them from the persisted CacheKeyInfo when the in-memory
// copy is empty. The rebuilt slice is cached on ck.deps for later calls.
func (ck *inMemoryCacheKey) Deps() []CacheKeyWithSelector {
// Rebuild only when we have no in-memory deps but the backing record
// does list dependency IDs.
if len(ck.deps) == 0 || len(ck.CacheKeyInfo.Deps) > 0 {
deps := make([]CacheKeyWithSelector, len(ck.CacheKeyInfo.Deps))
for i, dep := range ck.CacheKeyInfo.Deps {
k, err := ck.manager.backend.Get(dep.ID)
if err != nil {
// Missing dependency: log and leave the zero value at deps[i]
// rather than failing the whole lookup.
logrus.Errorf("dependency %s not found", dep.ID)
} else {
deps[i] = CacheKeyWithSelector{
CacheKey: withExporter(ck.manager.toInMemoryCacheKey(k), nil, nil),
Selector: dep.Selector,
}
}
}
ck.deps = deps
}
return ck.deps
}
@ -77,18 +61,18 @@ func (ck *inMemoryCacheKey) Output() Index {
return Index(ck.CacheKeyInfo.Output)
}
func withExporter(ck *inMemoryCacheKey, cacheResult *CacheResult, dep *CacheKeyWithSelector) ExportableCacheKey {
func withExporter(ck *inMemoryCacheKey, cacheResult *CacheResult, deps []CacheKeyWithSelector) ExportableCacheKey {
return ExportableCacheKey{ck, &cacheExporter{
inMemoryCacheKey: ck,
cacheResult: cacheResult,
dep: dep,
deps: deps,
}}
}
type cacheExporter struct {
*inMemoryCacheKey
cacheResult *CacheResult
dep *CacheKeyWithSelector
deps []CacheKeyWithSelector
}
func (ce *cacheExporter) Export(ctx context.Context, m map[digest.Digest]*ExportRecord, converter func(context.Context, Result) (*Remote, error)) (*ExportRecord, error) {
@ -130,8 +114,6 @@ func (ce *cacheExporter) Export(ctx context.Context, m map[digest.Digest]*Export
cacheID = remote.Descriptors[0].Digest
}
deps := ce.deps
rec, ok := m[cacheID]
if !ok {
rec = &ExportRecord{
@ -142,20 +124,17 @@ func (ce *cacheExporter) Export(ctx context.Context, m map[digest.Digest]*Export
m[cacheID] = rec
}
if len(deps) == 0 {
if len(ce.Deps()) == 0 {
rec.Links[CacheLink{
Output: ce.Output(),
Base: ce.Digest(),
}] = struct{}{}
}
allDeps := ce.Deps()
if ce.dep != nil {
allDeps = []CacheKeyWithSelector{*ce.dep}
}
for i, dep := range allDeps {
for i, dep := range ce.deps {
if dep.CacheKey.Exporter == nil {
continue
}
r, err := dep.CacheKey.Export(ctx, m, converter)
if err != nil {
return nil, err
@ -185,12 +164,15 @@ func (c *inMemoryCacheManager) ID() string {
return c.id
}
func (c *inMemoryCacheManager) toInMemoryCacheKey(cki CacheKeyInfo) *inMemoryCacheKey {
return &inMemoryCacheKey{
func (c *inMemoryCacheManager) toInMemoryCacheKey(cki CacheKeyInfo, deps []CacheKeyWithSelector) *inMemoryCacheKey {
ck := &inMemoryCacheKey{
CacheKeyInfo: cki,
manager: c,
CacheKey: NewCacheKey("", 0, nil),
deps: deps,
}
ck.SetValue(internalMemoryKey, cki.ID)
return ck
}
func (c *inMemoryCacheManager) getBestResult(cki CacheKeyInfo) (*CacheResult, error) {
@ -213,72 +195,55 @@ func (c *inMemoryCacheManager) getBestResult(cki CacheKeyInfo) (*CacheResult, er
return nil, nil
}
func (c *inMemoryCacheManager) Query(deps []ExportableCacheKey, input Index, dgst digest.Digest, output Index, selector digest.Digest) ([]*CacheRecord, error) {
func (c *inMemoryCacheManager) Query(deps []CacheKeyWithSelector, input Index, dgst digest.Digest, output Index) ([]*CacheRecord, error) {
c.mu.RLock()
defer c.mu.RUnlock()
refs := map[string]*CacheKeyWithSelector{}
sublinks := map[string]*CacheKeyWithSelector{}
type dep struct {
results map[string]struct{}
key CacheKeyWithSelector
internalKey *inMemoryCacheKey
}
formatDeps := func(deps []dep, index int) []CacheKeyWithSelector {
keys := make([]CacheKeyWithSelector, index+1)
if len(deps) == 1 {
keys[index] = deps[0].key
} else {
k2 := make([]CacheKeyWithSelector, 0, len(deps))
for _, d := range deps {
k2 = append(k2, d.key)
}
keys[index] = CacheKeyWithSelector{CacheKey: ExportableCacheKey{CacheKey: NewCacheKey("", 0, k2)}}
}
return keys
}
allDeps := make([]dep, 0, len(deps))
for _, d := range deps {
var dd []CacheKeyWithSelector
if d.Digest() == "" && d.Output() == 0 {
for _, dep := range d.Deps() {
dd = append(dd, dep)
}
} else {
dd = append(dd, CacheKeyWithSelector{Selector: selector, CacheKey: d})
}
for _, dep := range dd {
ck, err := c.getInternalKey(dep.CacheKey, false)
if err == nil {
if err := c.backend.WalkLinks(ck.CacheKeyInfo.ID, CacheInfoLink{input, output, dgst, selector}, func(id string) error {
refs[id] = &CacheKeyWithSelector{Selector: selector, CacheKey: d}
return nil
}); err != nil {
return nil, err
}
if err := c.backend.WalkLinks(ck.CacheKeyInfo.ID, CacheInfoLink{Index(-1), Index(0), "", selector}, func(id string) error {
sublinks[id] = &CacheKeyWithSelector{Selector: selector, CacheKey: d}
return nil
}); err != nil {
return nil, err
}
if err := c.backend.WalkLinks(ck.CacheKeyInfo.ID, CacheInfoLink{Index(-1), Index(0), "", NoSelector}, func(id string) error {
sublinks[id] = &CacheKeyWithSelector{Selector: selector, CacheKey: d}
return nil
}); err != nil {
return nil, err
}
for _, k := range c.getAllKeys(d) {
d := dep{key: k, results: map[string]struct{}{}}
internalKey, err := c.getInternalKey(k.CacheKey, false)
if err != nil {
if errors.Cause(err) == ErrNotFound {
allDeps = append(allDeps, d)
} else {
return nil, err
}
} else {
d.internalKey = internalKey
}
allDeps = append(allDeps, d)
}
}
for id, mainKey := range sublinks {
ck, err := c.backend.Get(id)
if err == nil {
mainCk, err := c.getInternalKey(mainKey.CacheKey, false)
addNewKey := mainKey.CacheKey.Digest() == "" && (err != nil || mainCk.CacheKeyInfo.ID != ck.ID)
if err := c.backend.WalkLinks(ck.ID, CacheInfoLink{input, output, dgst, ""}, func(id string) error {
if addNewKey {
ck, err := c.getInternalKey(mainKey.CacheKey, true)
if err != nil {
return err
}
err = c.backend.AddLink(ck.CacheKeyInfo.ID, CacheInfoLink{
Input: input,
Output: output,
Digest: dgst,
Selector: "",
}, id)
if err != nil {
return err
}
}
refs[id] = mainKey
allRes := map[string]struct{}{}
for _, d := range allDeps {
if d.internalKey != nil {
if err := c.backend.WalkLinks(d.internalKey.CacheKeyInfo.ID, CacheInfoLink{input, output, dgst, d.key.Selector}, func(id string) error {
d.results[id] = struct{}{}
allRes[id] = struct{}{}
return nil
}); err != nil {
return nil, err
@ -287,51 +252,77 @@ func (c *inMemoryCacheManager) Query(deps []ExportableCacheKey, input Index, dgs
}
if len(deps) == 0 {
ck, err := c.getInternalKey(NewCacheKey(dgst, 0, nil), false)
if err != nil {
return nil, nil
}
refs[ck.CacheKeyInfo.ID] = nil
allRes[digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, output))).String()] = struct{}{}
}
keys := make(map[string]*CacheKeyWithSelector)
outs := make([]*CacheRecord, 0, len(refs))
for id, dep := range refs {
cki, err := c.backend.Get(id)
if err == nil {
k := c.toInMemoryCacheKey(cki)
keys[cki.ID] = dep
if err := c.backend.WalkResults(id, func(r CacheResult) error {
if c.results.Exists(r.ID) {
outs = append(outs, &CacheRecord{
ID: id + "@" + r.ID,
CacheKey: withExporter(k, &r, dep),
CacheManager: c,
Loadable: true,
CreatedAt: r.CreatedAt,
})
} else {
c.backend.Release(r.ID)
outs := make([]*CacheRecord, 0, len(allRes))
for res := range allRes {
for _, d := range allDeps {
if d.internalKey == nil {
internalKey, err := c.getInternalKey(d.key.CacheKey, true)
if err != nil {
return nil, err
}
d.internalKey = internalKey
}
if _, ok := d.results[res]; !ok {
if err := c.backend.AddLink(d.internalKey.CacheKeyInfo.ID, CacheInfoLink{
Input: input,
Output: output,
Digest: dgst,
Selector: d.key.Selector,
}, res); err != nil {
return nil, err
}
return nil
}); err != nil {
return nil, err
}
}
}
hadResults := false
if len(outs) == 0 {
for id, dep := range keys {
cki, err := c.backend.Get(id)
if err == nil {
k := c.toInMemoryCacheKey(cki)
outs = append(outs, &CacheRecord{
ID: k.CacheKeyInfo.ID,
CacheKey: withExporter(k, nil, dep),
CacheManager: c,
Loadable: false,
})
cki, err := c.backend.Get(res)
if err != nil {
if errors.Cause(err) == ErrNotFound {
continue
}
return nil, errors.Wrapf(err, "failed lookup by internal ID %s", res)
}
deps := formatDeps(allDeps, int(input))
k := c.toInMemoryCacheKey(cki, deps)
// TODO: invoke this only once per input
if err := c.backend.WalkResults(res, func(r CacheResult) error {
if c.results.Exists(r.ID) {
outs = append(outs, &CacheRecord{
ID: res + "@" + r.ID,
CacheKey: withExporter(k, &r, deps),
CacheManager: c,
Loadable: true,
CreatedAt: r.CreatedAt,
})
hadResults = true
} else {
c.backend.Release(r.ID)
}
return nil
}); err != nil {
return nil, err
}
if !hadResults {
cki, err := c.backend.Get(res)
if err != nil {
if errors.Cause(err) == ErrNotFound {
continue
}
return nil, errors.Wrapf(err, "failed lookup by internal ID %s", res)
}
deps := formatDeps(allDeps, int(input))
k := c.toInMemoryCacheKey(cki, deps)
outs = append(outs, &CacheRecord{
ID: res,
CacheKey: withExporter(k, nil, deps),
CacheManager: c,
Loadable: false,
})
}
}
@ -379,7 +370,42 @@ func (c *inMemoryCacheManager) Save(k CacheKey, r Result) (ExportableCacheKey, e
return empty, err
}
return withExporter(ck, &res, nil), nil
return withExporter(ck, &res, ck.Deps()), nil
}
// getInternalKeys resolves a dependency key to its internal (persisted)
// representation. A key with an empty digest is a synthetic combined key:
// each of its direct dependencies is resolved instead. With
// createIfNotExist unset, missing combined-key dependencies are skipped;
// any other failure is returned to the caller.
func (c *inMemoryCacheManager) getInternalKeys(d CacheKeyWithSelector, createIfNotExist bool) ([]CacheKeyWithSelector, error) {
	if d.CacheKey.Digest() != "" {
		internal, err := c.getInternalKey(d.CacheKey, createIfNotExist)
		if err != nil {
			return nil, err
		}
		wrapped := ExportableCacheKey{CacheKey: internal, Exporter: d.CacheKey.Exporter}
		return []CacheKeyWithSelector{{Selector: d.Selector, CacheKey: wrapped}}, nil
	}
	// Synthetic combined key: expand to the internal keys of its deps.
	out := make([]CacheKeyWithSelector, 0, 1)
	for _, dep := range d.CacheKey.Deps() {
		internal, err := c.getInternalKey(dep.CacheKey, createIfNotExist)
		if err != nil {
			// In read-only mode a not-found dependency is tolerated.
			if !createIfNotExist && errors.Cause(err) == ErrNotFound {
				continue
			}
			return nil, err
		}
		wrapped := ExportableCacheKey{CacheKey: internal, Exporter: dep.CacheKey.Exporter}
		out = append(out, CacheKeyWithSelector{Selector: dep.Selector, CacheKey: wrapped})
	}
	return out, nil
}
// getAllKeys flattens a dependency key. A key with an empty digest is a
// synthetic combined key whose real keys are its direct dependencies, so
// those are returned; otherwise the key itself is returned as a
// single-element slice.
//
// Fixes: the original shadowed the parameter `d` inside the loop and
// copied dependencies one element at a time; a variadic append is the
// idiomatic equivalent.
func (c *inMemoryCacheManager) getAllKeys(d CacheKeyWithSelector) []CacheKeyWithSelector {
	if d.CacheKey.Digest() == "" {
		keys := make([]CacheKeyWithSelector, 0, len(d.CacheKey.Deps()))
		return append(keys, d.CacheKey.Deps()...)
	}
	return []CacheKeyWithSelector{d}
}
func (c *inMemoryCacheManager) getInternalKey(k CacheKey, createIfNotExist bool) (*inMemoryCacheKey, error) {
@ -391,66 +417,107 @@ func (c *inMemoryCacheManager) getInternalKey(k CacheKey, createIfNotExist bool)
}
internalV := k.GetValue(internalMemoryKey)
if internalV != nil {
ck, err := c.backend.Get(internalV.(string))
cki, err := c.backend.Get(internalV.(string))
if err != nil {
return nil, errors.Wrapf(err, "failed lookup by internal ID %s", internalV.(string))
}
return c.toInMemoryCacheKey(ck), nil
return c.toInMemoryCacheKey(cki, k.Deps()), nil
}
inputs := make([]CacheKeyInfoWithSelector, len(k.Deps()))
dgstr := digest.SHA256.Digester()
matches := make(map[string]struct{})
deps := make([][]CacheKeyWithSelector, 0, len(k.Deps()))
for i, inp := range k.Deps() {
ck, err := c.getInternalKey(inp.CacheKey, createIfNotExist)
allKeys := c.getAllKeys(inp)
cks := make([]CacheKeyWithSelector, 0, len(allKeys))
for _, k := range allKeys {
internalKey, err := c.getInternalKey(k.CacheKey, createIfNotExist)
if err == nil {
cks = append(cks, CacheKeyWithSelector{Selector: k.Selector, CacheKey: ExportableCacheKey{CacheKey: internalKey, Exporter: k.CacheKey.Exporter}})
}
}
if len(cks) == 0 {
return nil, errors.WithStack(ErrNotFound)
}
if i == 0 || len(matches) > 0 {
for _, ck := range cks {
internalCk := ck.CacheKey.CacheKey.(*inMemoryCacheKey)
m2 := make(map[string]struct{})
if err := c.backend.WalkLinks(internalCk.CacheKeyInfo.ID, CacheInfoLink{
Input: Index(i),
Output: Index(k.Output()),
Digest: k.Digest(),
Selector: ck.Selector,
}, func(id string) error {
if i == 0 {
matches[id] = struct{}{}
} else {
m2[id] = struct{}{}
}
return nil
}); err != nil {
return nil, err
}
if i != 0 {
for id := range matches {
if _, ok := m2[id]; !ok {
delete(matches, id)
}
}
}
}
}
deps = append(deps, cks)
}
var internalKey string
if len(matches) == 0 && len(k.Deps()) > 0 {
if createIfNotExist {
internalKey = identity.NewID()
} else {
return nil, errors.WithStack(ErrNotFound)
}
} else {
for k := range matches {
internalKey = k
break
}
if len(k.Deps()) == 0 {
internalKey = digest.FromBytes([]byte(fmt.Sprintf("%s@%d", k.Digest(), k.Output()))).String()
}
cki, err := c.backend.Get(internalKey)
if err != nil {
return nil, err
}
inputs[i] = CacheKeyInfoWithSelector{ID: ck.CacheKeyInfo.ID, Selector: inp.Selector}
if _, err := dgstr.Hash().Write([]byte(fmt.Sprintf("%s:%s,", ck.CacheKeyInfo.ID, inp.Selector))); err != nil {
return nil, err
}
}
if _, err := dgstr.Hash().Write([]byte(k.Digest())); err != nil {
return nil, err
}
if _, err := dgstr.Hash().Write([]byte(fmt.Sprintf("%d", k.Output()))); err != nil {
return nil, err
}
internalKey := string(dgstr.Digest())
cki, err := c.backend.Get(internalKey)
if err != nil {
if errors.Cause(err) == ErrNotFound {
if !createIfNotExist {
if errors.Cause(err) == ErrNotFound {
if !createIfNotExist {
return nil, err
}
} else {
return nil, err
}
} else {
return nil, err
return c.toInMemoryCacheKey(cki, k.Deps()), nil
}
}
cki = CacheKeyInfo{
ID: internalKey,
Base: k.Digest(),
Output: int(k.Output()),
Deps: inputs,
}
cki := CacheKeyInfo{
ID: internalKey,
Base: k.Digest(),
Output: int(k.Output()),
}
if err := c.backend.Set(cki); err != nil {
return nil, err
}
if err := c.backend.Set(cki); err != nil {
return nil, err
}
for i, inp := range inputs {
if cki.Base == "" {
i = -1
}
err := c.backend.AddLink(inp.ID, CacheInfoLink{
for i, dep := range deps {
for _, ck := range dep {
internalCk := ck.CacheKey.CacheKey.(*inMemoryCacheKey)
err := c.backend.AddLink(internalCk.CacheKeyInfo.ID, CacheInfoLink{
Input: Index(i),
Output: Index(cki.Output),
Digest: cki.Base,
Selector: inp.Selector,
Selector: ck.Selector,
}, cki.ID)
if err != nil {
return nil, err
@ -458,15 +525,7 @@ func (c *inMemoryCacheManager) getInternalKey(k CacheKey, createIfNotExist bool)
}
}
ck := &inMemoryCacheKey{
CacheKey: k,
CacheKeyInfo: cki,
manager: c,
deps: k.Deps(),
}
ck.SetValue(internalMemoryKey, internalKey)
return ck, nil
return c.toInMemoryCacheKey(cki, k.Deps()), nil
}
func newCombinedCacheManager(cms []CacheManager, main CacheManager) CacheManager {
@ -491,14 +550,14 @@ func (cm *combinedCacheManager) ID() string {
return cm.id
}
func (cm *combinedCacheManager) Query(inp []ExportableCacheKey, inputIndex Index, dgst digest.Digest, outputIndex Index, selector digest.Digest) ([]*CacheRecord, error) {
func (cm *combinedCacheManager) Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheRecord, error) {
eg, _ := errgroup.WithContext(context.TODO())
res := make(map[string]*CacheRecord, len(cm.cms))
var mu sync.Mutex
for i, c := range cm.cms {
func(i int, c CacheManager) {
eg.Go(func() error {
recs, err := c.Query(inp, inputIndex, dgst, outputIndex, selector)
recs, err := c.Query(inp, inputIndex, dgst, outputIndex)
if err != nil {
return err
}

View File

@ -9,6 +9,14 @@ import (
"github.com/stretchr/testify/require"
)
// depKeys wraps each plain CacheKey in a CacheKeyWithSelector with an
// empty selector, the dependency form expected by CacheManager.Query.
// Test helper.
func depKeys(cks ...CacheKey) []CacheKeyWithSelector {
	var out []CacheKeyWithSelector
	for _, key := range cks {
		exportable := ExportableCacheKey{CacheKey: key}
		out = append(out, CacheKeyWithSelector{CacheKey: exportable})
	}
	return out
}
func TestInMemoryCache(t *testing.T) {
ctx := context.TODO()
@ -17,7 +25,7 @@ func TestInMemoryCache(t *testing.T) {
cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0, nil), testResult("result0"))
require.NoError(t, err)
matches, err := m.Query(nil, 0, dgst("foo"), 0, "")
matches, err := m.Query(nil, 0, dgst("foo"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
@ -29,7 +37,7 @@ func TestInMemoryCache(t *testing.T) {
cacheBar, err := m.Save(NewCacheKey(dgst("bar"), 0, nil), testResult("result1"))
require.NoError(t, err)
matches, err = m.Query(nil, 0, dgst("bar"), 0, "")
matches, err = m.Query(nil, 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
@ -38,7 +46,7 @@ func TestInMemoryCache(t *testing.T) {
require.Equal(t, "result1", unwrap(res))
// invalid request
matches, err = m.Query(nil, 0, dgst("baz"), 0, "")
matches, err = m.Query(nil, 0, dgst("baz"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 0)
@ -49,19 +57,19 @@ func TestInMemoryCache(t *testing.T) {
cacheBaz, err := m.Save(k, testResult("result2"))
require.NoError(t, err)
matches, err = m.Query(nil, 0, dgst("baz"), 0, "")
matches, err = m.Query(nil, 0, dgst("baz"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 0)
matches, err = m.Query([]ExportableCacheKey{cacheFoo}, 0, dgst("baz"), 0, "")
matches, err = m.Query(depKeys(cacheFoo), 0, dgst("baz"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 0)
matches, err = m.Query([]ExportableCacheKey{cacheFoo}, 1, dgst("baz"), Index(1), "")
matches, err = m.Query(depKeys(cacheFoo), 1, dgst("baz"), Index(1))
require.NoError(t, err)
require.Equal(t, len(matches), 0)
matches, err = m.Query([]ExportableCacheKey{cacheFoo}, 0, dgst("baz"), Index(1), "")
matches, err = m.Query(depKeys(cacheFoo), 0, dgst("baz"), Index(1))
require.NoError(t, err)
require.Equal(t, len(matches), 1)
@ -69,7 +77,7 @@ func TestInMemoryCache(t *testing.T) {
require.NoError(t, err)
require.Equal(t, "result2", unwrap(res))
matches2, err := m.Query([]ExportableCacheKey{cacheBar}, 1, dgst("baz"), Index(1), "")
matches2, err := m.Query(depKeys(cacheBar), 1, dgst("baz"), Index(1))
require.NoError(t, err)
require.Equal(t, len(matches2), 1)
@ -81,7 +89,7 @@ func TestInMemoryCache(t *testing.T) {
_, err = m.Save(k, testResult("result3"))
require.NoError(t, err)
matches, err = m.Query([]ExportableCacheKey{cacheFoo}, 0, dgst("baz"), Index(1), "")
matches, err = m.Query(depKeys(cacheFoo), 0, dgst("baz"), Index(1))
require.NoError(t, err)
require.Equal(t, len(matches), 2)
@ -97,18 +105,18 @@ func TestInMemoryCache(t *testing.T) {
require.NoError(t, err)
// foo, bar, baz should all point to result4
matches, err = m.Query([]ExportableCacheKey{cacheFoo}, 0, dgst("bax"), 0, "")
matches, err = m.Query(depKeys(cacheFoo), 0, dgst("bax"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
id := matches[0].ID
matches, err = m.Query([]ExportableCacheKey{cacheBar}, 1, dgst("bax"), 0, "")
matches, err = m.Query(depKeys(cacheBar), 1, dgst("bax"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
require.Equal(t, matches[0].ID, id)
matches, err = m.Query([]ExportableCacheKey{cacheBaz}, 0, dgst("bax"), 0, "")
matches, err = m.Query(depKeys(cacheBaz), 0, dgst("bax"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
require.Equal(t, matches[0].ID, id)
@ -127,15 +135,15 @@ func TestInMemoryCacheSelector(t *testing.T) {
}), testResult("result1"))
require.NoError(t, err)
matches, err := m.Query([]ExportableCacheKey{cacheFoo}, 0, dgst("bar"), 0, "")
matches, err := m.Query(depKeys(cacheFoo), 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 0)
matches, err = m.Query([]ExportableCacheKey{cacheFoo}, 0, dgst("bar"), 0, "sel-invalid")
matches, err = m.Query([]CacheKeyWithSelector{{Selector: "sel-invalid", CacheKey: ExportableCacheKey{CacheKey: cacheFoo}}}, 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 0)
matches, err = m.Query([]ExportableCacheKey{cacheFoo}, 0, dgst("bar"), 0, dgst("sel0"))
matches, err = m.Query([]CacheKeyWithSelector{{Selector: dgst("sel0"), CacheKey: ExportableCacheKey{CacheKey: cacheFoo}}}, 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
@ -154,7 +162,7 @@ func TestInMemoryCacheSelectorNested(t *testing.T) {
k2 := NewCacheKey("", 0, []CacheKeyWithSelector{
{CacheKey: cacheFoo, Selector: dgst("sel0")},
{CacheKey: ExportableCacheKey{CacheKey: NewCacheKey(dgst("second"), 0, nil)}, Selector: NoSelector},
{CacheKey: ExportableCacheKey{CacheKey: NewCacheKey(dgst("second"), 0, nil)}},
})
_, err = m.Save(NewCacheKey(dgst("bar"), 0, []CacheKeyWithSelector{
@ -162,34 +170,29 @@ func TestInMemoryCacheSelectorNested(t *testing.T) {
}), testResult("result1"))
require.NoError(t, err)
matches, err := m.Query([]ExportableCacheKey{cacheFoo}, 0, dgst("bar"), 0, dgst("sel0"))
matches, err := m.Query([]CacheKeyWithSelector{{Selector: dgst("sel0"), CacheKey: ExportableCacheKey{CacheKey: cacheFoo}}}, 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
res, err := m.Load(ctx, matches[0])
require.NoError(t, err)
require.Equal(t, "result1", unwrap(res))
matches, err = m.Query([]ExportableCacheKey{cacheFoo}, 0, dgst("bar"), 0, "")
matches, err = m.Query(depKeys(cacheFoo), 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 0)
matches, err = m.Query([]ExportableCacheKey{cacheFoo}, 0, dgst("bar"), 0, dgst("bar"))
matches, err = m.Query([]CacheKeyWithSelector{{Selector: dgst("bar"), CacheKey: ExportableCacheKey{CacheKey: cacheFoo}}}, 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 0)
// keys with NoSelector always match
matches, err = m.Query([]ExportableCacheKey{{CacheKey: NewCacheKey(dgst("second"), 0, nil)}}, 0, dgst("bar"), 0, dgst("sel0"))
matches, err = m.Query(depKeys(NewCacheKey(dgst("second"), 0, nil)), 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
res, err = m.Load(ctx, matches[0])
require.NoError(t, err)
require.Equal(t, "result1", unwrap(res))
matches, err = m.Query([]ExportableCacheKey{{CacheKey: NewCacheKey(dgst("second"), 0, nil)}}, 0, dgst("bar"), 0, "")
require.NoError(t, err)
require.Equal(t, len(matches), 1)
matches, err = m.Query([]ExportableCacheKey{{CacheKey: NewCacheKey(dgst("second"), 0, nil)}}, 0, dgst("bar"), 0, dgst("bar"))
matches, err = m.Query(depKeys(NewCacheKey(dgst("second"), 0, nil)), 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
}
@ -209,7 +212,7 @@ func TestInMemoryCacheReleaseParent(t *testing.T) {
}), res1)
require.NoError(t, err)
matches, err := m.Query(nil, 0, dgst("foo"), 0, "")
matches, err := m.Query(nil, 0, dgst("foo"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
@ -219,13 +222,13 @@ func TestInMemoryCacheReleaseParent(t *testing.T) {
require.NoError(t, err)
// foo becomes unloadable
matches, err = m.Query(nil, 0, dgst("foo"), 0, "")
matches, err = m.Query(nil, 0, dgst("foo"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
require.False(t, matches[0].Loadable)
matches, err = m.Query([]ExportableCacheKey{matches[0].CacheKey}, 0, dgst("bar"), 0, "")
matches, err = m.Query(depKeys(matches[0].CacheKey), 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
@ -235,7 +238,7 @@ func TestInMemoryCacheReleaseParent(t *testing.T) {
err = storage.Release(res1.ID())
require.NoError(t, err)
matches, err = m.Query(nil, 0, dgst("foo"), 0, "")
matches, err = m.Query(nil, 0, dgst("foo"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 0)
}
@ -263,12 +266,12 @@ func TestInMemoryCacheRestoreOfflineDeletion(t *testing.T) {
m = NewCacheManager(identity.NewID(), storage, results2)
matches, err := m.Query(nil, 0, dgst("foo"), 0, "")
matches, err := m.Query(nil, 0, dgst("foo"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
require.False(t, matches[0].Loadable)
matches, err = m.Query([]ExportableCacheKey{matches[0].CacheKey}, 0, dgst("bar"), 0, "")
matches, err = m.Query(depKeys(matches[0].CacheKey), 0, dgst("bar"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
@ -299,11 +302,11 @@ func TestCarryOverFromSublink(t *testing.T) {
{CacheKey: ExportableCacheKey{CacheKey: NewCacheKey(dgst("content0"), 0, nil)}, Selector: NoSelector},
})
matches, err := m.Query([]ExportableCacheKey{{CacheKey: k3}}, 0, dgst("res"), 0, "")
matches, err := m.Query(depKeys(k3), 0, dgst("res"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
matches, err = m.Query([]ExportableCacheKey{{CacheKey: cacheBar}}, 0, dgst("res"), 0, dgst("sel0"))
matches, err = m.Query([]CacheKeyWithSelector{{Selector: dgst("sel0"), CacheKey: ExportableCacheKey{CacheKey: cacheBar}}}, 0, dgst("res"), 0)
require.NoError(t, err)
require.Equal(t, len(matches), 1)
}

View File

@ -30,7 +30,7 @@ type CacheKeyInfo struct {
ID string
Base digest.Digest
Output int
Deps []CacheKeyInfoWithSelector
// Deps []CacheKeyInfoWithSelector
}
// CacheKeyInfoWithSelector is CacheKeyInfo combined with a selector

View File

@ -164,14 +164,14 @@ func (e *edge) updateIncoming(req pipe.Sender) {
// probeCache is called with unprocessed cache keys for dependency
// if the key could match the edge, the cacheRecords for dependency are filled
func (e *edge) probeCache(d *dep, keys []ExportableCacheKey) bool {
func (e *edge) probeCache(d *dep, keys []CacheKeyWithSelector) bool {
if len(keys) == 0 {
return false
}
if e.op.IgnoreCache() {
return false
}
records, err := e.op.Cache().Query(keys, d.index, e.cacheMap.Digest, e.edge.Index, e.cacheMap.Deps[d.index].Selector)
records, err := e.op.Cache().Query(keys, d.index, e.cacheMap.Digest, e.edge.Index)
if err != nil {
e.err = errors.Wrap(err, "error on cache query")
}
@ -309,6 +309,14 @@ func (e *edge) unpark(incoming []pipe.Sender, updates, allPipes []pipe.Receiver,
}
// withSelector pairs every key with the given selector, producing the
// dependency-key slice form expected by CacheManager.Query.
func withSelector(keys []ExportableCacheKey, selector digest.Digest) []CacheKeyWithSelector {
	result := make([]CacheKeyWithSelector, 0, len(keys))
	for _, key := range keys {
		result = append(result, CacheKeyWithSelector{Selector: selector, CacheKey: key})
	}
	return result
}
// processUpdate is called by unpark for every updated pipe request
func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
// response for cachemap request
@ -322,7 +330,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
e.cacheMap = upt.Status().Value.(*CacheMap)
if len(e.deps) == 0 {
if !e.op.IgnoreCache() {
records, err := e.op.Cache().Query(nil, 0, e.cacheMap.Digest, e.edge.Index, "")
records, err := e.op.Cache().Query(nil, 0, e.cacheMap.Digest, e.edge.Index)
if err != nil {
logrus.Error(errors.Wrap(err, "invalid query response")) // make the build fail for this error
} else {
@ -342,8 +350,8 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
e.state = edgeStatusCacheSlow
}
// probe keys that were loaded before cache map
for _, dep := range e.deps {
e.probeCache(dep, dep.keys)
for i, dep := range e.deps {
e.probeCache(dep, withSelector(dep.keys, e.cacheMap.Deps[i].Selector))
e.checkDepMatchPossible(dep)
}
}
@ -377,9 +385,8 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
if len(dep.keys) < len(state.keys) {
newKeys := state.keys[len(dep.keys):]
if e.cacheMap != nil {
e.probeCache(dep, newKeys)
e.probeCache(dep, withSelector(newKeys, e.cacheMap.Deps[dep.index].Selector))
if e.allDepsHaveKeys() {
e.keysDidChange = true
}
@ -417,7 +424,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) {
{CacheKey: ExportableCacheKey{CacheKey: k, Exporter: &emptyExporter{k}}, Selector: NoSelector},
})
dep.slowCacheKey = &ExportableCacheKey{ck, &emptyExporter{ck}}
dep.slowCacheFoundKey = e.probeCache(dep, []ExportableCacheKey{*dep.slowCacheKey})
dep.slowCacheFoundKey = e.probeCache(dep, []CacheKeyWithSelector{{CacheKey: ExportableCacheKey{CacheKey: *dep.slowCacheKey}}})
dep.slowCacheComplete = true
e.keysDidChange = true
e.checkDepMatchPossible(dep) // not matching key here doesn't set nocachematch possible to true
@ -456,9 +463,15 @@ func (e *edge) recalcCurrentState() {
}
for k, r := range newRecords {
e.keys = append(e.keys, r.CacheKey)
mergedKey := r.CacheKey
if len(e.deps) > 0 {
mergedKey = toMergedCacheKey(e.deps, k)
}
e.keys = append(e.keys, mergedKey)
if r.Loadable {
e.cacheRecords[k] = r
r2 := r
r2.CacheKey = mergedKey
e.cacheRecords[k] = r2
}
}
@ -714,24 +727,12 @@ func (e *edge) loadCache(ctx context.Context) (interface{}, error) {
rec = r
}
}
var exp Exporter
if len(e.deps) > 0 {
for _, dep := range e.deps {
if exp == nil {
exp = dep.cacheRecords[rec.ID].CacheKey
} else {
exp = &mergedExporter{exp, dep.cacheRecords[rec.ID].CacheKey}
}
}
} else {
exp = rec.CacheKey
}
logrus.Debugf("load cache for %s with %s", e.edge.Vertex.Name(), rec.ID)
res, err := e.op.Cache().Load(ctx, rec)
if err != nil {
return nil, err
}
return NewCachedResult(res, rec.CacheKey, exp), nil
return NewCachedResult(res, rec.CacheKey, rec.CacheKey), nil
}
// execOp creates a request to execute the vertex operation
@ -810,20 +811,37 @@ func (e *emptyExporter) Export(ctx context.Context, m map[digest.Digest]*ExportR
}
type mergedExporter struct {
e1, e2 Exporter
exporters []Exporter
}
func (e *mergedExporter) Export(ctx context.Context, m map[digest.Digest]*ExportRecord, fn func(context.Context, Result) (*Remote, error)) (*ExportRecord, error) {
_, err := e.e1.Export(ctx, m, fn)
if err != nil {
return nil, err
func (e *mergedExporter) Export(ctx context.Context, m map[digest.Digest]*ExportRecord, fn func(context.Context, Result) (*Remote, error)) (er *ExportRecord, err error) {
for _, e := range e.exporters {
er, err = e.Export(ctx, m, fn)
if err != nil {
return nil, err
}
}
r, err := e.e2.Export(ctx, m, fn)
if err != nil {
return nil, err
}
return r, nil
return
}
// toMergedCacheKey builds a single exportable cache key for record id by
// combining the per-dependency cache records: dependency i contributes
// its i-th Deps() entry and its key as an exporter.
//
// NOTE(review): assumes every dep has a cacheRecords entry for id and
// that record's key has at least i+1 deps — a missing entry or short
// Deps() slice would panic here. TODO confirm callers guarantee this.
func toMergedCacheKey(deps []*dep, id string) ExportableCacheKey {
depKeys := make([]CacheKeyWithSelector, len(deps))
exporters := make([]Exporter, len(deps))
for i, d := range deps {
depKeys[i] = d.cacheRecords[id].CacheKey.Deps()[i]
exporters[i] = d.cacheRecords[id].CacheKey
}
// The first dependency's key supplies the base CacheKey behavior;
// only Deps() is overridden by mergedCacheKey.
return ExportableCacheKey{
CacheKey: &mergedCacheKey{CacheKey: deps[0].cacheRecords[id].CacheKey, deps: depKeys},
Exporter: &mergedExporter{exporters: exporters},
}
}
// mergedCacheKey wraps an existing CacheKey but reports a caller-supplied
// dependency list instead of the embedded key's own deps.
type mergedCacheKey struct {
CacheKey
deps []CacheKeyWithSelector
}
// Deps overrides the embedded CacheKey's Deps with the merged list.
func (ck *mergedCacheKey) Deps() []CacheKeyWithSelector {
return ck.deps
}

View File

@ -143,7 +143,7 @@ type CacheManager interface {
ID() string
// Query searches for cache paths from one cache key to the output of a
// possible match.
Query(inp []ExportableCacheKey, inputIndex Index, dgst digest.Digest, outputIndex Index, selector digest.Digest) ([]*CacheRecord, error)
Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheRecord, error)
// Load pulls and returns the cached result
Load(ctx context.Context, rec *CacheRecord) (Result, error)
// Save saves a result based on a cache key