solver: separate cache metadata storage interface
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>

parent 47af144c29
commit ff7d75def0
@@ -0,0 +1,465 @@
package solver

import (
    "context"
    "fmt"
    "sort"
    "strings"
    "sync"

    "github.com/moby/buildkit/identity"
    digest "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "golang.org/x/sync/errgroup"
)

type internalMemoryKeyT string

var internalMemoryKey = internalMemoryKeyT("buildkit/memory-cache-id")

var NoSelector = digest.FromBytes(nil)

func NewInMemoryCacheManager() CacheManager {
    return &inMemoryCacheManager{
        id:      identity.NewID(),
        backend: NewInMemoryCacheStorage(),
        results: NewInMemoryResultStorage(),
    }
}

type inMemoryCacheKey struct {
    CacheKeyInfo CacheKeyInfo
    manager      *inMemoryCacheManager
    cacheResult  CacheResult
    deps         []CacheKeyWithSelector // only []*inMemoryCacheKey
    CacheKey
}

func (ck *inMemoryCacheKey) Deps() []CacheKeyWithSelector {
    if len(ck.deps) == 0 || len(ck.CacheKeyInfo.Deps) > 0 {
        deps := make([]CacheKeyWithSelector, len(ck.CacheKeyInfo.Deps))
        for i, dep := range ck.CacheKeyInfo.Deps {
            k, err := ck.manager.backend.Get(dep.ID)
            if err != nil {
                logrus.Errorf("dependency %s not found", dep.ID)
            } else {
                deps[i] = CacheKeyWithSelector{
                    CacheKey: withExporter(ck.manager.toInMemoryCacheKey(k), nil),
                    Selector: dep.Selector,
                }
            }
        }
        ck.deps = deps
    }
    return ck.deps
}

func (ck *inMemoryCacheKey) Digest() digest.Digest {
    return ck.CacheKeyInfo.Base
}
func (ck *inMemoryCacheKey) Output() Index {
    return Index(ck.CacheKeyInfo.Output)
}

func withExporter(ck *inMemoryCacheKey, cacheResult *CacheResult) ExportableCacheKey {
    return ExportableCacheKey{ck, &cacheExporter{
        inMemoryCacheKey: ck,
        cacheResult:      cacheResult,
    }}
}

type cacheExporter struct {
    *inMemoryCacheKey
    cacheResult *CacheResult
}

func (ce *cacheExporter) Export(ctx context.Context, m map[digest.Digest]*ExportRecord, converter func(context.Context, Result) (*Remote, error)) (*ExportRecord, error) {
    var res Result
    if ce.cacheResult == nil {
        cr, err := ce.inMemoryCacheKey.manager.getBestResult(ce.inMemoryCacheKey.CacheKeyInfo)
        if err != nil {
            return nil, err
        }
        ce.cacheResult = cr
    }

    var remote *Remote
    var err error

    if ce.cacheResult != nil {
        remote, err = ce.inMemoryCacheKey.manager.results.LoadRemote(ctx, *ce.cacheResult)
        if err != nil {
            return nil, err
        }

        if remote == nil {
            res, err = ce.inMemoryCacheKey.manager.results.Load(ctx, *ce.cacheResult)
            if err != nil {
                return nil, err
            }
        }
    }

    if res != nil && remote == nil {
        remote, err = converter(ctx, res)
        if err != nil {
            return nil, err
        }
    }

    cacheID := digest.FromBytes([]byte(ce.inMemoryCacheKey.CacheKeyInfo.ID))
    if remote != nil && len(remote.Descriptors) > 0 && remote.Descriptors[0].Digest != "" {
        cacheID = remote.Descriptors[0].Digest
    }

    deps := ce.deps

    rec, ok := m[cacheID]
    if !ok {
        rec = &ExportRecord{
            Digest: cacheID,
            Remote: remote,
            Links:  make(map[CacheLink]struct{}),
        }
        m[cacheID] = rec
    }

    if len(deps) == 0 {
        rec.Links[CacheLink{
            Output: ce.Output(),
            Base:   ce.Digest(),
        }] = struct{}{}
    }

    for i, dep := range ce.Deps() {
        r, err := dep.CacheKey.Export(ctx, m, converter)
        if err != nil {
            return nil, err
        }
        link := CacheLink{
            Source:   r.Digest,
            Input:    Index(i),
            Output:   ce.Output(),
            Base:     ce.Digest(),
            Selector: dep.Selector,
        }
        rec.Links[link] = struct{}{}
    }

    return rec, nil
}

type inMemoryCacheManager struct {
    mu sync.RWMutex
    id string

    backend CacheKeyStorage
    results CacheResultStorage
}

func (c *inMemoryCacheManager) ID() string {
    return c.id
}

func (c *inMemoryCacheManager) toInMemoryCacheKey(cki CacheKeyInfo) *inMemoryCacheKey {
    return &inMemoryCacheKey{
        CacheKeyInfo: cki,
        manager:      c,
        CacheKey:     NewCacheKey("", 0, nil),
    }
}

func (c *inMemoryCacheManager) getBestResult(cki CacheKeyInfo) (*CacheResult, error) {
    var results []*CacheResult
    if err := c.backend.WalkResults(cki.ID, func(res CacheResult) error {
        results = append(results, &res)
        return nil
    }); err != nil {
        return nil, err
    }

    sort.Slice(results, func(i, j int) bool {
        return results[i].CreatedAt.Before(results[j].CreatedAt)
    })

    if len(results) > 0 {
        return results[0], nil
    }

    return nil, nil
}

func (c *inMemoryCacheManager) Query(deps []ExportableCacheKey, input Index, dgst digest.Digest, output Index, selector digest.Digest) ([]*CacheRecord, error) {
    c.mu.RLock()
    defer c.mu.RUnlock()

    refs := map[string]struct{}{}
    sublinks := map[string]struct{}{}

    for _, dep := range deps {
        ck, err := c.getInternalKey(dep, false)
        if err == nil {
            if err := c.backend.WalkLinks(ck.CacheKeyInfo.ID, CacheInfoLink{input, output, dgst, selector}, func(id string) error {
                refs[id] = struct{}{}
                return nil
            }); err != nil {
                return nil, err
            }

            if err := c.backend.WalkLinks(ck.CacheKeyInfo.ID, CacheInfoLink{Index(-1), Index(0), "", selector}, func(id string) error {
                sublinks[id] = struct{}{}
                return nil
            }); err != nil {
                return nil, err
            }

            if err := c.backend.WalkLinks(ck.CacheKeyInfo.ID, CacheInfoLink{Index(-1), Index(0), "", NoSelector}, func(id string) error {
                sublinks[id] = struct{}{}
                return nil
            }); err != nil {
                return nil, err
            }
        }
    }

    for id := range sublinks {
        ck, err := c.backend.Get(id)
        if err == nil {
            if err := c.backend.WalkLinks(ck.ID, CacheInfoLink{input, output, dgst, ""}, func(id string) error {
                refs[id] = struct{}{}
                return nil
            }); err != nil {
                return nil, err
            }
        }
    }

    if len(deps) == 0 {
        ck, err := c.getInternalKey(NewCacheKey(dgst, 0, nil), false)
        if err != nil {
            return nil, nil
        }
        refs[ck.CacheKeyInfo.ID] = struct{}{}
    }

    outs := make([]*CacheRecord, 0, len(refs))
    for id := range refs {
        cki, err := c.backend.Get(id)
        if err == nil {
            k := c.toInMemoryCacheKey(cki)
            if err := c.backend.WalkResults(id, func(r CacheResult) error {
                outs = append(outs, &CacheRecord{
                    ID:           id + "@" + r.ID,
                    CacheKey:     withExporter(k, &r),
                    CacheManager: c,
                    CreatedAt:    r.CreatedAt,
                })
                return nil
            }); err != nil {
                return nil, err
            }
        }
    }

    return outs, nil
}

func (c *inMemoryCacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) {
    c.mu.RLock()
    defer c.mu.RUnlock()

    keyParts := strings.Split(rec.ID, "@")
    if len(keyParts) != 2 {
        return nil, errors.Errorf("invalid cache record ID")
    }
    ck, err := c.getInternalKey(rec.CacheKey, false)
    if err != nil {
        return nil, err
    }

    res, err := c.backend.Load(ck.CacheKeyInfo.ID, keyParts[1])
    if err != nil {
        return nil, err
    }

    return c.results.Load(ctx, res)
}

func (c *inMemoryCacheManager) Save(k CacheKey, r Result) (ExportableCacheKey, error) {
    c.mu.Lock()
    defer c.mu.Unlock()

    empty := ExportableCacheKey{}

    ck, err := c.getInternalKey(k, true)
    if err != nil {
        return empty, err
    }

    res, err := c.results.Save(r)
    if err != nil {
        return empty, err
    }

    if err := c.backend.AddResult(ck.CacheKeyInfo.ID, res); err != nil {
        return empty, err
    }

    return withExporter(ck, &res), nil
}

func (c *inMemoryCacheManager) getInternalKey(k CacheKey, createIfNotExist bool) (*inMemoryCacheKey, error) {
    if ck, ok := k.(ExportableCacheKey); ok {
        k = ck.CacheKey
    }
    if ck, ok := k.(*inMemoryCacheKey); ok {
        return ck, nil
    }
    internalV := k.GetValue(internalMemoryKey)
    if internalV != nil {
        ck, err := c.backend.Get(internalV.(string))
        if err != nil {
            return nil, errors.Wrapf(err, "failed lookup by internal ID %s", internalV.(string))
        }
        return c.toInMemoryCacheKey(ck), nil
    }

    inputs := make([]CacheKeyInfoWithSelector, len(k.Deps()))
    dgstr := digest.SHA256.Digester()
    for i, inp := range k.Deps() {
        ck, err := c.getInternalKey(inp.CacheKey, createIfNotExist)
        if err != nil {
            return nil, err
        }
        inputs[i] = CacheKeyInfoWithSelector{ID: ck.CacheKeyInfo.ID, Selector: inp.Selector}
        if _, err := dgstr.Hash().Write([]byte(fmt.Sprintf("%s:%s,", ck.CacheKeyInfo.ID, inp.Selector))); err != nil {
            return nil, err
        }
    }

    if _, err := dgstr.Hash().Write([]byte(k.Digest())); err != nil {
        return nil, err
    }

    if _, err := dgstr.Hash().Write([]byte(fmt.Sprintf("%d", k.Output()))); err != nil {
        return nil, err
    }

    internalKey := string(dgstr.Digest())
    cki, err := c.backend.Get(internalKey)
    if err != nil {
        if errors.Cause(err) == ErrNotFound {
            if !createIfNotExist {
                return nil, err
            }
        } else {
            return nil, err
        }

        cki = CacheKeyInfo{
            ID:     internalKey,
            Base:   k.Digest(),
            Output: int(k.Output()),
            Deps:   inputs,
        }

        if err := c.backend.Set(cki.ID, cki); err != nil {
            return nil, err
        }

        for i, inp := range inputs {
            if cki.Base == "" {
                i = -1
            }

            err := c.backend.AddLink(inp.ID, CacheInfoLink{
                Input:    Index(i),
                Output:   Index(cki.Output),
                Digest:   cki.Base,
                Selector: inp.Selector,
            }, cki.ID)
            if err != nil {
                return nil, err
            }
        }
    }

    ck := &inMemoryCacheKey{
        CacheKey:     k,
        CacheKeyInfo: cki,
        manager:      c,
        deps:         k.Deps(),
    }
    ck.SetValue(internalMemoryKey, internalKey)

    return ck, nil
}

func newCombinedCacheManager(cms []CacheManager, main CacheManager) CacheManager {
    return &combinedCacheManager{cms: cms, main: main}
}

type combinedCacheManager struct {
    cms    []CacheManager
    main   CacheManager
    id     string
    idOnce sync.Once
}

func (cm *combinedCacheManager) ID() string {
    cm.idOnce.Do(func() {
        ids := make([]string, len(cm.cms))
        for i, c := range cm.cms {
            ids[i] = c.ID()
        }
        cm.id = digest.FromBytes([]byte(strings.Join(ids, ","))).String()
    })
    return cm.id
}

func (cm *combinedCacheManager) Query(inp []ExportableCacheKey, inputIndex Index, dgst digest.Digest, outputIndex Index, selector digest.Digest) ([]*CacheRecord, error) {
    eg, _ := errgroup.WithContext(context.TODO())
    res := make(map[string]*CacheRecord, len(cm.cms))
    var mu sync.Mutex
    for i, c := range cm.cms {
        func(i int, c CacheManager) {
            eg.Go(func() error {
                recs, err := c.Query(inp, inputIndex, dgst, outputIndex, selector)
                if err != nil {
                    return err
                }
                mu.Lock()
                for _, r := range recs {
                    if _, ok := res[r.ID]; !ok || c == cm.main {
                        r.CacheManager = c
                        if c == cm.main {
                            r.Priority = 1
                        }
                        res[r.ID] = r
                    }
                }
                mu.Unlock()
                return nil
            })
        }(i, c)
    }

    if err := eg.Wait(); err != nil {
        return nil, err
    }

    out := make([]*CacheRecord, 0, len(res))
    for _, r := range res {
        out = append(out, r)
    }
    return out, nil
}

func (cm *combinedCacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) {
    return rec.CacheManager.Load(ctx, rec)
}

func (cm *combinedCacheManager) Save(key CacheKey, s Result) (ExportableCacheKey, error) {
    return cm.main.Save(key, s)
}
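
The manager above no longer keeps keys, links, or results in its own maps; every lookup goes through the CacheKeyStorage and CacheResultStorage backends declared in the next file. As a rough usage sketch (not part of the diff; the function name and digest are made up, and the Result is assumed to be produced elsewhere by a worker), the save-then-query flow looks roughly like this:

package solver

import (
    "context"

    digest "github.com/opencontainers/go-digest"
)

// exampleSaveAndQuery sketches how a caller might store a solve result under
// a content-addressed key and find it again through the manager.
func exampleSaveAndQuery(ctx context.Context, res Result) error {
    cm := NewInMemoryCacheManager()
    opDgst := digest.FromBytes([]byte("example-op-definition"))

    // Save puts the result into the CacheResultStorage and records its
    // metadata against the computed internal key in the CacheKeyStorage.
    if _, err := cm.Save(NewCacheKey(opDgst, 0, nil), res); err != nil {
        return err
    }

    // A dependency-less query resolves by base digest and output index alone.
    recs, err := cm.Query(nil, 0, opDgst, 0, "")
    if err != nil {
        return err
    }
    for _, rec := range recs {
        if _, err := cm.Load(ctx, rec); err != nil {
            return err
        }
    }
    return nil
}
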
@@ -0,0 +1,61 @@
package solver

import (
    "context"
    "time"

    digest "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

var ErrNotFound = errors.Errorf("not found")

// CacheKeyStorage is an interface for persisting cache metadata
type CacheKeyStorage interface {
    Get(id string) (CacheKeyInfo, error)
    Set(id string, info CacheKeyInfo) error

    WalkResults(id string, fn func(CacheResult) error) error
    Load(id string, resultID string) (CacheResult, error)
    AddResult(id string, res CacheResult) error
    Release(resultID string) error

    AddLink(id string, link CacheInfoLink, target string) error
    WalkLinks(id string, link CacheInfoLink, fn func(id string) error) error
}

// CacheKeyInfo is storable metadata about a single cache key
type CacheKeyInfo struct {
    ID     string
    Base   digest.Digest
    Output int
    Deps   []CacheKeyInfoWithSelector
}

// CacheKeyInfoWithSelector is CacheKeyInfo combined with a selector
type CacheKeyInfoWithSelector struct {
    ID       string
    Selector digest.Digest
}

// CacheResult is a record for a single solve result
type CacheResult struct {
    // Payload []byte
    CreatedAt time.Time
    ID        string
}

// CacheInfoLink is a link between two cache keys
type CacheInfoLink struct {
    Input, Output Index
    Digest        digest.Digest
    Selector      digest.Digest
}

// CacheResultStorage is an interface for converting a cache metadata result
// to an actual solve result
type CacheResultStorage interface {
    Save(Result) (CacheResult, error)
    Load(ctx context.Context, res CacheResult) (Result, error)
    LoadRemote(ctx context.Context, res CacheResult) (*Remote, error)
}
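
These two interfaces split the metadata question (which keys, links, and results exist) from the materialization question (how a CacheResult record is turned back into a Result or Remote). A small illustration of the link contract, not part of the diff, using the in-memory store added later in this commit and made-up key IDs:

package solver

import "fmt"

// exampleLinkWalk shows that a link registered with AddLink under a given
// CacheInfoLink value is exactly what WalkLinks visits for that same value.
func exampleLinkWalk() error {
    store := NewInMemoryCacheStorage()

    for _, id := range []string{"key-a", "key-b"} {
        if err := store.Set(id, CacheKeyInfo{ID: id}); err != nil {
            return err
        }
    }

    l := CacheInfoLink{Input: 0, Output: 0, Digest: "", Selector: NoSelector}
    if err := store.AddLink("key-a", l, "key-b"); err != nil {
        return err
    }

    // Visits "key-b"; a CacheInfoLink with any differing field visits nothing.
    return store.WalkLinks("key-a", l, func(id string) error {
        fmt.Println("linked:", id)
        return nil
    })
}
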
@@ -1,389 +0,0 @@
package solver

import (
    "context"
    "fmt"
    "strings"
    "sync"
    "time"

    "github.com/moby/buildkit/identity"
    digest "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "golang.org/x/sync/errgroup"
)

type internalMemoryKeyT string

var internalMemoryKey = internalMemoryKeyT("buildkit/memory-cache-id")

var NoSelector = digest.FromBytes(nil)

func NewInMemoryCacheManager() CacheManager {
    return &inMemoryCacheManager{
        byID: map[string]*inMemoryCacheKey{},
        id:   identity.NewID(),
    }
}

type inMemoryCacheKey struct {
    CacheKey
    id     string
    dgst   digest.Digest
    output Index
    deps   []CacheKeyWithSelector // only []*inMemoryCacheKey

    results map[Index]map[string]savedResult
    links   map[link]map[string]struct{}
}

func (ck *inMemoryCacheKey) Deps() []CacheKeyWithSelector {
    return ck.deps
}
func (ck *inMemoryCacheKey) Digest() digest.Digest {
    return ck.dgst
}
func (ck *inMemoryCacheKey) Index() Index {
    return ck.output
}

func withExporter(ck *inMemoryCacheKey, result Result, deps [][]CacheKeyWithSelector) ExportableCacheKey {
    return ExportableCacheKey{ck, &cacheExporter{
        inMemoryCacheKey: ck,
        result:           result,
        deps:             deps,
    }}
}

type cacheExporter struct {
    *inMemoryCacheKey
    result Result
    deps   [][]CacheKeyWithSelector
}

func (ce *cacheExporter) Export(ctx context.Context, m map[digest.Digest]*ExportRecord, converter func(context.Context, Result) (*Remote, error)) (*ExportRecord, error) {
    remote, err := converter(ctx, ce.result)
    if err != nil {
        return nil, err
    }

    cacheID := digest.FromBytes([]byte(ce.inMemoryCacheKey.id))
    if len(remote.Descriptors) > 0 && remote.Descriptors[0].Digest != "" {
        cacheID = remote.Descriptors[0].Digest
    }

    deps := ce.deps

    rec, ok := m[cacheID]
    if !ok {
        rec = &ExportRecord{
            Digest: cacheID,
            Remote: remote,
            Links:  make(map[CacheLink]struct{}),
        }
        m[cacheID] = rec
    }

    if len(deps) == 0 {
        rec.Links[CacheLink{
            Output: ce.Output(),
            Base:   ce.Digest(),
        }] = struct{}{}
    }

    for i, deps := range ce.deps {
        for _, dep := range deps {
            r, err := dep.CacheKey.Export(ctx, m, converter)
            if err != nil {
                return nil, err
            }
            link := CacheLink{
                Source:   r.Digest,
                Input:    Index(i),
                Output:   ce.Output(),
                Base:     ce.Digest(),
                Selector: dep.Selector,
            }
            rec.Links[link] = struct{}{}
        }
    }

    return rec, nil
}

type savedResult struct {
    result    Result
    createdAt time.Time
}

type link struct {
    input, output Index
    dgst          digest.Digest
    selector      digest.Digest
}

type inMemoryCacheManager struct {
    mu   sync.RWMutex
    byID map[string]*inMemoryCacheKey
    id   string
}

func (c *inMemoryCacheManager) ID() string {
    return c.id
}

func (c *inMemoryCacheManager) Query(deps []ExportableCacheKey, input Index, dgst digest.Digest, output Index, selector digest.Digest) ([]*CacheRecord, error) {
    c.mu.RLock()
    defer c.mu.RUnlock()

    refs := map[string]struct{}{}
    sublinks := map[string]struct{}{}

    allDeps := make([][]CacheKeyWithSelector, int(input)+1)

    for _, dep := range deps {
        ck, err := c.getInternalKey(dep, false)
        if err == nil {
            for key := range ck.links[link{input, output, dgst, selector}] {
                refs[key] = struct{}{}
            }
            for key := range ck.links[link{Index(-1), Index(0), "", selector}] {
                sublinks[key] = struct{}{}
            }
            for key := range ck.links[link{Index(-1), Index(0), "", NoSelector}] {
                sublinks[key] = struct{}{}
            }
        }
    }

    for id := range sublinks {
        if ck, ok := c.byID[id]; ok {
            for key := range ck.links[link{input, output, dgst, ""}] {
                refs[key] = struct{}{}
            }
        }
    }

    if len(deps) == 0 {
        ck, err := c.getInternalKey(NewCacheKey(dgst, 0, nil), false)
        if err != nil {
            return nil, nil
        }
        refs[ck.id] = struct{}{}
    }

    for _, d := range deps {
        allDeps[int(input)] = append(allDeps[int(input)], CacheKeyWithSelector{CacheKey: d, Selector: selector})
    }

    outs := make([]*CacheRecord, 0, len(refs))
    for id := range refs {
        if ck, ok := c.byID[id]; ok {
            for _, res := range ck.results[output] {
                outs = append(outs, &CacheRecord{
                    ID:           id + "@" + res.result.ID(),
                    CacheKey:     withExporter(ck, res.result, allDeps),
                    CacheManager: c,
                    CreatedAt:    res.createdAt,
                })
            }
        }
    }

    return outs, nil
}

func (c *inMemoryCacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) {
    c.mu.RLock()
    defer c.mu.RUnlock()

    keyParts := strings.Split(rec.ID, "@")
    if len(keyParts) != 2 {
        return nil, errors.Errorf("invalid cache record ID")
    }
    ck, err := c.getInternalKey(rec.CacheKey, false)
    if err != nil {
        return nil, err
    }

    for output := range ck.results {
        res, ok := ck.results[output][keyParts[1]]
        if ok {
            return res.result, nil
        }
    }
    return nil, errors.Errorf("failed to load cache record") // TODO: typed error
}

func (c *inMemoryCacheManager) Save(k CacheKey, r Result) (ExportableCacheKey, error) {
    c.mu.Lock()
    defer c.mu.Unlock()

    empty := ExportableCacheKey{}

    ck, err := c.getInternalKey(k, true)
    if err != nil {
        return empty, err
    }
    if err := c.addResult(ck, k.Output(), r); err != nil {
        return empty, err
    }
    allDeps := make([][]CacheKeyWithSelector, len(k.Deps()))
    for i, d := range k.Deps() {
        allDeps[i] = append(allDeps[i], d)
    }

    return withExporter(ck, r, allDeps), nil
}

func (c *inMemoryCacheManager) getInternalKey(k CacheKey, createIfNotExist bool) (*inMemoryCacheKey, error) {
    if ck, ok := k.(*inMemoryCacheKey); ok {
        return ck, nil
    }
    internalV := k.GetValue(internalMemoryKey)
    if internalV != nil {
        ck, ok := c.byID[internalV.(string)]
        if !ok {
            return nil, errors.Errorf("failed lookup by internal ID %s", internalV.(string))
        }
        return ck, nil
    }
    inputs := make([]CacheKeyWithSelector, len(k.Deps()))
    dgstr := digest.SHA256.Digester()
    for i, inp := range k.Deps() {
        ck, err := c.getInternalKey(inp.CacheKey, createIfNotExist)
        if err != nil {
            return nil, err
        }
        inputs[i] = CacheKeyWithSelector{CacheKey: ExportableCacheKey{CacheKey: ck}, Selector: inp.Selector}
        if _, err := dgstr.Hash().Write([]byte(fmt.Sprintf("%s:%s,", ck.id, inp.Selector))); err != nil {
            return nil, err
        }
    }

    if _, err := dgstr.Hash().Write([]byte(k.Digest())); err != nil {
        return nil, err
    }

    if _, err := dgstr.Hash().Write([]byte(fmt.Sprintf("%d", k.Output()))); err != nil {
        return nil, err
    }

    internalKey := string(dgstr.Digest())
    ck, ok := c.byID[internalKey]
    if !ok {
        if !createIfNotExist {
            return nil, errors.Errorf("not-found")
        }
        ck = &inMemoryCacheKey{
            CacheKey: k,
            id:       internalKey,
            dgst:     k.Digest(),
            output:   k.Output(),
            deps:     inputs,
            results:  map[Index]map[string]savedResult{},
            links:    map[link]map[string]struct{}{},
        }
        ck.SetValue(internalMemoryKey, internalKey)
        c.byID[internalKey] = ck
    }

    for i, inp := range inputs {
        if ck.dgst == "" {
            i = -1
        }
        if err := c.addLink(link{Index(i), ck.output, ck.dgst, inp.Selector}, inp.CacheKey.CacheKey.(*inMemoryCacheKey), ck); err != nil {
            return nil, err
        }
    }

    return ck, nil
}

func (c *inMemoryCacheManager) addResult(ck *inMemoryCacheKey, output Index, r Result) error {
    m, ok := ck.results[output]
    if !ok {
        m = map[string]savedResult{}
        ck.results[output] = m
    }
    m[r.ID()] = savedResult{result: r, createdAt: time.Now()}
    return nil
}

func (c *inMemoryCacheManager) addLink(l link, from, to *inMemoryCacheKey) error {
    m, ok := from.links[l]
    if !ok {
        m = map[string]struct{}{}
        from.links[l] = m
    }
    m[to.id] = struct{}{}
    return nil
}

func newCombinedCacheManager(cms []CacheManager, main CacheManager) CacheManager {
    return &combinedCacheManager{cms: cms, main: main}
}

type combinedCacheManager struct {
    cms    []CacheManager
    main   CacheManager
    id     string
    idOnce sync.Once
}

func (cm *combinedCacheManager) ID() string {
    cm.idOnce.Do(func() {
        ids := make([]string, len(cm.cms))
        for i, c := range cm.cms {
            ids[i] = c.ID()
        }
        cm.id = digest.FromBytes([]byte(strings.Join(ids, ","))).String()
    })
    return cm.id
}

func (cm *combinedCacheManager) Query(inp []ExportableCacheKey, inputIndex Index, dgst digest.Digest, outputIndex Index, selector digest.Digest) ([]*CacheRecord, error) {
    eg, _ := errgroup.WithContext(context.TODO())
    res := make(map[string]*CacheRecord, len(cm.cms))
    var mu sync.Mutex
    for i, c := range cm.cms {
        func(i int, c CacheManager) {
            eg.Go(func() error {
                recs, err := c.Query(inp, inputIndex, dgst, outputIndex, selector)
                if err != nil {
                    return err
                }
                mu.Lock()
                for _, r := range recs {
                    if _, ok := res[r.ID]; !ok || c == cm.main {
                        r.CacheManager = c
                        if c == cm.main {
                            r.Priority = 1
                        }
                        res[r.ID] = r
                    }
                }
                mu.Unlock()
                return nil
            })
        }(i, c)
    }

    if err := eg.Wait(); err != nil {
        return nil, err
    }

    out := make([]*CacheRecord, 0, len(res))
    for _, r := range res {
        out = append(out, r)
    }
    return out, nil
}

func (cm *combinedCacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) {
    return rec.CacheManager.Load(ctx, rec)
}

func (cm *combinedCacheManager) Save(key CacheKey, s Result) (ExportableCacheKey, error) {
    return cm.main.Save(key, s)
}
@@ -0,0 +1,151 @@
package solver

import (
    "context"
    "sync"
    "time"

    "github.com/pkg/errors"
)

func NewInMemoryCacheStorage() CacheKeyStorage {
    return &inMemoryStore{byID: map[string]*inMemoryKey{}}
}

type inMemoryStore struct {
    mu   sync.RWMutex
    byID map[string]*inMemoryKey
}

type inMemoryKey struct {
    CacheKeyInfo

    results map[string]CacheResult
    links   map[CacheInfoLink]map[string]struct{}
}

func (s *inMemoryStore) Get(id string) (CacheKeyInfo, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    k, ok := s.byID[id]
    if !ok {
        return CacheKeyInfo{}, errors.WithStack(ErrNotFound)
    }
    return k.CacheKeyInfo, nil
}

func (s *inMemoryStore) Set(id string, info CacheKeyInfo) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    k, ok := s.byID[id]
    if !ok {
        k = &inMemoryKey{
            results: map[string]CacheResult{},
            links:   map[CacheInfoLink]map[string]struct{}{},
        }
        s.byID[id] = k
    }
    k.CacheKeyInfo = info
    return nil
}

func (s *inMemoryStore) WalkResults(id string, fn func(CacheResult) error) error {
    s.mu.RLock()
    defer s.mu.RUnlock()
    k, ok := s.byID[id]
    if !ok {
        return nil
    }
    for _, res := range k.results {
        if err := fn(res); err != nil {
            return err
        }
    }
    return nil
}

func (s *inMemoryStore) Load(id string, resultID string) (CacheResult, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    k, ok := s.byID[id]
    if !ok {
        return CacheResult{}, errors.Wrapf(ErrNotFound, "no such key %s", id)
    }
    r, ok := k.results[resultID]
    if !ok {
        return CacheResult{}, errors.WithStack(ErrNotFound)
    }
    return r, nil
}

func (s *inMemoryStore) AddResult(id string, res CacheResult) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    k, ok := s.byID[id]
    if !ok {
        return errors.Wrapf(ErrNotFound, "no such key %s", id)
    }
    k.results[res.ID] = res
    return nil
}

func (s *inMemoryStore) Release(resultID string) error {
    return errors.Errorf("not-implemented")
}

func (s *inMemoryStore) AddLink(id string, link CacheInfoLink, target string) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    k, ok := s.byID[id]
    if !ok {
        return errors.Wrapf(ErrNotFound, "no such key %s", id)
    }
    m, ok := k.links[link]
    if !ok {
        m = map[string]struct{}{}
        k.links[link] = m
    }

    m[target] = struct{}{}
    return nil
}

func (s *inMemoryStore) WalkLinks(id string, link CacheInfoLink, fn func(id string) error) error {
    s.mu.RLock()
    defer s.mu.RUnlock()
    k, ok := s.byID[id]
    if !ok {
        return errors.Wrapf(ErrNotFound, "no such key %s", id)
    }
    for target := range k.links[link] {
        if err := fn(target); err != nil {
            return err
        }
    }
    return nil
}

func NewInMemoryResultStorage() CacheResultStorage {
    return &inMemoryResultStore{m: &sync.Map{}}
}

type inMemoryResultStore struct {
    m *sync.Map
}

func (s *inMemoryResultStore) Save(r Result) (CacheResult, error) {
    s.m.Store(r.ID(), r)
    return CacheResult{ID: r.ID(), CreatedAt: time.Now()}, nil
}

func (s *inMemoryResultStore) Load(ctx context.Context, res CacheResult) (Result, error) {
    v, ok := s.m.Load(res.ID)
    if !ok {
        return nil, errors.WithStack(ErrNotFound)
    }
    return v.(Result), nil
}

func (s *inMemoryResultStore) LoadRemote(ctx context.Context, res CacheResult) (*Remote, error) {
    return nil, nil
}
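
For completeness, a sketch (not part of the diff) of the result side of the split: Save keeps the live Result and returns only ID/CreatedAt metadata, and because this store has no remote representation, LoadRemote yields nil and the exporter above falls back to converting the loaded Result. The function name is hypothetical and the Result is assumed to come from elsewhere.

package solver

import "context"

// exampleResultRoundTrip round-trips a result through the in-memory
// CacheResultStorage.
func exampleResultRoundTrip(ctx context.Context, r Result) (Result, error) {
    store := NewInMemoryResultStorage()

    // Save keeps the live Result in a sync.Map and hands back only the
    // metadata (ID, CreatedAt) that the cache key storage records.
    cr, err := store.Save(r)
    if err != nil {
        return nil, err
    }

    // The in-memory store never builds a remote form, so this returns nil.
    if _, err := store.LoadRemote(ctx, cr); err != nil {
        return nil, err
    }

    // Load resolves the metadata record back to the concrete Result.
    return store.Load(ctx, cr)
}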