Merge pull request #777 from tonistiigi/export-cache-inline

allow exporting cache metadata in the image config
docker-19.03
Akihiro Suda 2019-01-27 06:40:59 +01:00 committed by GitHub
commit 3ba3f5b1ff
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 607 additions and 33 deletions

View File

@ -4,13 +4,20 @@ import (
"context"
"encoding/json"
"io"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
// ResolveCacheImporterFunc returns importer and descriptor.
@ -55,7 +62,7 @@ func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispec.Descri
}
if configDesc.Digest == "" {
return nil, errors.Errorf("invalid build cache from %+v, lacks manifest with MediaType=%s", desc, v1.CacheConfigMediaTypeV0)
return ci.importInlineCache(ctx, dt, id, w)
}
dt, err = readBlob(ctx, ci.provider, configDesc)
@ -95,3 +102,169 @@ func readBlob(ctx context.Context, provider content.Provider, desc ocispec.Descr
}
return dt, err
}
// importInlineCache builds a cache manager from cache metadata that was
// inlined into image configs (the "moby.buildkit.cache.v0" key). dt is a
// manifest or manifest-list blob; every image manifest reachable from it
// is scanned concurrently and any inline cache records found are merged
// into a single cache chain.
func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte, id string, w worker.Worker) (solver.CacheManager, error) {
	m := map[digest.Digest][]byte{}

	if err := ci.allDistributionManifests(ctx, dt, m); err != nil {
		return nil, err
	}

	var mu sync.Mutex
	cc := v1.NewCacheChains()

	eg, ctx := errgroup.WithContext(ctx)
	for dgst, dt := range m {
		func(dgst digest.Digest, dt []byte) { // shadow loop vars for the goroutine (pre-Go 1.22 capture semantics)
			eg.Go(func() error {
				var mfst ocispec.Manifest
				if err := json.Unmarshal(dt, &mfst); err != nil {
					return err
				}

				// Skip blobs that do not look like complete image manifests.
				if mfst.Config.Digest == "" || len(mfst.Layers) == 0 {
					return nil
				}

				p, err := content.ReadBlob(ctx, ci.provider, mfst.Config)
				if err != nil {
					return err
				}

				var img image
				if err := json.Unmarshal(p, &img); err != nil {
					return err
				}

				if len(img.Rootfs.DiffIDs) != len(mfst.Layers) {
					logrus.Warnf("invalid image with mismatching manifest and config")
					return nil
				}

				// No inline cache metadata in this image: nothing to import.
				if img.Cache == nil {
					return nil
				}

				var config v1.CacheConfig
				if err := json.Unmarshal(img.Cache, &config.Records); err != nil {
					return err
				}

				createdDates, createdMsg, err := parseCreatedLayerInfo(img)
				if err != nil {
					return err
				}

				layers := v1.DescriptorProvider{}
				for i, l := range mfst.Layers {
					if l.Annotations == nil {
						l.Annotations = map[string]string{}
					}
					// History can legitimately contain fewer non-empty
					// entries than there are layers; guard the index so a
					// malformed image cannot panic the whole import.
					if i < len(createdDates) {
						if createdAt := createdDates[i]; createdAt != "" {
							l.Annotations["buildkit/createdat"] = createdAt
						}
					}
					if i < len(createdMsg) {
						if createdBy := createdMsg[i]; createdBy != "" {
							l.Annotations["buildkit/description"] = createdBy
						}
					}
					// Uncompressed digest annotation allows deduplication on
					// extraction.
					l.Annotations["containerd.io/uncompressed"] = img.Rootfs.DiffIDs[i].String()
					layers[l.Digest] = v1.DescriptorProviderPair{
						Descriptor: l,
						Provider:   ci.provider,
					}
					config.Layers = append(config.Layers, v1.CacheLayer{
						Blob:        l.Digest,
						ParentIndex: i - 1,
					})
				}

				// NOTE: the original marshaled config into dt here and never
				// used the result; that dead work is removed.

				// cc is shared across goroutines; guard the merge. Unlock
				// before returning on error — the original returned while
				// holding mu, deadlocking the remaining goroutines (and
				// therefore eg.Wait) whenever ParseConfig failed.
				mu.Lock()
				err = v1.ParseConfig(config, layers, cc)
				mu.Unlock()
				return err
			})
		}(dgst, dt)
	}

	if err := eg.Wait(); err != nil {
		return nil, err
	}

	keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
	if err != nil {
		return nil, err
	}
	return solver.NewCacheManager(id, keysStorage, resultStorage), nil
}
// allDistributionManifests recursively gathers every image manifest
// reachable from dt (itself a manifest or manifest-list blob) into m,
// keyed by content digest. Blobs of any other media type are ignored.
func (ci *contentCacheImporter) allDistributionManifests(ctx context.Context, dt []byte, m map[digest.Digest][]byte) error {
	mediaType, err := imageutil.DetectManifestBlobMediaType(dt)
	if err != nil {
		return err
	}

	switch mediaType {
	case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
		m[digest.FromBytes(dt)] = dt

	case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
		var idx ocispec.Index
		if err := json.Unmarshal(dt, &idx); err != nil {
			return err
		}
		for _, desc := range idx.Manifests {
			// Already collected via another path — avoid re-reading.
			if _, done := m[desc.Digest]; done {
				continue
			}
			blob, err := content.ReadBlob(ctx, ci.provider, desc)
			if err != nil {
				return err
			}
			if err := ci.allDistributionManifests(ctx, blob, m); err != nil {
				return err
			}
		}
	}
	return nil
}
// image is a partial OCI image config; only the fields needed for
// importing inline cache metadata are decoded.
type image struct {
	Rootfs struct {
		// DiffIDs are the uncompressed layer digests, in image order.
		DiffIDs []digest.Digest `json:"diff_ids"`
	} `json:"rootfs"`
	// Cache holds the raw inline cache records stored under the
	// "moby.buildkit.cache.v0" config key; nil when the image carries
	// no inline cache.
	Cache []byte `json:"moby.buildkit.cache.v0"`
	// History mirrors the OCI config history entries used to recover
	// per-layer creation time and command.
	History []struct {
		Created    *time.Time `json:"created,omitempty"`
		CreatedBy  string     `json:"created_by,omitempty"`
		EmptyLayer bool       `json:"empty_layer,omitempty"`
	} `json:"history,omitempty"`
}
// parseCreatedLayerInfo extracts, for every non-empty layer in the
// image history, its creation timestamp (RFC 3339 text, "" when unset)
// and the command that created it. The two returned slices are parallel.
func parseCreatedLayerInfo(img image) ([]string, []string, error) {
	dates := make([]string, 0, len(img.Rootfs.DiffIDs))
	createdBy := make([]string, 0, len(img.Rootfs.DiffIDs))
	for _, h := range img.History {
		if h.EmptyLayer {
			// Empty-layer entries have no corresponding diff; skip.
			continue
		}
		var created string
		if h.Created != nil {
			txt, err := h.Created.MarshalText()
			if err != nil {
				return nil, nil, err
			}
			created = string(txt)
		}
		dates = append(dates, created)
		createdBy = append(createdBy, h.CreatedBy)
	}
	return dates, createdBy, nil
}

92
cache/remotecache/inline/inline.go vendored Normal file
View File

@ -0,0 +1,92 @@
package registry
import (
"context"
"encoding/json"
"github.com/moby/buildkit/cache/remotecache"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/solver"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
// ResolveCacheExporterFunc returns a resolver for the inline cache
// exporter; the inline exporter accepts no attributes.
func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc {
	return func(ctx context.Context, _ map[string]string) (remotecache.Exporter, error) {
		exp := NewExporter()
		return exp, nil
	}
}
// NewExporter creates an inline cache exporter that accumulates cache
// records in memory; the same chain backs both the exporter target and
// the ExportForLayers serialization.
func NewExporter() remotecache.Exporter {
	chains := v1.NewCacheChains()
	return &exporter{
		CacheExporterTarget: chains,
		chains:              chains,
	}
}
// exporter implements remotecache.Exporter by collecting cache records
// into an in-memory chain; ExportForLayers later serializes them for
// embedding into an image config.
type exporter struct {
	solver.CacheExporterTarget
	// chains is the same value as CacheExporterTarget, kept with its
	// concrete type so Marshal can be called on it.
	chains *v1.CacheChains
}
// Finalize implements remotecache.Exporter. The inline exporter writes
// nothing on its own — the records are embedded into the image config
// via ExportForLayers — so there is no response metadata to return.
func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) {
	return nil, nil
}
// ExportForLayers marshals the collected cache records, restricted to
// the given image layer digests, into the JSON blob that is embedded
// into the image config. It returns (nil, nil) when none of the records
// match the image's layers.
func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) {
	config, descs, err := ce.chains.Marshal()
	if err != nil {
		return nil, err
	}

	// Keep only descriptors for layers actually present in the image.
	matching := map[digest.Digest]v1.DescriptorProviderPair{}
	for _, dgst := range layers {
		if pair, ok := descs[dgst]; ok {
			matching[dgst] = pair
		}
	}

	// Re-parse through a fresh chain so records referencing layers
	// outside the image are dropped.
	cc := v1.NewCacheChains()
	if err := v1.ParseConfig(*config, matching, cc); err != nil {
		return nil, err
	}

	cfg, _, err := cc.Marshal()
	if err != nil {
		return nil, err
	}

	if len(cfg.Layers) == 0 {
		logrus.Warn("failed to match any cache with layers")
		return nil, nil
	}

	// reorder layers based on the order in the image
	sortCache := map[digest.Digest]int{}
	for i, rec := range cfg.Records {
		for j, res := range rec.Results {
			res.LayerIndex = getSortedLayerIndex(res.LayerIndex, cfg.Layers, sortCache)
			rec.Results[j] = res
		}
		cfg.Records[i] = rec
	}

	return json.Marshal(cfg.Records)
}
// getSortedLayerIndex maps a layer's index in the cache-config layer
// list to its position in the deduplicated, image-ordered sequence.
// cache memoizes positions by blob digest across calls. A parent chain
// terminates at ParentIndex == -1; assumes parents precede children
// (no cycles) as produced by the importer.
func getSortedLayerIndex(idx int, layers []v1.CacheLayer, cache map[digest.Digest]int) int {
	if idx == -1 {
		return -1
	}
	layer := layers[idx]
	if pos, ok := cache[layer.Blob]; ok {
		return pos
	}
	pos := getSortedLayerIndex(layer.ParentIndex, layers, cache) + 1
	cache[layer.Blob] = pos
	return pos
}

View File

@ -27,6 +27,7 @@ func NewCacheKeyStorage(cc *CacheChains, w worker.Worker) (solver.CacheKeyStorag
results := &cacheResultStorage{
w: w,
byID: storage.byID,
byItem: storage.byItem,
byResult: storage.byResult,
}
@ -155,8 +156,22 @@ func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn fu
return nil
}
// TODO:
// WalkBacklinks invokes fn once for every link in the store that points
// at id, passing the source item's id and the link metadata. Iteration
// stops at the first error returned by fn.
func (cs *cacheKeyStorage) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error {
	for srcID, it := range cs.byID {
		for l, targets := range it.links {
			for _, target := range targets {
				if target != id {
					continue
				}
				link := solver.CacheInfoLink{
					Input:    solver.Index(l.input),
					Selector: digest.Digest(l.selector),
					Digest:   l.dgst,
				}
				if err := fn(srcID, link); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
@ -190,19 +205,54 @@ type cacheResultStorage struct {
w worker.Worker
byID map[string]*itemWithOutgoingLinks
byResult map[string]map[string]struct{}
byItem map[*item]string
}
func (cs *cacheResultStorage) Save(res solver.Result, createdAt time.Time) (solver.CacheResult, error) {
return solver.CacheResult{}, errors.Errorf("importer is immutable")
}
func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) {
remote, err := cs.LoadRemote(ctx, res)
if err != nil {
func (cs *cacheResultStorage) LoadWithParents(ctx context.Context, res solver.CacheResult) (map[string]solver.Result, error) {
v := cs.byResultID(res.ID)
if v == nil || v.result == nil {
return nil, errors.WithStack(solver.ErrNotFound)
}
m := map[string]solver.Result{}
if err := v.walkAllResults(func(i *item) error {
if i.result == nil {
return nil
}
id, ok := cs.byItem[i]
if !ok {
return nil
}
if isSubRemote(*i.result, *v.result) {
ref, err := cs.w.FromRemote(ctx, i.result)
if err != nil {
return err
}
m[id] = worker.NewWorkerRefResult(ref, cs.w)
}
return nil
}); err != nil {
for _, v := range m {
v.Release(context.TODO())
}
return nil, err
}
ref, err := cs.w.FromRemote(ctx, remote)
return m, nil
}
func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) {
item := cs.byResultID(res.ID)
if item == nil || item.result == nil {
return nil, errors.WithStack(solver.ErrNotFound)
}
ref, err := cs.w.FromRemote(ctx, item.result)
if err != nil {
return nil, err
}
@ -210,8 +260,8 @@ func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult)
}
func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) {
if r := cs.byResultID(res.ID); r != nil {
return r, nil
if r := cs.byResultID(res.ID); r != nil && r.result != nil {
return r.result, nil
}
return nil, errors.WithStack(solver.ErrNotFound)
}
@ -220,7 +270,7 @@ func (cs *cacheResultStorage) Exists(id string) bool {
return cs.byResultID(id) != nil
}
func (cs *cacheResultStorage) byResultID(resultID string) *solver.Remote {
func (cs *cacheResultStorage) byResultID(resultID string) *itemWithOutgoingLinks {
m, ok := cs.byResult[resultID]
if !ok || len(m) == 0 {
return nil
@ -229,9 +279,7 @@ func (cs *cacheResultStorage) byResultID(resultID string) *solver.Remote {
for id := range m {
it, ok := cs.byID[id]
if ok {
if r := it.result; r != nil {
return r
}
return it
}
}

View File

@ -128,6 +128,20 @@ func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector stri
c.links[index][link{src: src, selector: selector}] = struct{}{}
}
// walkAllResults calls fn on c and then, depth-first, on every item
// reachable through c's outgoing links. Traversal aborts at the first
// error. No cycle detection — assumes the link graph is acyclic.
func (c *item) walkAllResults(fn func(i *item) error) error {
	if err := fn(c); err != nil {
		return err
	}
	for _, linkSet := range c.links {
		for l := range linkSet {
			if err := l.src.walkAllResults(fn); err != nil {
				return err
			}
		}
	}
	return nil
}
type nopRecord struct {
}

View File

@ -6,7 +6,7 @@ package cacheimport
// https://github.com/opencontainers/image-spec/blob/master/image-index.md .
// Manifests array contains descriptors to the cache layers and one instance of
// build cache config with media type application/vnd.buildkit.cacheconfig.v0 .
// The cache layer descripts need to have an annotation with uncompressed digest
// The cache layer descriptors need to have an annotation with uncompressed digest
// to allow deduplication on extraction and optionally "buildkit/createdat"
// annotation to support maintaining original timestamps.
//

View File

@ -15,6 +15,10 @@ func Parse(configJSON []byte, provider DescriptorProvider, t solver.CacheExporte
return err
}
return ParseConfig(config, provider, t)
}
func ParseConfig(config CacheConfig, provider DescriptorProvider, t solver.CacheExporterTarget) error {
cache := map[int]solver.CacheExporterRecord{}
for i := range config.Records {
@ -22,7 +26,6 @@ func Parse(configJSON []byte, provider DescriptorProvider, t solver.CacheExporte
return err
}
}
return nil
}
@ -57,7 +60,9 @@ func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.
if err != nil {
return nil, err
}
r.AddResult(res.CreatedAt, remote)
if remote != nil {
r.AddResult(res.CreatedAt, remote)
}
}
cache[idx] = r
@ -78,7 +83,7 @@ func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, v
descPair, ok := provider[l.Blob]
if !ok {
return nil, errors.Errorf("missing blob for %s", l.Blob)
return nil, nil
}
var r *solver.Remote
@ -88,6 +93,9 @@ func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, v
if err != nil {
return nil, err
}
if r == nil {
return nil, nil
}
r.Descriptors = append(r.Descriptors, descPair.Descriptor)
mp := contentutil.NewMultiProvider(r.Provider)
mp.Add(descPair.Descriptor.Digest, descPair.Provider)

View File

@ -304,3 +304,15 @@ func marshalItem(it *item, state *marshalState) error {
state.records = append(state.records, rec)
return nil
}
// isSubRemote reports whether sub's descriptor chain is a prefix of
// main's — i.e. sub describes a subset of main's layers starting from
// the base. Only digests are compared.
func isSubRemote(sub, main solver.Remote) bool {
	if len(sub.Descriptors) > len(main.Descriptors) {
		return false
	}
	for i, d := range sub.Descriptors {
		if d.Digest != main.Descriptors[i].Digest {
			return false
		}
	}
	return true
}

View File

@ -82,6 +82,7 @@ func TestClientIntegration(t *testing.T) {
testSSHMount,
testStdinClosed,
testHostnameLookup,
testBasicInlineCacheImportExport,
},
integration.WithMirroredImages(integration.OfficialImages("busybox:latest", "alpine:latest")),
)
@ -1252,6 +1253,79 @@ func testBasicLocalCacheImportExport(t *testing.T, sb integration.Sandbox) {
testBasicCacheImportExport(t, sb, o)
}
// testBasicInlineCacheImportExport builds an image with inline cache
// metadata and pushes it to a sandbox registry, prunes all local state,
// then rebuilds importing the cache from the pushed image and asserts
// the resulting digest is identical — proving every step (including the
// layer producing random content) was served from the imported cache.
func testBasicInlineCacheImportExport(t *testing.T, sb integration.Sandbox) {
	requiresLinux(t)
	registry, err := sb.NewRegistry()
	if errors.Cause(err) == integration.ErrorRequirements {
		t.Skip(err.Error())
	}
	require.NoError(t, err)

	c, err := New(context.TODO(), sb.Address())
	require.NoError(t, err)
	defer c.Close()

	busybox := llb.Image("busybox:latest")
	st := llb.Scratch()

	run := func(cmd string) {
		st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
	}

	run(`sh -c "echo -n foobar > const"`)
	// Random content: the final digest can only match on rebuild if this
	// step is a cache hit rather than re-executed.
	run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`)

	def, err := st.Marshal()
	require.NoError(t, err)

	target := registry + "/buildkit/testexportinline:latest"

	// First solve: push the image with inline cache metadata embedded.
	resp, err := c.Solve(context.TODO(), def, SolveOpt{
		Exporter: ExporterImage,
		ExporterAttrs: map[string]string{
			"name": target,
			"push": "true",
		},
		CacheExports: []CacheOptionsEntry{
			{
				Type: "inline",
			},
		},
	}, nil)
	require.NoError(t, err)

	dgst, ok := resp.ExporterResponse["containerimage.digest"]
	require.Equal(t, ok, true)

	// Drop all local state so the second solve can only be satisfied by
	// the cache imported from the registry.
	err = c.Prune(context.TODO(), nil, PruneAll)
	require.NoError(t, err)

	checkAllRemoved(t, c, sb)

	// Second solve: import cache from the image pushed above.
	resp, err = c.Solve(context.TODO(), def, SolveOpt{
		Exporter: ExporterImage,
		CacheExports: []CacheOptionsEntry{
			{
				Type: "inline",
			},
		},
		CacheImports: []CacheOptionsEntry{
			{
				Type: "registry",
				Attrs: map[string]string{
					"ref": target,
				},
			},
		},
	}, nil)
	require.NoError(t, err)

	dgst2, ok := resp.ExporterResponse["containerimage.digest"]
	require.Equal(t, ok, true)

	// Identical digests prove the random-content layer was reused.
	require.Equal(t, dgst, dgst2)
}
func testCachedMounts(t *testing.T, sb integration.Sandbox) {
requiresLinux(t)
c, err := New(context.TODO(), sb.Address())

View File

@ -22,6 +22,7 @@ import (
"github.com/docker/go-connections/sockets"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/moby/buildkit/cache/remotecache"
inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
localremotecache "github.com/moby/buildkit/cache/remotecache/local"
registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
"github.com/moby/buildkit/client"
@ -513,6 +514,7 @@ func newController(c *cli.Context, cfg *config.Config) (*control.Controller, err
remoteCacheExporterFuncs := map[string]remotecache.ResolveCacheExporterFunc{
"registry": registryremotecache.ResolveCacheExporterFunc(sessionManager, resolverFn),
"local": localremotecache.ResolveCacheExporterFunc(sessionManager),
"inline": inlineremotecache.ResolveCacheExporterFunc(),
}
remoteCacheImporterFuncs := map[string]remotecache.ResolveCacheImporterFunc{
"registry": registryremotecache.ResolveCacheImporterFunc(sessionManager, resolverFn),

View File

@ -3,6 +3,7 @@ package exptypes
import specs "github.com/opencontainers/image-spec/specs-go/v1"
const ExporterImageConfigKey = "containerimage.config"
const ExporterInlineCache = "containerimage.inlinecache"
const ExporterPlatformsKey = "refs.platforms"
type Platforms struct {

View File

@ -56,7 +56,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool
if err != nil {
return nil, err
}
return ic.commitDistributionManifest(ctx, inp.Ref, inp.Metadata[exptypes.ExporterImageConfigKey], layers[0], oci)
return ic.commitDistributionManifest(ctx, inp.Ref, inp.Metadata[exptypes.ExporterImageConfigKey], layers[0], oci, inp.Metadata[exptypes.ExporterInlineCache])
}
var p exptypes.Platforms
@ -108,7 +108,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool
}
config := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.ID)]
desc, err := ic.commitDistributionManifest(ctx, r, config, layers[layersMap[p.ID]], oci)
desc, err := ic.commitDistributionManifest(ctx, r, config, layers[layersMap[p.ID]], oci, inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, p.ID)])
if err != nil {
return nil, err
}
@ -173,7 +173,7 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refs ...cache.Immutable
return out, nil
}
func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache.ImmutableRef, config []byte, layers []blobs.DiffPair, oci bool) (*ocispec.Descriptor, error) {
func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache.ImmutableRef, config []byte, layers []blobs.DiffPair, oci bool, cache []byte) (*ocispec.Descriptor, error) {
if len(config) == 0 {
var err error
config, err = emptyImageConfig()
@ -189,7 +189,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache
diffPairs, history := normalizeLayersAndHistory(layers, history, ref)
config, err = patchImageConfig(config, diffPairs, history)
config, err = patchImageConfig(config, diffPairs, history, cache)
if err != nil {
return nil, err
}
@ -312,7 +312,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) {
return config.History, nil
}
func patchImageConfig(dt []byte, dps []blobs.DiffPair, history []ocispec.History) ([]byte, error) {
func patchImageConfig(dt []byte, dps []blobs.DiffPair, history []ocispec.History, cache []byte) ([]byte, error) {
m := map[string]json.RawMessage{}
if err := json.Unmarshal(dt, &m); err != nil {
return nil, errors.Wrap(err, "failed to parse image config for patch")
@ -349,6 +349,14 @@ func patchImageConfig(dt []byte, dps []blobs.DiffPair, history []ocispec.History
m["created"] = dt
}
if cache != nil {
dt, err := json.Marshal(cache)
if err != nil {
return nil, err
}
m["moby.buildkit.cache.v0"] = dt
}
dt, err = json.Marshal(m)
return dt, errors.Wrap(err, "failed to marshal config after patch")
}

View File

@ -144,6 +144,79 @@ func (c *cacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, erro
return c.results.Load(ctx, res)
}
// LoadedResult pairs a loaded Result with the cache key and stored
// cache-result record it was loaded from.
type LoadedResult struct {
	Result      Result
	CacheResult CacheResult
	CacheKey    *CacheKey
}
// filterResults collects from m the loaded results that belong to ck
// and, recursively, to its dependency keys. Matched entries are removed
// from m. On error every already-collected result is released and the
// error is returned.
func (c *cacheManager) filterResults(m map[string]Result, ck *CacheKey) (results []LoadedResult, err error) {
	id := c.getID(ck)
	if err := c.backend.WalkResults(id, func(cr CacheResult) error {
		res, ok := m[id]
		if ok {
			results = append(results, LoadedResult{
				Result:      res,
				CacheKey:    ck,
				CacheResult: cr,
			})
			delete(m, id)
		}
		return nil
	}); err != nil {
		for _, r := range results {
			r.Result.Release(context.TODO())
		}
		// BUGFIX: the original fell through here without returning,
		// continuing the walk and eventually returning already-released
		// results with a nil error. Propagate the failure instead.
		return nil, err
	}
	for _, keys := range ck.Deps() {
		for _, key := range keys {
			res, err := c.filterResults(m, key.CacheKey.CacheKey)
			if err != nil {
				for _, r := range results {
					r.Result.Release(context.TODO())
				}
				return nil, err
			}
			results = append(results, res...)
		}
	}
	return
}
// LoadWithParents loads the result for rec together with the results of
// its parent records, when the backing result store supports it. Falls
// back to a plain Load (single result) otherwise.
func (c *cacheManager) LoadWithParents(ctx context.Context, rec *CacheRecord) ([]LoadedResult, error) {
	lwp, ok := c.results.(interface {
		LoadWithParents(context.Context, CacheResult) (map[string]Result, error)
	})
	if !ok {
		// Store cannot load parents; return just this record's result.
		res, err := c.Load(ctx, rec)
		if err != nil {
			return nil, err
		}
		return []LoadedResult{{Result: res, CacheKey: rec.key, CacheResult: CacheResult{ID: c.getID(rec.key), CreatedAt: rec.CreatedAt}}}, nil
	}

	c.mu.RLock()
	defer c.mu.RUnlock()

	cr, err := c.backend.Load(c.getID(rec.key), rec.ID)
	if err != nil {
		return nil, err
	}

	m, err := lwp.LoadWithParents(ctx, cr)
	if err != nil {
		return nil, err
	}

	results, err := c.filterResults(m, rec.key)
	if err != nil {
		for _, r := range m {
			r.Release(context.TODO())
		}
		// BUGFIX: the original fell through to "return results, nil"
		// here, so a failed load surfaced as an empty success. Return
		// the error after releasing the loaded results.
		return nil, err
	}
	return results, nil
}
func (c *cacheManager) Save(k *CacheKey, r Result, createdAt time.Time) (*ExportableCacheKey, error) {
c.mu.Lock()
defer c.mu.Unlock()

View File

@ -67,15 +67,27 @@ func (cm *combinedCacheManager) Query(inp []CacheKeyWithSelector, inputIndex Ind
return out, nil
}
func (cm *combinedCacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) {
res, err := rec.cacheManager.Load(ctx, rec)
func (cm *combinedCacheManager) Load(ctx context.Context, rec *CacheRecord) (res Result, err error) {
results, err := rec.cacheManager.LoadWithParents(ctx, rec)
if err != nil {
return nil, err
}
if _, err := cm.main.Save(rec.key, res, rec.CreatedAt); err != nil {
return nil, err
defer func() {
for i, res := range results {
if err == nil && i == 0 {
continue
}
res.Result.Release(context.TODO())
}
}()
if rec.cacheManager != cm.main {
for _, res := range results {
if _, err := cm.main.Save(res.CacheKey, res.Result, res.CacheResult.CreatedAt); err != nil {
return nil, err
}
}
}
return res, nil
return results[0].Result, nil
}
func (cm *combinedCacheManager) Save(key *CacheKey, s Result, createdAt time.Time) (*ExportableCacheKey, error) {

View File

@ -2,6 +2,7 @@ package llbsolver
import (
"context"
"fmt"
"strings"
"time"
@ -10,6 +11,7 @@ import (
"github.com/moby/buildkit/client"
controlgateway "github.com/moby/buildkit/control/gateway"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/frontend/gateway"
"github.com/moby/buildkit/identity"
@ -138,7 +140,7 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
}()
var exporterResponse map[string]string
if exp := exp.Exporter; exp != nil {
if e := exp.Exporter; e != nil {
inp := exporter.Source{
Metadata: res.Metadata,
}
@ -151,6 +153,14 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
return nil, errors.Errorf("invalid reference: %T", res.Sys())
}
inp.Ref = workerRef.ImmutableRef
dt, err := inlineCache(ctx, exp.CacheExporter, res)
if err != nil {
return nil, err
}
if dt != nil {
inp.Metadata[exptypes.ExporterInlineCache] = dt
}
}
if res.Refs != nil {
m := make(map[string]cache.ImmutableRef, len(res.Refs))
@ -163,13 +173,21 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
return nil, errors.Errorf("invalid reference: %T", res.Sys())
}
m[k] = workerRef.ImmutableRef
dt, err := inlineCache(ctx, exp.CacheExporter, res)
if err != nil {
return nil, err
}
if dt != nil {
inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, k)] = dt
}
}
}
inp.Refs = m
}
if err := inVertexContext(j.Context(ctx), exp.Name(), "", func(ctx context.Context) error {
exporterResponse, err = exp.Export(ctx, inp)
if err := inVertexContext(j.Context(ctx), e.Name(), "", func(ctx context.Context) error {
exporterResponse, err = e.Export(ctx, inp)
return err
}); err != nil {
return nil, err
@ -218,6 +236,37 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest
}, nil
}
// inlineCache exports the cache records reachable from res into e and
// serializes them, scoped to the result's layers, for embedding into
// the image config. Returns (nil, nil) when e does not support
// layer-scoped export or when no remote is available for the result.
func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult) ([]byte, error) {
	if efl, ok := e.(interface {
		ExportForLayers([]digest.Digest) ([]byte, error)
	}); ok {
		workerRef, ok := res.Sys().(*worker.WorkerRef)
		if !ok {
			return nil, errors.Errorf("invalid reference: %T", res.Sys())
		}

		// NOTE(review): errors from GetRemote are swallowed here and
		// treated the same as "no remote" — presumably inline cache is
		// best-effort. Confirm this is intended rather than masking
		// real failures.
		remote, err := workerRef.Worker.GetRemote(ctx, workerRef.ImmutableRef, true)
		if err != nil || remote == nil {
			return nil, nil
		}

		digests := make([]digest.Digest, 0, len(remote.Descriptors))
		for _, desc := range remote.Descriptors {
			digests = append(digests, desc.Digest)
		}

		// Export only the minimal chain for the first cache key of the
		// result into e before serializing.
		if _, err := res.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
			Convert: workerRefConverter,
			Mode:    solver.CacheExportModeMin,
		}); err != nil {
			return nil, err
		}
		return efl.ExportForLayers(digests)
	}
	return nil, nil
}
func (s *Solver) Status(ctx context.Context, id string, statusChan chan *client.SolveStatus) error {
j, err := s.solver.Get(id)
if err != nil {

View File

@ -135,17 +135,21 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo
func DetectManifestMediaType(ra content.ReaderAt) (string, error) {
// TODO: schema1
p := make([]byte, ra.Size())
if _, err := ra.ReadAt(p, 0); err != nil {
dt := make([]byte, ra.Size())
if _, err := ra.ReadAt(dt, 0); err != nil {
return "", err
}
return DetectManifestBlobMediaType(dt)
}
func DetectManifestBlobMediaType(dt []byte) (string, error) {
var mfst struct {
MediaType string `json:"mediaType"`
Config json.RawMessage `json:"config"`
}
if err := json.Unmarshal(p, &mfst); err != nil {
if err := json.Unmarshal(dt, &mfst); err != nil {
return "", err
}

View File

@ -372,7 +372,11 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.I
return nil, err
}
}
ref, err := w.CacheManager.Get(ctx, chainID, cache.WithDescription(fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)), cache.WithCreationTime(tm))
descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
descr = v
}
ref, err := w.CacheManager.Get(ctx, chainID, cache.WithDescription(descr), cache.WithCreationTime(tm))
if err != nil {
return nil, err
}