package cache

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"sync"
	"testing"
	"time"

	ctdcompression "github.com/containerd/containerd/archive/compression"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/local"
	"github.com/containerd/containerd/diff/apply"
	"github.com/containerd/containerd/diff/walking"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/leases"
	ctdmetadata "github.com/containerd/containerd/metadata"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/snapshots"
	"github.com/containerd/containerd/snapshots/native"
	"github.com/containerd/stargz-snapshotter/estargz"
	"github.com/moby/buildkit/cache/metadata"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/snapshot"
	containerdsnapshot "github.com/moby/buildkit/snapshot/containerd"
	"github.com/moby/buildkit/util/compression"
	"github.com/moby/buildkit/util/contentutil"
	"github.com/moby/buildkit/util/leaseutil"
	"github.com/moby/buildkit/util/winlayers"
	digest "github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"
	bolt "go.etcd.io/bbolt"
	"golang.org/x/sync/errgroup"
)

type cmOpt struct {
	snapshotterName string
	snapshotter     snapshots.Snapshotter
	tmpdir          string
}

type cmOut struct {
	manager Manager
	lm      leases.Manager
	cs      content.Store
}

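// newCacheManager builds a cache Manager backed by temporary on-disk stores: a
// native snapshotter, a local content store, and a containerd metadata DB, all
// wired through the namespace carried by ctx. The returned cleanup func runs
// the accumulated teardown steps in reverse order.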
func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() error, err error) {
	ns, ok := namespaces.Namespace(ctx)
	if !ok {
		return nil, nil, errors.Errorf("namespace required for test")
	}

	if opt.snapshotterName == "" {
		opt.snapshotterName = "native"
	}

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	if err != nil {
		return nil, nil, err
	}

	defers := make([]func() error, 0)
	cleanup = func() error {
		var err error
		for i := range defers {
			if err1 := defers[len(defers)-1-i](); err1 != nil && err == nil {
				err = err1
			}
		}
		return err
	}
	defer func() {
		if err != nil {
			cleanup()
		}
	}()
	if opt.tmpdir == "" {
		defers = append(defers, func() error {
			return os.RemoveAll(tmpdir)
		})
	} else {
		os.RemoveAll(tmpdir)
		tmpdir = opt.tmpdir
	}

	if opt.snapshotter == nil {
		snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
		if err != nil {
			return nil, nil, err
		}
		opt.snapshotter = snapshotter
	}

	md, err := metadata.NewStore(filepath.Join(tmpdir, "metadata.db"))
	if err != nil {
		return nil, nil, err
	}

	store, err := local.NewStore(tmpdir)
	if err != nil {
		return nil, nil, err
	}

	db, err := bolt.Open(filepath.Join(tmpdir, "containerdmeta.db"), 0644, nil)
	if err != nil {
		return nil, nil, err
	}
	defers = append(defers, func() error {
		return db.Close()
	})

	mdb := ctdmetadata.NewDB(db, store, map[string]snapshots.Snapshotter{
		opt.snapshotterName: opt.snapshotter,
	})
	if err := mdb.Init(context.TODO()); err != nil {
		return nil, nil, err
	}

	store = containerdsnapshot.NewContentStore(mdb.ContentStore(), ns)
	lm := leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), ns)

	cm, err := NewManager(ManagerOpt{
		Snapshotter:    snapshot.FromContainerdSnapshotter(opt.snapshotterName, containerdsnapshot.NSSnapshotter(ns, mdb.Snapshotter(opt.snapshotterName)), nil),
		MetadataStore:  md,
		ContentStore:   store,
		LeaseManager:   lm,
		GarbageCollect: mdb.GarbageCollect,
		Applier:        winlayers.NewFileSystemApplierWithWindows(store, apply.NewFileSystemApplier(store)),
		Differ:         winlayers.NewWalkingDiffWithWindows(store, walking.NewWalkingDiff(store)),
	})
	if err != nil {
		return nil, nil, err
	}
	return &cmOut{
		manager: cm,
		lm:      lm,
		cs:      store,
	}, cleanup, nil
}

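// TestManager exercises the basic ref lifecycle: creating mutable refs,
// committing them, re-acquiring mutables, releasing refs, and pruning, while
// verifying disk usage counts at each step.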
func TestManager(t *testing.T) {
	t.Parallel()

	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)

	defer cleanup()
	cm := co.manager

	_, err = cm.Get(ctx, "foobar")
	require.Error(t, err)

	checkDiskUsage(ctx, t, cm, 0, 0)

	active, err := cm.New(ctx, nil, nil, CachePolicyRetain)
	require.NoError(t, err)

	m, err := active.Mount(ctx, false, nil)
	require.NoError(t, err)

	lm := snapshot.LocalMounter(m)
	target, err := lm.Mount()
	require.NoError(t, err)

	fi, err := os.Stat(target)
	require.NoError(t, err)
	require.Equal(t, fi.IsDir(), true)

	err = lm.Unmount()
	require.NoError(t, err)

	_, err = cm.GetMutable(ctx, active.ID())
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, ErrLocked))

	checkDiskUsage(ctx, t, cm, 1, 0)

	snap, err := active.Commit(ctx)
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 1, 0)

	_, err = cm.GetMutable(ctx, active.ID())
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, ErrLocked))

	err = snap.Release(ctx)
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 0, 1)

	active, err = cm.GetMutable(ctx, active.ID())
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 1, 0)

	snap, err = active.Commit(ctx)
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 1, 0)

	err = snap.(*immutableRef).finalizeLocked(ctx)
	require.NoError(t, err)

	err = snap.Release(ctx)
	require.NoError(t, err)

	_, err = cm.GetMutable(ctx, active.ID())
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errNotFound))

	_, err = cm.GetMutable(ctx, snap.ID())
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errInvalid))

	snap, err = cm.Get(ctx, snap.ID())
	require.NoError(t, err)

	snap2, err := cm.Get(ctx, snap.ID())
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 1, 0)

	err = snap.Release(ctx)
	require.NoError(t, err)

	active2, err := cm.New(ctx, snap2, nil, CachePolicyRetain)
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 2, 0)

	snap3, err := active2.Commit(ctx)
	require.NoError(t, err)

	err = snap2.Release(ctx)
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 2, 0)

	err = snap3.Release(ctx)
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 0, 2)

	buf := pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 0, 0)

	require.Equal(t, len(buf.all), 2)

	err = cm.Close()
	require.NoError(t, err)

	dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 0, len(dirs))
}

func TestLazyGetByBlob(t *testing.T) {
	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()
	cm := co.manager

	// Test for #2226 https://github.com/moby/buildkit/issues/2226, create lazy blobs with the same diff ID but
	// different digests (due to different compression) and make sure GetByBlob still works
	_, desc, err := mapToBlob(map[string]string{"foo": "bar"}, true)
	require.NoError(t, err)
	descHandlers := DescHandlers(make(map[digest.Digest]*DescHandler))
	descHandlers[desc.Digest] = &DescHandler{}
	diffID, err := diffIDFromDescriptor(desc)
	require.NoError(t, err)

	_, err = cm.GetByBlob(ctx, desc, nil, descHandlers)
	require.NoError(t, err)

	_, desc2, err := mapToBlob(map[string]string{"foo": "bar"}, false)
	require.NoError(t, err)
	descHandlers2 := DescHandlers(make(map[digest.Digest]*DescHandler))
	descHandlers2[desc2.Digest] = &DescHandler{}
	diffID2, err := diffIDFromDescriptor(desc2)
	require.NoError(t, err)

	require.NotEqual(t, desc.Digest, desc2.Digest)
	require.Equal(t, diffID, diffID2)

	_, err = cm.GetByBlob(ctx, desc2, nil, descHandlers2)
	require.NoError(t, err)
}

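// TestSnapshotExtract verifies that refs created from blobs stay lazy (no
// snapshot directories) until Extract is called, and that pruning only removes
// snapshots and blobs once the corresponding refs have been released.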
func TestSnapshotExtract(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("Depends on unimplemented containerd bind-mount support on Windows")
	}

	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)

	defer cleanup()

	cm := co.manager

	b, desc, err := mapToBlob(map[string]string{"foo": "bar"}, true)
	require.NoError(t, err)

	err = content.WriteBlob(ctx, co.cs, "ref1", bytes.NewBuffer(b), desc)
	require.NoError(t, err)

	snap, err := cm.GetByBlob(ctx, desc, nil)
	require.NoError(t, err)

	require.Equal(t, false, snap.Info().Extracted)

	b2, desc2, err := mapToBlob(map[string]string{"foo": "bar123"}, true)
	require.NoError(t, err)

	err = content.WriteBlob(ctx, co.cs, "ref1", bytes.NewBuffer(b2), desc2)
	require.NoError(t, err)

	snap2, err := cm.GetByBlob(ctx, desc2, snap)
	require.NoError(t, err)

	size, err := snap2.Size(ctx)
	require.NoError(t, err)
	require.Equal(t, int64(len(b2)), size)

	require.Equal(t, false, snap2.Info().Extracted)

	dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 0, len(dirs))

	checkNumBlobs(ctx, t, co.cs, 2)

	err = snap2.Extract(ctx, nil)
	require.NoError(t, err)

	require.Equal(t, true, snap.Info().Extracted)
	require.Equal(t, true, snap2.Info().Extracted)

	dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 2, len(dirs))

	buf := pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 2, 0)

	require.Equal(t, len(buf.all), 0)

	dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 2, len(dirs))

	checkNumBlobs(ctx, t, co.cs, 2)

	id := snap.ID()

	err = snap.Release(context.TODO())
	require.NoError(t, err)

	buf = pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 2, 0)

	dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 2, len(dirs))

	snap, err = cm.Get(ctx, id)
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 2, 0)

	err = snap2.Release(context.TODO())
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 1, 1)

	buf = pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 1, 0)

	require.Equal(t, len(buf.all), 1)

	dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 1, len(dirs))

	checkNumBlobs(ctx, t, co.cs, 1)

	err = snap.Release(context.TODO())
	require.NoError(t, err)

	buf = pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 0, 0)

	dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 0, len(dirs))

	checkNumBlobs(ctx, t, co.cs, 0)
}

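// TestExtractOnMutable commits a mutable ref, assigns it a blob via
// setBlob/setChains, then checks that a lazy child created with GetByBlob can
// be extracted on top of it and that pruning removes all snapshots and blobs
// once the refs are released.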
func TestExtractOnMutable(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("Depends on unimplemented containerd bind-mount support on Windows")
	}

	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)

	defer cleanup()

	cm := co.manager

	active, err := cm.New(ctx, nil, nil)
	require.NoError(t, err)

	snap, err := active.Commit(ctx)
	require.NoError(t, err)

	b, desc, err := mapToBlob(map[string]string{"foo": "bar"}, true)
	require.NoError(t, err)

	err = content.WriteBlob(ctx, co.cs, "ref1", bytes.NewBuffer(b), desc)
	require.NoError(t, err)

	b2, desc2, err := mapToBlob(map[string]string{"foo2": "1"}, true)
	require.NoError(t, err)

	err = content.WriteBlob(ctx, co.cs, "ref2", bytes.NewBuffer(b2), desc2)
	require.NoError(t, err)

	_, err = cm.GetByBlob(ctx, desc2, snap)
	require.Error(t, err)

	leaseCtx, done, err := leaseutil.WithLease(ctx, co.lm, leases.WithExpiration(0))
	require.NoError(t, err)

	compressionType := compression.FromMediaType(desc.MediaType)
	if compressionType == compression.UnknownCompression {
		t.Errorf("unhandled layer media type: %q", desc.MediaType)
	}
	err = snap.(*immutableRef).setBlob(leaseCtx, compressionType, desc)
	done(context.TODO())
	require.NoError(t, err)
	err = snap.(*immutableRef).setChains(leaseCtx)
	require.NoError(t, err)

	snap2, err := cm.GetByBlob(ctx, desc2, snap)
	require.NoError(t, err)

	err = snap.Release(context.TODO())
	require.NoError(t, err)

	require.Equal(t, false, snap2.Info().Extracted)

	size, err := snap2.Size(ctx)
	require.NoError(t, err)
	require.Equal(t, int64(len(b2)), size)

	dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 1, len(dirs))

	checkNumBlobs(ctx, t, co.cs, 2)

	err = snap2.Extract(ctx, nil)
	require.NoError(t, err)

	require.Equal(t, true, snap.Info().Extracted)
	require.Equal(t, true, snap2.Info().Extracted)

	buf := pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 2, 0)

	require.Equal(t, len(buf.all), 0)

	dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 2, len(dirs))

	err = snap2.Release(context.TODO())
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 0, 2)

	buf = pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 0, 0)

	require.Equal(t, len(buf.all), 2)

	dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 0, len(dirs))

	checkNumBlobs(ctx, t, co.cs, 0)
}

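// TestSetBlob covers the setBlob/setChains bookkeeping: propagation of DiffID,
// Blob, ChainID and BlobChainID across parent and child refs, deduplication in
// GetByBlob, and rejection of descriptors whose content is missing.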
func TestSetBlob(t *testing.T) {
	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)

	defer cleanup()

	ctx, done, err := leaseutil.WithLease(ctx, co.lm, leaseutil.MakeTemporary)
	require.NoError(t, err)
	defer done(context.TODO())

	cm := co.manager

	active, err := cm.New(ctx, nil, nil)
	require.NoError(t, err)

	snap, err := active.Commit(ctx)
	require.NoError(t, err)

	info := snap.Info()
	require.Equal(t, "", string(info.DiffID))
	require.Equal(t, "", string(info.Blob))
	require.Equal(t, "", string(info.ChainID))
	require.Equal(t, "", string(info.BlobChainID))
	require.Equal(t, info.Extracted, true)

	ctx, clean, err := leaseutil.WithLease(ctx, co.lm)
	require.NoError(t, err)
	defer clean(context.TODO())

	b, desc, err := mapToBlob(map[string]string{"foo": "bar"}, true)
	require.NoError(t, err)

	err = content.WriteBlob(ctx, co.cs, "ref1", bytes.NewBuffer(b), desc)
	require.NoError(t, err)

	err = snap.(*immutableRef).setBlob(ctx, compression.UnknownCompression, ocispecs.Descriptor{
		Digest: digest.FromBytes([]byte("foobar")),
		Annotations: map[string]string{
			"containerd.io/uncompressed": digest.FromBytes([]byte("foobar2")).String(),
		},
	})
	require.Error(t, err)

	compressionType := compression.FromMediaType(desc.MediaType)
	if compressionType == compression.UnknownCompression {
		t.Errorf("unhandled layer media type: %q", desc.MediaType)
	}
	err = snap.(*immutableRef).setBlob(ctx, compressionType, desc)
	require.NoError(t, err)
	err = snap.(*immutableRef).setChains(ctx)
	require.NoError(t, err)

	info = snap.Info()
	require.Equal(t, desc.Annotations["containerd.io/uncompressed"], string(info.DiffID))
	require.Equal(t, desc.Digest, info.Blob)
	require.Equal(t, desc.MediaType, info.MediaType)
	require.Equal(t, info.DiffID, info.ChainID)
	require.Equal(t, digest.FromBytes([]byte(desc.Digest+" "+info.DiffID)), info.BlobChainID)
	require.Equal(t, snap.ID(), info.SnapshotID)
	require.Equal(t, info.Extracted, true)

	active, err = cm.New(ctx, snap, nil)
	require.NoError(t, err)

	snap2, err := active.Commit(ctx)
	require.NoError(t, err)

	b2, desc2, err := mapToBlob(map[string]string{"foo2": "bar2"}, true)
	require.NoError(t, err)

	err = content.WriteBlob(ctx, co.cs, "ref2", bytes.NewBuffer(b2), desc2)
	require.NoError(t, err)

	compressionType2 := compression.FromMediaType(desc2.MediaType)
	if compressionType2 == compression.UnknownCompression {
		t.Errorf("unhandled layer media type: %q", desc2.MediaType)
	}
	err = snap2.(*immutableRef).setBlob(ctx, compressionType2, desc2)
	require.NoError(t, err)
	err = snap2.(*immutableRef).setChains(ctx)
	require.NoError(t, err)

	info2 := snap2.Info()
	require.Equal(t, desc2.Annotations["containerd.io/uncompressed"], string(info2.DiffID))
	require.Equal(t, desc2.Digest, info2.Blob)
	require.Equal(t, desc2.MediaType, info2.MediaType)
	require.Equal(t, digest.FromBytes([]byte(info.ChainID+" "+info2.DiffID)), info2.ChainID)
	require.Equal(t, digest.FromBytes([]byte(info.BlobChainID+" "+digest.FromBytes([]byte(desc2.Digest+" "+info2.DiffID)))), info2.BlobChainID)
	require.Equal(t, snap2.ID(), info2.SnapshotID)
	require.Equal(t, info2.Extracted, true)

	b3, desc3, err := mapToBlob(map[string]string{"foo3": "bar3"}, true)
	require.NoError(t, err)

	err = content.WriteBlob(ctx, co.cs, "ref3", bytes.NewBuffer(b3), desc3)
	require.NoError(t, err)

	snap3, err := cm.GetByBlob(ctx, desc3, snap)
	require.NoError(t, err)

	info3 := snap3.Info()
	require.Equal(t, desc3.Annotations["containerd.io/uncompressed"], string(info3.DiffID))
	require.Equal(t, desc3.Digest, info3.Blob)
	require.Equal(t, desc3.MediaType, info3.MediaType)
	require.Equal(t, digest.FromBytes([]byte(info.ChainID+" "+info3.DiffID)), info3.ChainID)
	require.Equal(t, digest.FromBytes([]byte(info.BlobChainID+" "+digest.FromBytes([]byte(desc3.Digest+" "+info3.DiffID)))), info3.BlobChainID)
	require.Equal(t, string(info3.ChainID), info3.SnapshotID)
	require.Equal(t, info3.Extracted, false)

	// snap4 is same as snap2
	snap4, err := cm.GetByBlob(ctx, desc2, snap)
	require.NoError(t, err)

	require.Equal(t, snap2.ID(), snap4.ID())

	// snap5 is a different blob but has the same diffID as snap2
	b5, desc5, err := mapToBlob(map[string]string{"foo5": "bar5"}, true)
	require.NoError(t, err)

	desc5.Annotations["containerd.io/uncompressed"] = info2.DiffID.String()

	err = content.WriteBlob(ctx, co.cs, "ref5", bytes.NewBuffer(b5), desc5)
	require.NoError(t, err)

	snap5, err := cm.GetByBlob(ctx, desc5, snap)
	require.NoError(t, err)

	require.NotEqual(t, snap2.ID(), snap5.ID())
	require.Equal(t, snap2.Info().SnapshotID, snap5.Info().SnapshotID)
	require.Equal(t, info2.DiffID, snap5.Info().DiffID)
	require.Equal(t, desc5.Digest, snap5.Info().Blob)

	require.Equal(t, snap2.Info().ChainID, snap5.Info().ChainID)
	require.NotEqual(t, snap2.Info().BlobChainID, snap5.Info().BlobChainID)
	require.Equal(t, digest.FromBytes([]byte(info.BlobChainID+" "+digest.FromBytes([]byte(desc5.Digest+" "+info2.DiffID)))), snap5.Info().BlobChainID)

	// snap6 is a child of snap3
	b6, desc6, err := mapToBlob(map[string]string{"foo6": "bar6"}, true)
	require.NoError(t, err)

	err = content.WriteBlob(ctx, co.cs, "ref6", bytes.NewBuffer(b6), desc6)
	require.NoError(t, err)

	snap6, err := cm.GetByBlob(ctx, desc6, snap3)
	require.NoError(t, err)

	info6 := snap6.Info()
	require.Equal(t, desc6.Annotations["containerd.io/uncompressed"], string(info6.DiffID))
	require.Equal(t, desc6.Digest, info6.Blob)
	require.Equal(t, digest.FromBytes([]byte(snap3.Info().ChainID+" "+info6.DiffID)), info6.ChainID)
	require.Equal(t, digest.FromBytes([]byte(info3.BlobChainID+" "+digest.FromBytes([]byte(info6.Blob+" "+info6.DiffID)))), info6.BlobChainID)
	require.Equal(t, string(info6.ChainID), info6.SnapshotID)
	require.Equal(t, info6.Extracted, false)

	_, err = cm.GetByBlob(ctx, ocispecs.Descriptor{
		Digest: digest.FromBytes([]byte("notexist")),
		Annotations: map[string]string{
			"containerd.io/uncompressed": digest.FromBytes([]byte("notexist")).String(),
		},
	}, snap3)
	require.Error(t, err)

	clean(context.TODO())

	//snap.SetBlob()
}

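// TestPrune checks that Prune only removes refs that are no longer held and
// that parents kept alive by children survive until the last reference is
// released.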
func TestPrune(t *testing.T) {
	t.Parallel()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)

	defer cleanup()
	cm := co.manager

	active, err := cm.New(ctx, nil, nil)
	require.NoError(t, err)

	snap, err := active.Commit(ctx)
	require.NoError(t, err)

	active, err = cm.New(ctx, snap, nil, CachePolicyRetain)
	require.NoError(t, err)

	snap2, err := active.Commit(ctx)
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 2, 0)

	dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 2, len(dirs))

	// prune with keeping refs does nothing
	buf := pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 2, 0)
	require.Equal(t, len(buf.all), 0)

	dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 2, len(dirs))

	err = snap2.Release(ctx)
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 1, 1)

	// prune with keeping single refs deletes one
	buf = pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 1, 0)
	require.Equal(t, len(buf.all), 1)

	dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 1, len(dirs))

	err = snap.Release(ctx)
	require.NoError(t, err)

	active, err = cm.New(ctx, snap, nil, CachePolicyRetain)
	require.NoError(t, err)

	snap2, err = active.Commit(ctx)
	require.NoError(t, err)

	err = snap.Release(ctx)
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 2, 0)

	// prune with parent released does nothing
	buf = pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 2, 0)
	require.Equal(t, len(buf.all), 0)

	// releasing last reference
	err = snap2.Release(ctx)
	require.NoError(t, err)
	checkDiskUsage(ctx, t, cm, 0, 2)

	buf = pruneResultBuffer()
	err = cm.Prune(ctx, buf.C, client.PruneInfo{})
	buf.close()
	require.NoError(t, err)

	checkDiskUsage(ctx, t, cm, 0, 0)
	require.Equal(t, len(buf.all), 2)

	dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
	require.NoError(t, err)
	require.Equal(t, 0, len(dirs))
}

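// TestLazyCommit verifies that commits stay lazy until finalized: the mutable
// ref remains locked while immutable refs are held, can be re-acquired after
// release, and disappears once finalizeLocked is called, including across
// cache manager restarts.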
func TestLazyCommit(t *testing.T) {
	t.Parallel()

	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		tmpdir:          tmpdir,
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	cm := co.manager

	active, err := cm.New(ctx, nil, nil, CachePolicyRetain)
	require.NoError(t, err)

	// after commit mutable is locked
	snap, err := active.Commit(ctx)
	require.NoError(t, err)

	_, err = cm.GetMutable(ctx, active.ID())
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, ErrLocked))

	// immutable refs still work
	snap2, err := cm.Get(ctx, snap.ID())
	require.NoError(t, err)
	require.Equal(t, snap.ID(), snap2.ID())

	err = snap.Release(ctx)
	require.NoError(t, err)

	err = snap2.Release(ctx)
	require.NoError(t, err)

	// immutable refs work after the final release as well
	snap, err = cm.Get(ctx, snap.ID())
	require.NoError(t, err)
	require.Equal(t, snap.ID(), snap2.ID())

	// the mutable ref can't be retrieved while an immutable ref is held
	_, err = cm.GetMutable(ctx, active.ID())
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, ErrLocked))

	err = snap.Release(ctx)
	require.NoError(t, err)

	// after release mutable becomes available again
	active2, err := cm.GetMutable(ctx, active.ID())
	require.NoError(t, err)
	require.Equal(t, active2.ID(), active.ID())

	// because the mutable ref was taken, the old immutable refs are cleared
	_, err = cm.Get(ctx, snap.ID())
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errNotFound))

	snap, err = active2.Commit(ctx)
	require.NoError(t, err)

	// this time finalize commit
	err = snap.(*immutableRef).finalizeLocked(ctx)
	require.NoError(t, err)

	err = snap.Release(ctx)
	require.NoError(t, err)

	// mutable is gone after finalize
	_, err = cm.GetMutable(ctx, active2.ID())
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errNotFound))

	// immutable still works
	snap2, err = cm.Get(ctx, snap.ID())
	require.NoError(t, err)
	require.Equal(t, snap.ID(), snap2.ID())

	err = snap2.Release(ctx)
	require.NoError(t, err)

	// test restarting after commit
	active, err = cm.New(ctx, nil, nil, CachePolicyRetain)
	require.NoError(t, err)

	// after commit mutable is locked
	snap, err = active.Commit(ctx)
	require.NoError(t, err)

	err = cm.Close()
	require.NoError(t, err)

	cleanup()

	// we can't close the snapshotter and open it twice (especially, its internal bbolt store)
	co, cleanup, err = newCacheManager(ctx, cmOpt{
		tmpdir:          tmpdir,
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	cm = co.manager

	snap2, err = cm.Get(ctx, snap.ID())
	require.NoError(t, err)

	err = snap2.Release(ctx)
	require.NoError(t, err)

	active, err = cm.GetMutable(ctx, active.ID())
	require.NoError(t, err)

	_, err = cm.Get(ctx, snap.ID())
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errNotFound))

	snap, err = active.Commit(ctx)
	require.NoError(t, err)

	err = cm.Close()
	require.NoError(t, err)

	cleanup()

	co, cleanup, err = newCacheManager(ctx, cmOpt{
		tmpdir:          tmpdir,
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()
	cm = co.manager

	snap2, err = cm.Get(ctx, snap.ID())
	require.NoError(t, err)

	err = snap2.(*immutableRef).finalizeLocked(ctx)
	require.NoError(t, err)

	err = snap2.Release(ctx)
	require.NoError(t, err)

	_, err = cm.GetMutable(ctx, active.ID())
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errNotFound))
}

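// TestGetRemote builds a tree of refs over lazy and non-lazy blobs, then calls
// GetRemote concurrently for the uncompressed, gzip and estargz variants and
// verifies the returned descriptors, their annotations, and the resulting
// content store entries.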
func TestGetRemote(t *testing.T) {
	t.Parallel()
	// windows fails when lazy blob is being extracted with "invalid windows mount type: 'bind'"
	if runtime.GOOS != "linux" {
		t.Skipf("unsupported GOOS: %s", runtime.GOOS)
	}

	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")

	tmpdir, err := ioutil.TempDir("", "cachemanager")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)

	co, cleanup, err := newCacheManager(ctx, cmOpt{
		snapshotter:     snapshotter,
		snapshotterName: "native",
	})
	require.NoError(t, err)
	defer cleanup()
	cm := co.manager

	ctx, done, err := leaseutil.WithLease(ctx, co.lm, leaseutil.MakeTemporary)
	require.NoError(t, err)
	defer done(context.TODO())

	contentBuffer := contentutil.NewBuffer()

	descHandlers := DescHandlers(map[digest.Digest]*DescHandler{})

	// make some lazy refs from blobs
	expectedContent := map[digest.Digest]struct{}{}
	variant := map[digest.Digest]digest.Digest{}
	esgz2gzip := map[digest.Digest]digest.Digest{}
	var descs []ocispecs.Descriptor
	for i := 0; i < 2; i++ {
		blobmap := map[string]string{"foo": strconv.Itoa(i)}
		blobBytes, desc, err := mapToBlob(blobmap, true)
		require.NoError(t, err)

		expectedContent[desc.Digest] = struct{}{}
		descs = append(descs, desc)

		cw, err := contentBuffer.Writer(ctx)
		require.NoError(t, err)
		_, err = cw.Write(blobBytes)
		require.NoError(t, err)
		err = cw.Commit(ctx, 0, cw.Digest())
		require.NoError(t, err)

		descHandlers[desc.Digest] = &DescHandler{
			Provider: func(_ session.Group) content.Provider { return contentBuffer },
		}

		uncompressedBlobBytes, uncompressedDesc, err := mapToBlob(blobmap, false)
		require.NoError(t, err)
		expectedContent[uncompressedDesc.Digest] = struct{}{}

		esgzDgst, uncompressedEsgzDgst, err := esgzBlobDigest(uncompressedBlobBytes)
		require.NoError(t, err)
		expectedContent[esgzDgst] = struct{}{}
		variant[uncompressedEsgzDgst] = uncompressedDesc.Digest
		esgz2gzip[esgzDgst] = desc.Digest
	}

	// Create 3 levels of mutable refs, where each parent ref has 2 children (this tests parallel creation of
	// overlapping blob chains).
	lazyRef, err := cm.GetByBlob(ctx, descs[0], nil, descHandlers)
	require.NoError(t, err)

	refs := []ImmutableRef{lazyRef}
	for i := 0; i < 3; i++ {
		var newRefs []ImmutableRef
		for j, ir := range refs {
			for k := 0; k < 2; k++ {
				mutRef, err := cm.New(ctx, ir, nil, descHandlers)
				require.NoError(t, err)

				m, err := mutRef.Mount(ctx, false, nil)
				require.NoError(t, err)

				lm := snapshot.LocalMounter(m)
				target, err := lm.Mount()
				require.NoError(t, err)

				f, err := os.Create(filepath.Join(target, fmt.Sprintf("%d-%d-%d", i, j, k)))
				require.NoError(t, err)
				err = os.Chtimes(f.Name(), time.Unix(0, 0), time.Unix(0, 0))
				require.NoError(t, err)

				_, desc, err := fileToBlob(f, true)
				require.NoError(t, err)
				expectedContent[desc.Digest] = struct{}{}
				uncompressedBlobBytes, uncompressedDesc, err := fileToBlob(f, false)
				require.NoError(t, err)
				expectedContent[uncompressedDesc.Digest] = struct{}{}
				esgzDgst, uncompressedEsgzDgst, err := esgzBlobDigest(uncompressedBlobBytes)
				require.NoError(t, err)
				expectedContent[esgzDgst] = struct{}{}
				variant[uncompressedEsgzDgst] = uncompressedDesc.Digest
				esgz2gzip[esgzDgst] = desc.Digest

				f.Close()
				err = lm.Unmount()
				require.NoError(t, err)

				immutRef, err := mutRef.Commit(ctx)
				require.NoError(t, err)
				newRefs = append(newRefs, immutRef)
			}
		}
		refs = newRefs
	}

	// also test the original lazyRef to get coverage for refs that don't have to be extracted from the snapshotter
	lazyRef2, err := cm.GetByBlob(ctx, descs[1], nil, descHandlers)
	require.NoError(t, err)
	refs = append(refs, lazyRef2)

	checkNumBlobs(ctx, t, co.cs, 1)

	// Call GetRemote on all the refs
	esgzRefs := map[digest.Digest]struct{}{}
	var esgzRefsMu sync.Mutex
	eg, egctx := errgroup.WithContext(ctx)
	for _, ir := range refs {
		ir := ir.(*immutableRef)
		for _, compressionType := range []compression.Type{compression.Uncompressed, compression.Gzip, compression.EStargz} {
			compressionType := compressionType
			eg.Go(func() error {
				remote, err := ir.GetRemote(egctx, true, compressionType, true, nil)
				require.NoError(t, err)
				refChain := ir.parentRefChain()
				for i, desc := range remote.Descriptors {
					switch compressionType {
					case compression.Uncompressed:
						require.Equal(t, ocispecs.MediaTypeImageLayer, desc.MediaType)
					case compression.Gzip:
						require.Equal(t, ocispecs.MediaTypeImageLayerGzip, desc.MediaType)
					case compression.EStargz:
						require.Equal(t, ocispecs.MediaTypeImageLayerGzip, desc.MediaType)
					default:
						require.Fail(t, "unhandled media type", compressionType)
					}
					dgst := desc.Digest
					if v, ok := variant[dgst]; ok {
						dgst = v
					}
					require.Contains(t, expectedContent, dgst)
					checkDescriptor(ctx, t, co.cs, desc, compressionType)

					r := refChain[i]
					if compressionType == compression.EStargz {
						if digest.Digest(getBlob(r.md)) == desc.Digest {
							esgzRefsMu.Lock()
							esgzRefs[desc.Digest] = struct{}{}
							esgzRefsMu.Unlock()
						}
					}
					isLazy, err := r.isLazy(egctx)
					require.NoError(t, err)
					needs, err := needsConversion(desc.MediaType, compressionType)
					require.NoError(t, err)
					if needs {
						require.False(t, isLazy, "layer %q requires conversion so it must be unlazied", desc.Digest)
					}
					bDesc, err := r.getCompressionBlob(egctx, compressionType)
					if isLazy {
						require.Error(t, err)
					} else {
						require.NoError(t, err)
						checkDescriptor(ctx, t, co.cs, bDesc, compressionType)
						require.Equal(t, desc.Digest, bDesc.Digest)
					}
				}
				return nil
			})
		}
	}
	require.NoError(t, eg.Wait())

	for dgst := range esgzRefs {
		gzipDgst, ok := esgz2gzip[dgst]
		require.True(t, ok, "match for gzip blob: %s", dgst)
		delete(expectedContent, gzipDgst) // esgz blob is reused also as gzip. duplicated gzip blob is unexpected.
	}

	// verify there's a 1-to-1 mapping between the content store and what we expected to be there
	err = co.cs.Walk(ctx, func(info content.Info) error {
		dgst := info.Digest
		if v, ok := variant[dgst]; ok {
			dgst = v
		}
		var matched bool
		for expected := range expectedContent {
			if dgst == expected {
				delete(expectedContent, expected)
				matched = true
				break
			}
		}
		require.True(t, matched, "match for blob: %s", info.Digest)
		checkInfo(ctx, t, co.cs, info)
		return nil
	})
	require.NoError(t, err)
	require.Equal(t, map[digest.Digest]struct{}{}, expectedContent)
}

func checkInfo(ctx context.Context, t *testing.T, cs content.Store, info content.Info) {
	if info.Labels == nil {
		return
	}
	uncompressedDgst, ok := info.Labels[containerdUncompressed]
	if !ok {
		return
	}
	ra, err := cs.ReaderAt(ctx, ocispecs.Descriptor{Digest: info.Digest})
	require.NoError(t, err)
	defer ra.Close()
	decompressR, err := ctdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size()))
	require.NoError(t, err)

	diffID := digest.Canonical.Digester()
	_, err = io.Copy(diffID.Hash(), decompressR)
	require.NoError(t, err)
	require.Equal(t, diffID.Digest().String(), uncompressedDgst)
}

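// checkDescriptor validates a layer descriptor's annotations against its
// content: the uncompressed digest annotation must match the decompressed
// payload, and estargz layers must also carry TOC digest and uncompressed size
// annotations. Lazy layers with no local content are skipped.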
func checkDescriptor(ctx context.Context, t *testing.T, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) {
	if desc.Annotations == nil {
		return
	}

	// Check annotations exist
	uncompressedDgst, ok := desc.Annotations[containerdUncompressed]
	require.True(t, ok, "uncompressed digest annotation not found: %q", desc.Digest)
	var uncompressedSize int64
	if compressionType == compression.EStargz {
		_, ok := desc.Annotations[estargz.TOCJSONDigestAnnotation]
		require.True(t, ok, "toc digest annotation not found: %q", desc.Digest)
		uncompressedSizeS, ok := desc.Annotations[estargz.StoreUncompressedSizeAnnotation]
		require.True(t, ok, "uncompressed size annotation not found: %q", desc.Digest)
		var err error
		uncompressedSize, err = strconv.ParseInt(uncompressedSizeS, 10, 64)
		require.NoError(t, err)
	}

	// Check annotation values are valid
	c := new(counter)
	ra, err := cs.ReaderAt(ctx, desc)
	if err != nil && errdefs.IsNotFound(err) {
		return // lazy layer
	}
	require.NoError(t, err)
	defer ra.Close()
	decompressR, err := ctdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size()))
	require.NoError(t, err)

	diffID := digest.Canonical.Digester()
	_, err = io.Copy(io.MultiWriter(diffID.Hash(), c), decompressR)
	require.NoError(t, err)
	require.Equal(t, diffID.Digest().String(), uncompressedDgst)
	if compressionType == compression.EStargz {
		require.Equal(t, c.size(), uncompressedSize)
	}
}

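// checkDiskUsage asserts the number of in-use and unused records reported by
// cm.DiskUsage.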
func checkDiskUsage(ctx context.Context, t *testing.T, cm Manager, inuse, unused int) {
	du, err := cm.DiskUsage(ctx, client.DiskUsageInfo{})
	require.NoError(t, err)
	var inuseActual, unusedActual int
	for _, r := range du {
		if r.InUse {
			inuseActual++
		} else {
			unusedActual++
		}
	}
	require.Equal(t, inuse, inuseActual)
	require.Equal(t, unused, unusedActual)
}

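// esgzBlobDigest compresses the given uncompressed blob with the estargz
// writer and returns the digest of the compressed blob together with the
// digest of its gzip-decompressed payload.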
func esgzBlobDigest(uncompressedBlobBytes []byte) (digest.Digest, digest.Digest, error) {
	buf := new(bytes.Buffer)
	compressorFunc, _ := writeEStargz()
	w, err := compressorFunc(buf, ocispecs.MediaTypeImageLayerGzip)
	if err != nil {
		return "", "", err
	}
	if _, err := io.Copy(w, bytes.NewReader(uncompressedBlobBytes)); err != nil {
		return "", "", err
	}
	if err := w.Close(); err != nil {
		return "", "", err
	}
	b := buf.Bytes()
	esgzDgst := digest.FromBytes(b)
	ur, err := gzip.NewReader(bytes.NewReader(b))
	if err != nil {
		return "", "", err
	}
	defer ur.Close()
	uncompressedDgst, err := digest.FromReader(ur)
	if err != nil {
		return "", "", err
	}
	return esgzDgst, uncompressedDgst, nil
}

func checkNumBlobs(ctx context.Context, t *testing.T, cs content.Store, expected int) {
	c := 0
	err := cs.Walk(ctx, func(_ content.Info) error {
		c++
		return nil
	})
	require.NoError(t, err)
	require.Equal(t, expected, c)
}

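// pruneResultBuffer collects UsageInfo records sent on the prune channel so
// tests can assert on what was pruned after calling close.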
func pruneResultBuffer() *buf {
	b := &buf{C: make(chan client.UsageInfo), closed: make(chan struct{})}
	go func() {
		for c := range b.C {
			b.all = append(b.all, c)
		}
		close(b.closed)
	}()
	return b
}

type buf struct {
	C      chan client.UsageInfo
	closed chan struct{}
	all    []client.UsageInfo
}

func (b *buf) close() {
	close(b.C)
	<-b.closed
}

type bufferCloser struct {
	*bytes.Buffer
}

func (b bufferCloser) Close() error {
	return nil
}

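// mapToBlob writes the given files (name -> content) into a tar archive,
// optionally gzip-compressed, and returns the blob bytes together with an OCI
// descriptor carrying the uncompressed digest annotation.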
func mapToBlob(m map[string]string, compress bool) ([]byte, ocispecs.Descriptor, error) {
	buf := bytes.NewBuffer(nil)
	sha := digest.SHA256.Digester()

	var dest io.WriteCloser = bufferCloser{buf}
	if compress {
		dest = gzip.NewWriter(buf)
	}
	tw := tar.NewWriter(io.MultiWriter(sha.Hash(), dest))

	for k, v := range m {
		if err := tw.WriteHeader(&tar.Header{
			Name: k,
			Size: int64(len(v)),
		}); err != nil {
			return nil, ocispecs.Descriptor{}, err
		}
		if _, err := tw.Write([]byte(v)); err != nil {
			return nil, ocispecs.Descriptor{}, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, ocispecs.Descriptor{}, err
	}
	if err := dest.Close(); err != nil {
		return nil, ocispecs.Descriptor{}, err
	}

	mediaType := ocispecs.MediaTypeImageLayer
	if compress {
		mediaType = ocispecs.MediaTypeImageLayerGzip
	}
	return buf.Bytes(), ocispecs.Descriptor{
		Digest:    digest.FromBytes(buf.Bytes()),
		MediaType: mediaType,
		Size:      int64(buf.Len()),
		Annotations: map[string]string{
			"containerd.io/uncompressed": sha.Digest().String(),
		},
	}, nil
}

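// fileToBlob is like mapToBlob but archives an existing file, normalizing the
// tar header (PAX format, truncated mtime) so the resulting blob digest is
// stable.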
func fileToBlob(file *os.File, compress bool) ([]byte, ocispecs.Descriptor, error) {
	buf := bytes.NewBuffer(nil)
	sha := digest.SHA256.Digester()

	var dest io.WriteCloser = bufferCloser{buf}
	if compress {
		dest = gzip.NewWriter(buf)
	}
	tw := tar.NewWriter(io.MultiWriter(sha.Hash(), dest))

	info, err := file.Stat()
	if err != nil {
		return nil, ocispecs.Descriptor{}, err
	}

	fi, err := tar.FileInfoHeader(info, "")
	if err != nil {
		return nil, ocispecs.Descriptor{}, err
	}
	fi.Format = tar.FormatPAX
	fi.ModTime = fi.ModTime.Truncate(time.Second)
	fi.AccessTime = time.Time{}
	fi.ChangeTime = time.Time{}

	if err := tw.WriteHeader(fi); err != nil {
		return nil, ocispecs.Descriptor{}, err
	}
	if _, err := io.Copy(tw, file); err != nil {
		return nil, ocispecs.Descriptor{}, err
	}

	if err := tw.Close(); err != nil {
		return nil, ocispecs.Descriptor{}, err
	}
	if err := dest.Close(); err != nil {
		return nil, ocispecs.Descriptor{}, err
	}

	mediaType := ocispecs.MediaTypeImageLayer
	if compress {
		mediaType = ocispecs.MediaTypeImageLayerGzip
	}
	return buf.Bytes(), ocispecs.Descriptor{
		Digest:    digest.FromBytes(buf.Bytes()),
		MediaType: mediaType,
		Size:      int64(buf.Len()),
		Annotations: map[string]string{
			"containerd.io/uncompressed": sha.Digest().String(),
		},
	}, nil
}