cachemanager: size reporting in diskusage

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
docker-18.09
Tonis Tiigi 2017-05-31 16:45:04 -07:00
parent 5a0f803917
commit 1c5dbe562a
6 changed files with 447 additions and 18 deletions

45
cache/manager.go vendored
View File

@ -6,6 +6,8 @@ import (
"path/filepath"
"sync"
"golang.org/x/sync/errgroup"
"github.com/boltdb/bolt"
cdsnapshot "github.com/containerd/containerd/snapshot"
"github.com/pkg/errors"
@ -33,7 +35,7 @@ type Accessor interface {
}
type Controller interface {
DiskUsage(ctx context.Context) ([]UsageInfo, error)
DiskUsage(ctx context.Context) ([]*UsageInfo, error)
Prune(ctx context.Context) (map[string]int64, error)
GC(ctx context.Context) error
}
@ -128,6 +130,7 @@ func (cm *cacheManager) get(id string) (ImmutableRef, error) {
cm: cm,
refs: make(map[*cacheRef]struct{}),
parent: parent,
size: sizeUnknown,
}
cm.records[id] = rec // TODO: store to db
}
@ -172,6 +175,7 @@ func (cm *cacheManager) New(s ImmutableRef) (MutableRef, error) {
cm: cm,
refs: make(map[*cacheRef]struct{}),
parent: parent,
size: sizeUnknown,
}
cm.mu.Lock()
@ -203,23 +207,52 @@ func (cm *cacheManager) GetMutable(id string) (MutableRef, error) { // Rebase?
return rec.ref(), nil
}
func (cm *cacheManager) DiskUsage(ctx context.Context) ([]UsageInfo, error) {
func (cm *cacheManager) DiskUsage(ctx context.Context) ([]*UsageInfo, error) {
cm.mu.Lock()
defer cm.mu.Unlock()
var du []UsageInfo
var du []*UsageInfo
for id, cr := range cm.records {
cr.mu.Lock()
c := UsageInfo{
c := &UsageInfo{
ID: id,
Active: cr.mutable,
InUse: len(cr.refs) > 0,
Size: -1, // TODO:
Size: cr.size,
}
if cr.mutable && len(cr.refs) > 0 && !cr.frozen {
c.Size = 0 // size can not be determined because it is changing
}
cr.mu.Unlock()
du = append(du, c)
}
cm.mu.Unlock()
eg, ctx := errgroup.WithContext(ctx)
for _, d := range du {
if d.Size == sizeUnknown {
func(d *UsageInfo) {
eg.Go(func() error {
ref, err := cm.Get(d.ID)
if err != nil {
d.Size = 0
return nil
}
s, err := ref.Size(ctx)
if err != nil {
return err
}
d.Size = s
return ref.Release()
})
}(d)
}
}
if err := eg.Wait(); err != nil {
return du, err
}
return du, nil
}

39
cache/refs.go vendored
View File

@ -1,20 +1,23 @@
package cache
import (
"context"
"crypto/rand"
"encoding/hex"
"sync"
"github.com/containerd/containerd/mount"
"github.com/pkg/errors"
"github.com/tonistiigi/buildkit_poc/util/flightcontrol"
"golang.org/x/net/context"
)
const sizeUnknown int64 = -1
type ImmutableRef interface {
Mountable
ID() string
Release() error
Size() (int64, error)
Size(ctx context.Context) (int64, error)
// Prepare() / ChainID() / Meta()
}
@ -23,7 +26,7 @@ type MutableRef interface {
ID() string
Freeze() (ImmutableRef, error)
ReleaseAndCommit(ctx context.Context) (ImmutableRef, error)
Size() (int64, error)
Size(ctx context.Context) (int64, error)
}
type Mountable interface {
@ -41,6 +44,9 @@ type cacheRecord struct {
parent ImmutableRef
view string
viewMount []mount.Mount
sizeG flightcontrol.Group
size int64
}
// hold manager lock before calling
@ -50,6 +56,27 @@ func (cr *cacheRecord) ref() *cacheRef {
return ref
}
// Size returns the disk usage of the record's snapshot. The value is computed
// lazily on first call and cached in cr.size; concurrent callers for the same
// record are coalesced through the flightcontrol group keyed by cr.id.
func (cr *cacheRecord) Size(ctx context.Context) (int64, error) {
	// this expects that usage() is implemented lazily
	s, err, _ := cr.sizeG.Do(ctx, cr.id, func(ctx context.Context) (interface{}, error) {
		cr.mu.Lock()
		s := cr.size
		cr.mu.Unlock()
		if s != sizeUnknown {
			// Cached value is valid; skip the snapshotter query.
			return s, nil
		}
		usage, err := cr.cm.ManagerOpt.Snapshotter.Usage(ctx, cr.id)
		if err != nil {
			return s, errors.Wrapf(err, "failed to get usage for %s", cr.id)
		}
		cr.mu.Lock()
		// BUGFIX: the original stored the stale local `s` (always sizeUnknown
		// on this path), so the size was recomputed on every call. Cache the
		// value actually returned by the snapshotter.
		cr.size = usage.Size
		cr.mu.Unlock()
		return usage.Size, nil
	})
	if err != nil {
		// Guard the type assertion: the group may return a nil value on error.
		return 0, err
	}
	return s.(int64), nil
}
type cacheRef struct {
*cacheRecord
}
@ -130,6 +157,7 @@ func (sr *cacheRef) Freeze() (ImmutableRef, error) {
}
sr.frozen = true
sr.size = sizeUnknown
return sr, nil
}
@ -164,16 +192,13 @@ func (sr *cacheRef) ReleaseAndCommit(ctx context.Context) (ImmutableRef, error)
id: id,
cm: sr.cm,
refs: make(map[*cacheRef]struct{}),
size: sizeUnknown,
}
sr.cm.records[id] = rec // TODO: save to db
return rec.ref(), nil
}
func (sr *cacheRef) Size() (int64, error) {
return -1, errors.New("Size not implemented")
}
func (sr *cacheRef) ID() string {
return sr.id
}

View File

@ -16,7 +16,7 @@ import (
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/rootfs"
cdsnapshot "github.com/containerd/containerd/snapshot"
"github.com/containerd/containerd/snapshot/naive"
"github.com/containerd/containerd/snapshot/overlay"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@ -89,8 +89,23 @@ func TestControl(t *testing.T) {
lm.Unmount()
assert.NoError(t, err)
du, err := cm.DiskUsage(context.TODO())
assert.NoError(t, err)
// fmt.Printf("du1:\n")
// for _, d := range du {
// fmt.Printf("du1: %+v\n", d)
// }
err = snap.Release()
assert.NoError(t, err)
du, err = cm.DiskUsage(context.TODO())
assert.NoError(t, err)
for _, d := range du {
assert.True(t, d.Size >= 8192)
}
}
type containerd struct {
@ -100,7 +115,7 @@ type containerd struct {
}
func localContainerd(root string) (*containerd, error) {
s, err := naive.NewSnapshotter(filepath.Join(root, "snapshots"))
s, err := overlay.NewSnapshotter(filepath.Join(root, "snapshots"))
if err != nil {
return nil, err
}
@ -131,7 +146,6 @@ func (a *localApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mount
return ocispec.Descriptor{}, errors.Wrap(err, "failed to create temporary directory")
}
defer os.RemoveAll(dir)
if err := mount.MountAll(mounts, dir); err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to mount")
}

View File

@ -4,4 +4,4 @@ set -eu -o pipefail -x
# update this to iidfile after 17.06
docker build -t buildkit_poc:test -f ./hack/dockerfiles/test.Dockerfile --force-rm .
docker run --cap-add=SYS_ADMIN buildkit_poc:test go test ${TESTFLAGS:--v} ${TESTPKGS:-./...}
docker run -v /tmp --cap-add=SYS_ADMIN buildkit_poc:test go test ${TESTFLAGS:--v} ${TESTPKGS:-./...}

View File

@ -44,7 +44,7 @@ func (lm *localMounter) Mount() (string, error) {
return "", err
}
lm.target = dir
return "", nil
return dir, nil
}
func (lm *localMounter) Unmount() error {

View File

@ -0,0 +1,357 @@
// +build linux
package overlay
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/containerd/containerd/fs"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/snapshot"
"github.com/containerd/containerd/snapshot/storage"
"github.com/pkg/errors"
)
// init registers the overlay snapshotter as a containerd plugin under the
// name "snapshot-overlay" so it can be selected at daemon startup.
func init() {
	plugin.Register("snapshot-overlay", &plugin.Registration{
		Type: plugin.SnapshotPlugin,
		Init: func(ic *plugin.InitContext) (interface{}, error) {
			// Snapshot data is rooted under <plugin root>/snapshot/overlay.
			return NewSnapshotter(filepath.Join(ic.Root, "snapshot", "overlay"))
		},
	})
}
// snapshotter implements snapshot.Snapshotter backed by overlayfs directories
// stored under root, with bookkeeping kept in a storage.MetaStore.
type snapshotter struct {
	root string             // base directory holding "snapshots/" and "metadata.db"
	ms   *storage.MetaStore // metadata store for snapshot records and transactions
}

// activeSnapshot describes an in-progress snapshot.
// NOTE(review): this type appears unused in this file — confirm whether it is
// referenced elsewhere or is dead code that can be removed.
type activeSnapshot struct {
	id       string
	name     string
	parentID interface{}
	readonly bool
}
// NewSnapshotter returns a Snapshotter which uses overlayfs. The overlayfs
// diffs are stored under the provided root. A metadata file is stored under
// the root.
func NewSnapshotter(root string) (snapshot.Snapshotter, error) {
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}

	// overlayfs requires the backing filesystem to report d_type in readdir.
	dtype, err := fs.SupportsDType(root)
	if err != nil {
		return nil, err
	}
	if !dtype {
		return nil, fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support.", root)
	}

	metaStore, err := storage.NewMetaStore(filepath.Join(root, "metadata.db"))
	if err != nil {
		return nil, err
	}

	// The snapshots directory may already exist from a previous run.
	if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) {
		return nil, err
	}

	return &snapshotter{root: root, ms: metaStore}, nil
}
// Stat returns the info for an active or committed snapshot by name or
// key.
//
// Should be used for parent resolution, existence checks and to discern
// the kind of snapshot.
func (o *snapshotter) Stat(ctx context.Context, key string) (snapshot.Info, error) {
	// Read-only transaction; always released via the deferred rollback.
	ctx, tx, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return snapshot.Info{}, err
	}
	defer tx.Rollback()

	// Only the info component of the record is needed here.
	_, info, _, getErr := storage.GetInfo(ctx, key)
	if getErr != nil {
		return snapshot.Info{}, getErr
	}
	return info, nil
}
// Usage returns the resources taken by the snapshot identified by key.
//
// For active snapshots, this will scan the usage of the overlay "diff" (aka
// "upper") directory and may take some time.
//
// For committed snapshots, the value is returned from the metadata database.
func (o *snapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, error) {
	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return snapshot.Usage{}, err
	}

	id, info, usage, err := storage.GetInfo(ctx, key)
	// The transaction is only needed for GetInfo; release it before the
	// potentially slow directory scan below. Rolling back unconditionally
	// also fixes a leak in the original code, which returned without ever
	// releasing the transaction when GetInfo failed.
	t.Rollback()
	if err != nil {
		return snapshot.Usage{}, err
	}

	if info.Kind == snapshot.KindActive {
		// Active snapshots have no recorded usage; scan the upper directory.
		du, err := fs.DiskUsage(o.upperPath(id))
		if err != nil {
			// TODO(stevvooe): Consider not reporting an error in this case.
			return snapshot.Usage{}, err
		}
		usage = snapshot.Usage(du)
	}

	return usage, nil
}
// Prepare creates a read-write active snapshot keyed by key on top of parent
// and returns the mounts needed to access it.
func (o *snapshotter) Prepare(ctx context.Context, key, parent string) ([]mount.Mount, error) {
	return o.createActive(ctx, key, parent, false)
}

// View creates a read-only active snapshot keyed by key on top of parent and
// returns the mounts needed to access it.
func (o *snapshotter) View(ctx context.Context, key, parent string) ([]mount.Mount, error) {
	return o.createActive(ctx, key, parent, true)
}
// Mounts returns the mounts for the transaction identified by key. Can be
// called on an read-write or readonly transaction.
//
// This can be used to recover mounts after calling View or Prepare.
func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
	ctx, tx, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return nil, err
	}

	active, getErr := storage.GetActive(ctx, key)
	tx.Rollback() // read-only lookup; release the transaction immediately
	if getErr != nil {
		return nil, errors.Wrap(getErr, "failed to get active mount")
	}

	return o.mounts(active), nil
}
// Commit converts the active snapshot identified by key into a committed
// snapshot named name, recording the disk usage of its upper directory in the
// metadata store so later Usage calls need no rescan.
func (o *snapshotter) Commit(ctx context.Context, name, key string) error {
	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return err
	}
	// Roll the transaction back if any later step fails; the deferred closure
	// observes the latest value of err at function exit.
	defer func() {
		if err != nil {
			if rerr := t.Rollback(); rerr != nil {
				log.G(ctx).WithError(rerr).Warn("Failure rolling back transaction")
			}
		}
	}()

	// grab the existing id
	id, _, _, err := storage.GetInfo(ctx, key)
	if err != nil {
		return err
	}

	// Measure the upper ("diff") directory now; the result is stored with the
	// committed record.
	usage, err := fs.DiskUsage(o.upperPath(id))
	if err != nil {
		return err
	}

	if _, err = storage.CommitActive(ctx, key, name, snapshot.Usage(usage)); err != nil {
		return errors.Wrap(err, "failed to commit snapshot")
	}
	return t.Commit()
}
// Remove abandons the transaction identified by key. All resources
// associated with the key will be removed.
func (o *snapshotter) Remove(ctx context.Context, key string) (err error) {
	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return err
	}
	// Roll back on any failure prior to a successful Commit; t is set to nil
	// right after Commit so this deferred rollback becomes a no-op then.
	defer func() {
		if err != nil && t != nil {
			if rerr := t.Rollback(); rerr != nil {
				log.G(ctx).WithError(rerr).Warn("Failure rolling back transaction")
			}
		}
	}()

	id, _, err := storage.Remove(ctx, key)
	if err != nil {
		return errors.Wrap(err, "failed to remove")
	}

	// Rename the snapshot directory aside first so that a failed metadata
	// commit can restore it; actual deletion happens only after the commit.
	path := filepath.Join(o.root, "snapshots", id)
	renamed := filepath.Join(o.root, "snapshots", "rm-"+id)
	if err := os.Rename(path, renamed); err != nil {
		return errors.Wrap(err, "failed to rename")
	}

	err = t.Commit()
	t = nil // disarm the deferred rollback: commit already ended the transaction
	if err != nil {
		// Commit failed: try to restore the directory to its original name.
		if err1 := os.Rename(renamed, path); err1 != nil {
			// May cause inconsistent data on disk
			log.G(ctx).WithError(err1).WithField("path", renamed).Errorf("Failed to rename after failed commit")
		}
		return errors.Wrap(err, "failed to commit")
	}
	if err := os.RemoveAll(renamed); err != nil {
		// Must be cleaned up, any "rm-*" could be removed if no active transactions
		log.G(ctx).WithError(err).WithField("path", renamed).Warnf("Failed to remove root filesystem")
	}
	return nil
}
// Walk the committed snapshots.
func (o *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshot.Info) error) error {
	// Read-only transaction, released by the deferred rollback once the walk
	// completes.
	ctx, tx, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return err
	}
	defer tx.Rollback()
	return storage.WalkInfo(ctx, fn)
}
// createActive creates a new active snapshot directory and metadata record
// for key on top of parent, returning its mounts. When readonly is true no
// overlay work directory is created.
func (o *snapshotter) createActive(ctx context.Context, key, parent string, readonly bool) ([]mount.Mount, error) {
	var (
		path        string
		snapshotDir = filepath.Join(o.root, "snapshots")
	)

	// Stage the new snapshot in a temp dir so a failed creation never leaves
	// a partially-initialized directory under its final name.
	td, err := ioutil.TempDir(snapshotDir, "new-")
	if err != nil {
		return nil, errors.Wrap(err, "failed to create temp dir")
	}
	// Best-effort cleanup of the staging dir (and of the final path, if the
	// rename below already happened) when any later step fails; the closure
	// reads the latest value of err and may wrap it with cleanup failures.
	defer func() {
		if err != nil {
			if td != "" {
				if err1 := os.RemoveAll(td); err1 != nil {
					err = errors.Wrapf(err, "remove failed: %v", err1)
				}
			}
			if path != "" {
				if err1 := os.RemoveAll(path); err1 != nil {
					err = errors.Wrapf(err, "failed to remove path: %v", err1)
				}
			}
		}
	}()

	if err = os.MkdirAll(filepath.Join(td, "fs"), 0711); err != nil {
		return nil, err
	}

	if !readonly {
		// overlayfs needs a work directory for read-write snapshots.
		if err = os.MkdirAll(filepath.Join(td, "work"), 0700); err != nil {
			return nil, err
		}
	}

	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return nil, err
	}

	active, err := storage.CreateActive(ctx, key, parent, readonly)
	if err != nil {
		if rerr := t.Rollback(); rerr != nil {
			log.G(ctx).WithError(rerr).Warn("Failure rolling back transaction")
		}
		return nil, errors.Wrap(err, "failed to create active")
	}

	// Move the staged directory to its final, ID-based location.
	path = filepath.Join(snapshotDir, active.ID)
	if err = os.Rename(td, path); err != nil {
		if rerr := t.Rollback(); rerr != nil {
			log.G(ctx).WithError(rerr).Warn("Failure rolling back transaction")
		}
		return nil, errors.Wrap(err, "failed to rename")
	}
	td = "" // staging dir successfully renamed; disarm its deferred cleanup

	if err = t.Commit(); err != nil {
		return nil, errors.Wrap(err, "commit failed")
	}

	return o.mounts(active), nil
}
// mounts translates an active snapshot record into the mount list needed to
// access it: a plain bind mount when overlay is not applicable, otherwise an
// overlay mount built from the parent chain.
func (o *snapshotter) mounts(active storage.Active) []mount.Mount {
	// bindMount builds a single recursive bind mount of src with the given
	// access flag ("ro" or "rw").
	bindMount := func(src, accessFlag string) []mount.Mount {
		return []mount.Mount{
			{
				Source: src,
				Type:   "bind",
				Options: []string{
					accessFlag,
					"rbind",
				},
			},
		}
	}

	if len(active.ParentIDs) == 0 {
		// if we only have one layer/no parents then just return a bind mount as overlay
		// will not work
		flag := "rw"
		if active.Readonly {
			flag = "ro"
		}
		return bindMount(o.upperPath(active.ID), flag)
	}

	if active.Readonly && len(active.ParentIDs) == 1 {
		// Read-only view of exactly one parent: bind the parent directly.
		return bindMount(o.upperPath(active.ParentIDs[0]), "ro")
	}

	var opts []string
	if !active.Readonly {
		// Writable snapshots get upper/work directories of their own.
		opts = append(opts,
			fmt.Sprintf("workdir=%s", o.workPath(active.ID)),
			fmt.Sprintf("upperdir=%s", o.upperPath(active.ID)),
		)
	}

	lowers := make([]string, len(active.ParentIDs))
	for i, p := range active.ParentIDs {
		lowers[i] = o.upperPath(p)
	}
	opts = append(opts, fmt.Sprintf("lowerdir=%s", strings.Join(lowers, ":")))

	return []mount.Mount{
		{
			Type:    "overlay",
			Source:  "overlay",
			Options: opts,
		},
	}
}
// upperPath returns the overlay upper ("diff") directory for snapshot id.
func (o *snapshotter) upperPath(id string) string {
	return filepath.Join(o.root, "snapshots", id, "fs")
}

// workPath returns the overlay work directory for snapshot id.
func (o *snapshotter) workPath(id string) string {
	return filepath.Join(o.root, "snapshots", id, "work")
}