exporter: add OCI exporter

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
docker-18.09
Tonis Tiigi 2017-12-12 16:59:06 -08:00
parent 8840dc7439
commit 6edccb7913
9 changed files with 623 additions and 4 deletions


@@ -3,6 +3,8 @@ package client
const (
ExporterImage = "image"
ExporterLocal = "local"
ExporterOCI = "oci"
exporterLocalOutputDir = "output"
exporterOCIDestination = "output"
)
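
For orientation, a minimal client-side sketch of how the new ExporterOCI constant might be used. This is not part of the commit: it assumes the SolveOpt fields shown in the solve.go hunk below and that the final Solve argument is the optional status channel.

import (
	"context"

	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
)

// Sketch only: request an OCI tarball export from a running buildkitd.
func solveToOCI(ctx context.Context, c *client.Client, def *llb.Definition) error {
	return c.Solve(ctx, def, client.SolveOpt{
		Exporter: client.ExporterOCI,
		// "output" is exporterOCIDestination above: the daemon streams the
		// OCI layout tarball back over the session and the client writes it
		// to this path (writing to a console is refused).
		ExporterAttrs: map[string]string{"output": "image.tar"},
	}, nil)
}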


@@ -8,6 +8,7 @@ import (
"strings"
"time"
"github.com/containerd/console"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/identity"
@@ -71,12 +72,30 @@ func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, s
s.Allow(auth.NewDockerAuthProvider())
if opt.Exporter == ExporterLocal {
switch opt.Exporter {
case ExporterLocal:
outputDir, ok := opt.ExporterAttrs[exporterLocalOutputDir]
if !ok {
return errors.Errorf("output directory is required for local exporter")
}
s.Allow(filesync.NewFSSyncTarget(outputDir))
case ExporterOCI:
outputFile, ok := opt.ExporterAttrs[exporterOCIDestination]
if ok {
fi, err := os.Stat(outputFile)
if err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "invlid destination file: %s", outputFile)
}
if err == nil && fi.IsDir() {
return errors.Errorf("destination file is a directory")
}
} else {
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
return errors.Errorf("output file is required for OCI exporter. refusing to write to console")
}
outputFile = ""
}
s.Allow(filesync.NewFSSyncTargetFile(outputFile))
}
eg.Go(func() error {


@@ -45,6 +45,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
i.push = true
case keyInsecure:
i.insecure = true
case exporterImageConfig:
i.config = []byte(v)
default:
logrus.Warnf("image exporter: unknown option %s", k)
}
@@ -57,6 +59,7 @@ type imageExporterInstance struct {
targetName string
push bool
insecure bool
config []byte
}
func (e *imageExporterInstance) Name() string {
@@ -64,7 +67,10 @@ func (e *imageExporterInstance) Name() string {
}
func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableRef, opt map[string][]byte) error {
desc, err := e.opt.ImageWriter.Commit(ctx, ref, opt[exporterImageConfig])
if config, ok := opt[exporterImageConfig]; ok {
e.config = config
}
desc, err := e.opt.ImageWriter.Commit(ctx, ref, e.config)
if err != nil {
return err
}
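
The hunk above lets the image exporter take the image config either as a Resolve-time string option or as an Export-time byte option, with the Export-time value winning. A hedged sketch of a hypothetical caller follows; exportWithConfig is not part of this commit, and "containerimage.config" is the exporterImageConfig key used here and in the oci exporter below.

import (
	"context"

	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/exporter"
)

// Sketch only: exp and ref are assumed to come from the worker/solver.
func exportWithConfig(ctx context.Context, exp exporter.Exporter, ref cache.ImmutableRef, configJSON []byte) error {
	inst, err := exp.Resolve(ctx, map[string]string{
		"containerimage.config": string(configJSON),
	})
	if err != nil {
		return err
	}
	// An Export-time option with the same key overrides the Resolve-time value.
	return inst.Export(ctx, ref, map[string][]byte{
		"containerimage.config": configJSON,
	})
}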

exporter/oci/export.go (new file)

@@ -0,0 +1,108 @@
package oci
import (
"errors"
"time"
"github.com/containerd/containerd/images/oci"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/exporter/containerimage"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/util/progress"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
const (
exporterImageConfig = "containerimage.config"
)
type Opt struct {
SessionManager *session.Manager
ImageWriter *containerimage.ImageWriter
}
type imageExporter struct {
opt Opt
}
func New(opt Opt) (exporter.Exporter, error) {
im := &imageExporter{opt: opt}
return im, nil
}
func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
id := session.FromContext(ctx)
if id == "" {
return nil, errors.New("could not access local files without session")
}
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
caller, err := e.opt.SessionManager.Get(timeoutCtx, id)
if err != nil {
return nil, err
}
i := &imageExporterInstance{imageExporter: e, caller: caller}
for k, v := range opt {
switch k {
case exporterImageConfig:
i.config = []byte(v)
default:
logrus.Warnf("oci exporter: unknown option %s", k)
}
}
return i, nil
}
type imageExporterInstance struct {
*imageExporter
config []byte
caller session.Caller
}
func (e *imageExporterInstance) Name() string {
return "exporting to oci image format"
}
func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableRef, opt map[string][]byte) error {
if config, ok := opt[exporterImageConfig]; ok {
e.config = config
}
desc, err := e.opt.ImageWriter.Commit(ctx, ref, e.config)
if err != nil {
return err
}
w, err := filesync.CopyFileWriter(ctx, e.caller)
if err != nil {
return err
}
report := oneOffProgress(ctx, "sending tarball")
if err := (&oci.V1Exporter{}).Export(ctx, e.opt.ImageWriter.ContentStore(), *desc, w); err != nil {
w.Close()
return report(err)
}
return report(w.Close())
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
pw.Write(id, st)
pw.Close()
return err
}
}


@ -1,9 +1,12 @@
package filesync
import (
"bufio"
io "io"
"os"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/tonistiigi/fsutil"
"google.golang.org/grpc"
@@ -17,6 +20,46 @@ func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, p
}, progress)
}
func newStreamWriter(stream grpc.ClientStream) io.WriteCloser {
wc := &streamWriterCloser{ClientStream: stream}
return &bufferedWriteCloser{Writer: bufio.NewWriter(wc), Closer: wc}
}
type bufferedWriteCloser struct {
*bufio.Writer
io.Closer
}
func (bwc *bufferedWriteCloser) Close() error {
if err := bwc.Writer.Flush(); err != nil {
return err
}
return bwc.Closer.Close()
}
type streamWriterCloser struct {
grpc.ClientStream
}
func (wc *streamWriterCloser) Write(dt []byte) (int, error) {
if err := wc.ClientStream.SendMsg(&BytesMessage{Data: dt}); err != nil {
return 0, err
}
return len(dt), nil
}
func (wc *streamWriterCloser) Close() error {
if err := wc.ClientStream.CloseSend(); err != nil {
return err
}
// block until receiver is done
var bm BytesMessage
if err := wc.ClientStream.RecvMsg(&bm); err != io.EOF {
return err
}
return nil
}
func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb) error {
st := time.Now()
defer func() {
@@ -53,3 +96,18 @@ func syncTargetDiffCopy(ds grpc.Stream, dest string) error {
}(),
})
}
func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error {
for {
bm := BytesMessage{}
if err := ds.RecvMsg(&bm); err != nil {
if errors.Cause(err) == io.EOF {
return nil
}
return err
}
if _, err := wc.Write(bm.Data); err != nil {
return err
}
}
}


@@ -2,6 +2,7 @@ package filesync
import (
"fmt"
io "io"
"os"
"strings"
@@ -213,8 +214,17 @@ func NewFSSyncTarget(outdir string) session.Attachable {
return p
}
// NewFSSyncTargetFile allows writing into a file. An empty file name means stdout.
func NewFSSyncTargetFile(outfile string) session.Attachable {
p := &fsSyncTarget{
outfile: outfile,
}
return p
}
type fsSyncTarget struct {
outdir string
outdir string
outfile string
}
func (sp *fsSyncTarget) Register(server *grpc.Server) {
@@ -222,7 +232,21 @@ func (sp *fsSyncTarget) Register(server *grpc.Server) {
}
func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error {
return syncTargetDiffCopy(stream, sp.outdir)
if sp.outdir != "" {
return syncTargetDiffCopy(stream, sp.outdir)
}
var f *os.File
if sp.outfile == "" {
f = os.Stdout
} else {
var err error
f, err = os.Create(sp.outfile)
if err != nil {
return err
}
}
defer f.Close()
return writeTargetFile(stream, f)
}
func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progress func(int, bool)) error {
@@ -240,3 +264,19 @@ func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progres
return sendDiffCopy(cc, srcPath, nil, nil, progress, nil)
}
func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, error) {
method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy")
if !c.Supports(method) {
return nil, errors.Errorf("method %s not supported by the client", method)
}
client := NewFileSendClient(c.Conn())
cc, err := client.DiffCopy(ctx)
if err != nil {
return nil, err
}
return newStreamWriter(cc), nil
}
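
Together with the stream writer added in diffcopy.go above, these helpers form the send/receive pair for streaming a single file over the session: the client attaches a file target with NewFSSyncTargetFile, and the daemon-side exporter writes into it through CopyFileWriter. A hedged daemon-side sketch, assuming a session.Caller resolved from the SessionManager as in exporter/oci/export.go; streamFileToClient is hypothetical.

import (
	"context"
	"io"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
)

// Sketch only: stream an already-built tarball back to the client.
func streamFileToClient(ctx context.Context, caller session.Caller, tarball io.Reader) error {
	w, err := filesync.CopyFileWriter(ctx, caller)
	if err != nil {
		return err
	}
	if _, err := io.Copy(w, tarball); err != nil {
		w.Close()
		return err
	}
	// Close flushes the buffered writer, half-closes the gRPC stream and
	// blocks until the receiving side has finished writing the file.
	return w.Close()
}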


@@ -0,0 +1,188 @@
package oci
import (
"archive/tar"
"context"
"encoding/json"
"io"
"sort"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
ocispecs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// V1Exporter implements OCI Image Spec v1.
// It is up to the caller to put the "org.opencontainers.image.ref.name" annotation on desc.
//
// TODO(AkihiroSuda): add V1Exporter{TranslateMediaTypes: true} that transforms media types,
// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
// -> application/vnd.oci.image.layer.v1.tar+gzip
type V1Exporter struct {
}
// Export implements Exporter.
func (oe *V1Exporter) Export(ctx context.Context, store content.Store, desc ocispec.Descriptor, writer io.Writer) error {
tw := tar.NewWriter(writer)
defer tw.Close()
records := []tarRecord{
ociLayoutFile(""),
ociIndexRecord(desc),
}
algorithms := map[string]struct{}{}
exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
records = append(records, blobRecord(store, desc))
algorithms[desc.Digest.Algorithm().String()] = struct{}{}
return nil, nil
}
handlers := images.Handlers(
images.ChildrenHandler(store, platforms.Default()),
images.HandlerFunc(exportHandler),
)
// Walk sequentially since the number of fetches is likely one and doing it in
// parallel requires locking the export handler
if err := images.Walk(ctx, handlers, desc); err != nil {
return err
}
if len(algorithms) > 0 {
records = append(records, directoryRecord("blobs/", 0755))
for alg := range algorithms {
records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
}
}
return writeTar(ctx, tw, records)
}
type tarRecord struct {
Header *tar.Header
CopyTo func(context.Context, io.Writer) (int64, error)
}
func blobRecord(cs content.Store, desc ocispec.Descriptor) tarRecord {
path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
return tarRecord{
Header: &tar.Header{
Name: path,
Mode: 0444,
Size: desc.Size,
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
r, err := cs.ReaderAt(ctx, desc.Digest)
if err != nil {
return 0, err
}
defer r.Close()
// Verify digest
dgstr := desc.Digest.Algorithm().Digester()
n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
if err != nil {
return 0, err
}
if dgstr.Digest() != desc.Digest {
return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
}
return n, nil
},
}
}
func directoryRecord(name string, mode int64) tarRecord {
return tarRecord{
Header: &tar.Header{
Name: name,
Mode: mode,
Typeflag: tar.TypeDir,
},
}
}
func ociLayoutFile(version string) tarRecord {
if version == "" {
version = ocispec.ImageLayoutVersion
}
layout := ocispec.ImageLayout{
Version: version,
}
b, err := json.Marshal(layout)
if err != nil {
panic(err)
}
return tarRecord{
Header: &tar.Header{
Name: ocispec.ImageLayoutFile,
Mode: 0444,
Size: int64(len(b)),
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
n, err := w.Write(b)
return int64(n), err
},
}
}
func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
index := ocispec.Index{
Versioned: ocispecs.Versioned{
SchemaVersion: 2,
},
Manifests: manifests,
}
b, err := json.Marshal(index)
if err != nil {
panic(err)
}
return tarRecord{
Header: &tar.Header{
Name: "index.json",
Mode: 0644,
Size: int64(len(b)),
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
n, err := w.Write(b)
return int64(n), err
},
}
}
func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
sort.Slice(records, func(i, j int) bool {
return records[i].Header.Name < records[j].Header.Name
})
for _, record := range records {
if err := tw.WriteHeader(record.Header); err != nil {
return err
}
if record.CopyTo != nil {
n, err := record.CopyTo(ctx, tw)
if err != nil {
return err
}
if n != record.Header.Size {
return errors.Errorf("unexpected copy size for %s", record.Header.Name)
}
} else if record.Header.Size > 0 {
return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
}
}
return nil
}
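
The V1Exporter comment above leaves the "org.opencontainers.image.ref.name" annotation to the caller. A minimal hedged sketch of such a caller, assuming the content store, manifest descriptor and output writer already exist; exportTagged is a hypothetical helper, not part of this commit.

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images/oci"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// Sketch only: tag the descriptor so importers can derive an image name.
func exportTagged(ctx context.Context, store content.Store, desc ocispec.Descriptor, w io.Writer) error {
	if desc.Annotations == nil {
		desc.Annotations = map[string]string{}
	}
	desc.Annotations[ocispec.AnnotationRefName] = "latest"
	return (&oci.V1Exporter{}).Export(ctx, store, desc, w)
}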


@@ -0,0 +1,188 @@
// Package oci provides the importer and the exporter for OCI Image Spec.
package oci
import (
"archive/tar"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"path"
"strings"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// V1Importer implements OCI Image Spec v1.
type V1Importer struct {
// ImageName is prepended to either `:` + OCI ref name or `@` + digest (for anonymous refs).
// This field is mandatory at the moment, but may change in the future, e.g. to a ref map[string]string as in moby/moby#33355.
ImageName string
}
var _ images.Importer = &V1Importer{}
// Import implements Importer.
func (oi *V1Importer) Import(ctx context.Context, store content.Store, reader io.Reader) ([]images.Image, error) {
if oi.ImageName == "" {
return nil, errors.New("ImageName not set")
}
tr := tar.NewReader(reader)
var imgrecs []images.Image
foundIndexJSON := false
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
continue
}
hdrName := path.Clean(hdr.Name)
if hdrName == "index.json" {
if foundIndexJSON {
return nil, errors.New("duplicated index.json")
}
foundIndexJSON = true
imgrecs, err = onUntarIndexJSON(tr, oi.ImageName)
if err != nil {
return nil, err
}
continue
}
if strings.HasPrefix(hdrName, "blobs/") {
if err := onUntarBlob(ctx, tr, store, hdrName, hdr.Size); err != nil {
return nil, err
}
}
}
if !foundIndexJSON {
return nil, errors.New("no index.json found")
}
for _, img := range imgrecs {
err := setGCRefContentLabels(ctx, store, img.Target)
if err != nil {
return imgrecs, err
}
}
// FIXME(AkihiroSuda): set GC labels for unreferenced blobs (i.e. with unknown media types)?
return imgrecs, nil
}
func onUntarIndexJSON(r io.Reader, imageName string) ([]images.Image, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
var idx ocispec.Index
if err := json.Unmarshal(b, &idx); err != nil {
return nil, err
}
var imgrecs []images.Image
for _, m := range idx.Manifests {
ref, err := normalizeImageRef(imageName, m)
if err != nil {
return nil, err
}
imgrecs = append(imgrecs, images.Image{
Name: ref,
Target: m,
})
}
return imgrecs, nil
}
func normalizeImageRef(imageName string, manifest ocispec.Descriptor) (string, error) {
digest := manifest.Digest
if digest == "" {
return "", errors.Errorf("manifest with empty digest: %v", manifest)
}
ociRef := manifest.Annotations[ocispec.AnnotationRefName]
if ociRef == "" {
return imageName + "@" + digest.String(), nil
}
return imageName + ":" + ociRef, nil
}
func onUntarBlob(ctx context.Context, r io.Reader, store content.Store, name string, size int64) error {
// name is like "blobs/sha256/deadbeef"
split := strings.Split(name, "/")
if len(split) != 3 {
return errors.Errorf("unexpected name: %q", name)
}
algo := digest.Algorithm(split[1])
if !algo.Available() {
return errors.Errorf("unsupported algorithm: %s", algo)
}
dgst := digest.NewDigestFromHex(algo.String(), split[2])
return content.WriteBlob(ctx, store, "unknown-"+dgst.String(), r, size, dgst)
}
// GetChildrenDescriptors returns children blob descriptors for the following supported types:
// - images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest
// - images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex
func GetChildrenDescriptors(r io.Reader, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
var manifest ocispec.Manifest
if err := json.NewDecoder(r).Decode(&manifest); err != nil {
return nil, err
}
return append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...), nil
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
var index ocispec.Index
if err := json.NewDecoder(r).Decode(&index); err != nil {
return nil, err
}
return index.Manifests, nil
}
return nil, nil
}
func setGCRefContentLabels(ctx context.Context, store content.Store, desc ocispec.Descriptor) error {
info, err := store.Info(ctx, desc.Digest)
if err != nil {
if errdefs.IsNotFound(err) {
// when the archive is created from a multi-arch image,
// it may contain only blobs for a certain platform.
// So ErrNotFound (on manifest list) is expected here.
return nil
}
return err
}
ra, err := store.ReaderAt(ctx, desc.Digest)
if err != nil {
return err
}
defer ra.Close()
r := content.NewReader(ra)
children, err := GetChildrenDescriptors(r, desc)
if err != nil {
return err
}
if info.Labels == nil {
info.Labels = map[string]string{}
}
for i, child := range children {
// Note: child blob is not guaranteed to be written to the content store. (multi-arch)
info.Labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = child.Digest.String()
}
if _, err := store.Update(ctx, info, "labels"); err != nil {
return err
}
for _, child := range children {
if err := setGCRefContentLabels(ctx, store, child); err != nil {
return err
}
}
return nil
}
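
For the reverse direction, a hedged sketch of feeding a tarball produced by V1Exporter back through V1Importer and registering the resulting records, assuming a containerd content.Store and images.Store are available; importOCITar and the image name are hypothetical.

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/images/oci"
)

// Sketch only: read an OCI layout tarball into the stores.
func importOCITar(ctx context.Context, cs content.Store, is images.Store, r io.Reader) error {
	imgs, err := (&oci.V1Importer{ImageName: "docker.io/library/example"}).Import(ctx, cs, r)
	if err != nil {
		return err
	}
	for _, img := range imgs {
		// Name is ImageName + ":" + the ref.name annotation, or "@" + digest
		// when the descriptor carries no ref name.
		if _, err := is.Create(ctx, img); err != nil {
			return err
		}
	}
	return nil
}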


@@ -16,6 +16,7 @@ import (
"github.com/moby/buildkit/exporter"
imageexporter "github.com/moby/buildkit/exporter/containerimage"
localexporter "github.com/moby/buildkit/exporter/local"
ociexporter "github.com/moby/buildkit/exporter/oci"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot/blobmapping"
"github.com/moby/buildkit/source"
@@ -166,6 +167,15 @@ func NewWorker(opt WorkerOpt) (*Worker, error) {
}
exporters[client.ExporterLocal] = localExporter
ociExporter, err := ociexporter.New(ociexporter.Opt{
SessionManager: opt.SessionManager,
ImageWriter: iw,
})
if err != nil {
return nil, err
}
exporters[client.ExporterOCI] = ociExporter
ce := cacheimport.NewCacheExporter(cacheimport.ExporterOpt{
Snapshotter: bmSnapshotter,
ContentStore: opt.ContentStore,