Merge pull request #2344 from tonistiigi/zstd

exporter: support creating blobs with zstd compression
master
Akihiro Suda 2021-09-08 02:24:13 +09:00 committed by GitHub
commit ea773f6a9b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 350 additions and 247 deletions

View File

@ -230,7 +230,7 @@ Keys supported by image output:
* `unpack=true`: unpack image after creation (for use with containerd)
* `dangling-name-prefix=[value]`: name image with `prefix@<digest>` , used for anonymous images
* `name-canonical=true`: add additional canonical name `name@<digest>`
* `compression=[uncompressed,gzip,estargz]`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`.
* `compression=[uncompressed,gzip,estargz,zstd]`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`.
* `force-compression=true`: forcefully apply `compression` option to all layers (including already existing layers).
If credentials are required, `buildctl` will attempt to read Docker configuration file `$DOCKER_CONFIG/config.json`.

8
cache/blobs.go vendored
View File

@ -11,6 +11,7 @@ import (
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/mount"
"github.com/klauspost/compress/zstd"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/flightcontrol"
@ -79,6 +80,9 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
case compression.EStargz:
compressorFunc, finalize = writeEStargz()
mediaType = ocispecs.MediaTypeImageLayerGzip
case compression.Zstd:
compressorFunc = zstdWriter
mediaType = ocispecs.MediaTypeImageLayer + "+zstd"
default:
return nil, errors.Errorf("unknown layer compression type: %q", compressionType)
}
@ -350,3 +354,7 @@ func ensureCompression(ctx context.Context, ref *immutableRef, compressionType c
})
return err
}
// zstdWriter is a compressor function that wraps dest in a zstd
// compression stream. requiredMediaType is unused; the parameter exists
// only to satisfy the compressor function signature.
func zstdWriter(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
	zw, err := zstd.NewWriter(dest)
	if err != nil {
		// Return a literal nil interface on error. Returning zw here
		// would box a typed-nil *zstd.Encoder into io.WriteCloser,
		// making the result non-nil to callers (nil-interface trap).
		return nil, err
	}
	return zw, nil
}

View File

@ -1,3 +1,4 @@
//go:build linux
// +build linux
package cache
@ -34,7 +35,6 @@ var emptyDesc = ocispecs.Descriptor{}
// be computed (e.g. because the mounts aren't overlayfs), it returns
// an error.
func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) {
// Get upperdir location if mounts are overlayfs that can be processed by this differ.
upperdir, err := getOverlayUpperdir(lower, upper)
if err != nil {
@ -50,6 +50,8 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
compressorFunc = func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
return ctdcompression.CompressStream(dest, ctdcompression.Gzip)
}
case ocispecs.MediaTypeImageLayer + "+zstd":
compressorFunc = zstdWriter
default:
return emptyDesc, false, errors.Errorf("unsupported diff media type: %v", mediaType)
}

119
cache/converter.go vendored
View File

@ -6,12 +6,13 @@ import (
"fmt"
"io"
cdcompression "github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/converter"
"github.com/containerd/containerd/images/converter/uncompress"
"github.com/containerd/containerd/labels"
"github.com/klauspost/compress/zstd"
"github.com/moby/buildkit/util/compression"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@ -23,11 +24,15 @@ import (
func needsConversion(mediaType string, compressionType compression.Type) (bool, error) {
switch compressionType {
case compression.Uncompressed:
if !images.IsLayerType(mediaType) || uncompress.IsUncompressedType(mediaType) {
if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Uncompressed {
return false, nil
}
case compression.Gzip:
if !images.IsLayerType(mediaType) || isGzipCompressedType(mediaType) {
if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Gzip {
return false, nil
}
case compression.Zstd:
if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Zstd {
return false, nil
}
case compression.EStargz:
@ -49,23 +54,49 @@ func getConverter(desc ocispecs.Descriptor, compressionType compression.Type) (c
// No conversion. No need to return an error here.
return nil, nil
}
switch compressionType {
c := conversion{target: compressionType}
from := compression.FromMediaType(desc.MediaType)
switch from {
case compression.Uncompressed:
return uncompress.LayerConvertFunc, nil
case compression.Gzip:
convertFunc := func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriter(w), nil }
return gzipLayerConvertFunc(compressionType, convertFunc, nil), nil
case compression.EStargz:
compressorFunc, finalize := writeEStargz()
convertFunc := func(w io.Writer) (io.WriteCloser, error) { return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip) }
return gzipLayerConvertFunc(compressionType, convertFunc, finalize), nil
case compression.Gzip, compression.Zstd:
c.decompress = cdcompression.DecompressStream
default:
return nil, fmt.Errorf("unknown compression type during conversion: %q", compressionType)
}
return nil, errors.Errorf("unsupported source compression type %q from mediatype %q", from, desc.MediaType)
}
func gzipLayerConvertFunc(compressionType compression.Type, convertFunc func(w io.Writer) (io.WriteCloser, error), finalize func(context.Context, content.Store) (map[string]string, error)) converter.ConvertFunc {
return func(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (*ocispecs.Descriptor, error) {
switch compressionType {
case compression.Uncompressed:
case compression.Gzip:
c.compress = func(w io.Writer) (io.WriteCloser, error) {
return gzip.NewWriter(w), nil
}
case compression.Zstd:
c.compress = func(w io.Writer) (io.WriteCloser, error) {
return zstd.NewWriter(w)
}
case compression.EStargz:
compressorFunc, finalize := writeEStargz()
c.compress = func(w io.Writer) (io.WriteCloser, error) {
return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip)
}
c.finalize = finalize
default:
return nil, errors.Errorf("unknown target compression type during conversion: %q", compressionType)
}
return (&c).convert, nil
}
// conversion describes how to convert a single layer blob from its
// current compression format to a target format. Zero-value fields are
// skipped: a nil decompress means the source is read as-is, a nil
// compress means the output is written uncompressed.
type conversion struct {
	// target is the compression type the layer is being converted to.
	target compression.Type
	// decompress, when non-nil, decodes the source layer stream before
	// recompression.
	decompress func(io.Reader) (cdcompression.DecompressReadCloser, error)
	// compress, when non-nil, wraps the destination writer with the
	// target compression stream.
	compress func(w io.Writer) (io.WriteCloser, error)
	// finalize, when non-nil, runs after the conversion to produce
	// extra annotations for the new descriptor.
	finalize func(context.Context, content.Store) (map[string]string, error)
}
func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (*ocispecs.Descriptor, error) {
// prepare the source and destination
info, err := cs.Info(ctx, desc.Digest)
if err != nil {
@ -80,7 +111,7 @@ func gzipLayerConvertFunc(compressionType compression.Type, convertFunc func(w i
return nil, err
}
defer ra.Close()
ref := fmt.Sprintf("convert-from-%s-to-%s", desc.Digest, compressionType.String())
ref := fmt.Sprintf("convert-from-%s-to-%s", desc.Digest, c.target.String())
w, err := cs.Writer(ctx, content.WithRef(ref))
if err != nil {
return nil, err
@ -89,20 +120,36 @@ func gzipLayerConvertFunc(compressionType compression.Type, convertFunc func(w i
if err := w.Truncate(0); err != nil { // Old written data possibly remains
return nil, err
}
zw, err := convertFunc(w)
var zw io.WriteCloser = w
var compress io.WriteCloser
if c.compress != nil {
zw, err = c.compress(zw)
if err != nil {
return nil, err
}
defer zw.Close()
compress = zw
}
// convert this layer
diffID := digest.Canonical.Digester()
if _, err := io.Copy(zw, io.TeeReader(io.NewSectionReader(ra, 0, ra.Size()), diffID.Hash())); err != nil {
var rdr io.Reader = io.NewSectionReader(ra, 0, ra.Size())
if c.decompress != nil {
rc, err := c.decompress(rdr)
if err != nil {
return nil, err
}
if err := zw.Close(); err != nil { // Flush the writer
defer rc.Close()
rdr = rc
}
if _, err := io.Copy(zw, io.TeeReader(rdr, diffID.Hash())); err != nil {
return nil, err
}
if compress != nil {
if err := compress.Close(); err != nil { // Flush the writer
return nil, err
}
}
labelz[labels.LabelUncompressed] = diffID.Digest().String() // update diffID label
if err = w.Commit(ctx, 0, "", content.WithLabels(labelz)); err != nil && !errdefs.IsAlreadyExists(err) {
return nil, err
@ -116,11 +163,11 @@ func gzipLayerConvertFunc(compressionType compression.Type, convertFunc func(w i
}
newDesc := desc
newDesc.MediaType = convertMediaTypeToGzip(desc.MediaType)
newDesc.MediaType = c.target.DefaultMediaType()
newDesc.Digest = info.Digest
newDesc.Size = info.Size
if finalize != nil {
a, err := finalize(ctx, cs)
if c.finalize != nil {
a, err := c.finalize(ctx, cs)
if err != nil {
return nil, errors.Wrapf(err, "failed finalize compression")
}
@ -133,29 +180,3 @@ func gzipLayerConvertFunc(compressionType compression.Type, convertFunc func(w i
}
return &newDesc, nil
}
}
// isGzipCompressedType reports whether mt is one of the known
// gzip-compressed layer media types (Docker schema2 or OCI).
func isGzipCompressedType(mt string) bool {
	gzipTypes := []string{
		images.MediaTypeDockerSchema2LayerGzip,
		images.MediaTypeDockerSchema2LayerForeignGzip,
		ocispecs.MediaTypeImageLayerGzip,
		ocispecs.MediaTypeImageLayerNonDistributableGzip,
	}
	for _, t := range gzipTypes {
		if mt == t {
			return true
		}
	}
	return false
}
// convertMediaTypeToGzip maps an uncompressed layer media type to its
// gzip-compressed equivalent. Media types that are not uncompressed
// layer types are returned unchanged.
func convertMediaTypeToGzip(mt string) string {
	if !uncompress.IsUncompressedType(mt) {
		return mt
	}
	// Docker media types use a "." separator for the compression
	// suffix; OCI media types use "+".
	if images.IsDockerType(mt) {
		return mt + ".gzip"
	}
	return mt + "+gzip"
}

9
cache/estargz.go vendored
View File

@ -6,9 +6,10 @@ import (
"io"
"sync"
"github.com/containerd/containerd/archive/compression"
cdcompression "github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/stargz-snapshotter/estargz"
"github.com/moby/buildkit/util/compression"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@ -22,8 +23,8 @@ func writeEStargz() (compressorFunc compressor, finalize func(context.Context, c
var bInfo blobInfo
var mu sync.Mutex
return func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
if !isGzipCompressedType(requiredMediaType) {
return nil, fmt.Errorf("unsupported media type for estargz compressor %q", requiredMediaType)
if compression.FromMediaType(requiredMediaType) != compression.Gzip {
return nil, errors.Errorf("unsupported media type for estargz compressor %q", requiredMediaType)
}
done := make(chan struct{})
pr, pw := io.Pipe()
@ -127,7 +128,7 @@ func calculateBlob() (io.WriteCloser, chan blobInfo) {
c := new(counter)
dgstr := digest.Canonical.Digester()
diffID := digest.Canonical.Digester()
decompressR, err := compression.DecompressStream(io.TeeReader(pr, dgstr.Hash()))
decompressR, err := cdcompression.DecompressStream(io.TeeReader(pr, dgstr.Hash()))
if err != nil {
pr.CloseWithError(err)
return

63
cache/manager_test.go vendored
View File

@ -28,6 +28,7 @@ import (
"github.com/containerd/containerd/snapshots"
"github.com/containerd/containerd/snapshots/native"
"github.com/containerd/stargz-snapshotter/estargz"
"github.com/klauspost/compress/zstd"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session"
@ -1065,7 +1066,6 @@ func TestGetRemote(t *testing.T) {
// make some lazy refs from blobs
expectedContent := map[digest.Digest]struct{}{}
variant := map[digest.Digest]digest.Digest{}
esgz2gzip := map[digest.Digest]digest.Digest{}
var descs []ocispecs.Descriptor
for i := 0; i < 2; i++ {
@ -1091,11 +1091,14 @@ func TestGetRemote(t *testing.T) {
require.NoError(t, err)
expectedContent[uncompressedDesc.Digest] = struct{}{}
esgzDgst, uncompressedEsgzDgst, err := esgzBlobDigest(uncompressedBlobBytes)
esgzDgst, err := esgzBlobDigest(uncompressedBlobBytes)
require.NoError(t, err)
expectedContent[esgzDgst] = struct{}{}
variant[uncompressedEsgzDgst] = uncompressedDesc.Digest
// expectedContent[esgzDgst] = struct{}{} // disabled
esgz2gzip[esgzDgst] = desc.Digest
zstdDigest, err := zstdBlobDigest(uncompressedBlobBytes)
require.NoError(t, err)
expectedContent[zstdDigest] = struct{}{}
}
// Create 3 levels of mutable refs, where each parent ref has 2 children (this tests parallel creation of
@ -1129,12 +1132,15 @@ func TestGetRemote(t *testing.T) {
uncompressedBlobBytes, uncompressedDesc, err := fileToBlob(f, false)
require.NoError(t, err)
expectedContent[uncompressedDesc.Digest] = struct{}{}
esgzDgst, uncompressedEsgzDgst, err := esgzBlobDigest(uncompressedBlobBytes)
esgzDgst, err := esgzBlobDigest(uncompressedBlobBytes)
require.NoError(t, err)
expectedContent[esgzDgst] = struct{}{}
variant[uncompressedEsgzDgst] = uncompressedDesc.Digest
// expectedContent[esgzDgst] = struct{}{}
esgz2gzip[esgzDgst] = desc.Digest
zstdDigest, err := zstdBlobDigest(uncompressedBlobBytes)
require.NoError(t, err)
expectedContent[zstdDigest] = struct{}{}
f.Close()
err = lm.Unmount()
require.NoError(t, err)
@ -1160,7 +1166,7 @@ func TestGetRemote(t *testing.T) {
eg, egctx := errgroup.WithContext(ctx)
for _, ir := range refs {
ir := ir.(*immutableRef)
for _, compressionType := range []compression.Type{compression.Uncompressed, compression.Gzip, compression.EStargz} {
for _, compressionType := range []compression.Type{compression.Uncompressed, compression.Gzip /*compression.EStargz,*/, compression.Zstd} {
compressionType := compressionType
eg.Go(func() error {
remote, err := ir.GetRemote(egctx, true, compressionType, true, nil)
@ -1174,14 +1180,13 @@ func TestGetRemote(t *testing.T) {
require.Equal(t, ocispecs.MediaTypeImageLayerGzip, desc.MediaType)
case compression.EStargz:
require.Equal(t, ocispecs.MediaTypeImageLayerGzip, desc.MediaType)
case compression.Zstd:
require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", desc.MediaType)
default:
require.Fail(t, "unhandled media type", compressionType)
}
dgst := desc.Digest
if v, ok := variant[dgst]; ok {
dgst = v
}
require.Contains(t, expectedContent, dgst)
require.Contains(t, expectedContent, dgst, "for %v", compressionType)
checkDescriptor(ctx, t, co.cs, desc, compressionType)
r := refChain[i]
@ -1223,9 +1228,6 @@ func TestGetRemote(t *testing.T) {
// verify there's a 1-to-1 mapping between the content store and what we expected to be there
err = co.cs.Walk(ctx, func(info content.Info) error {
dgst := info.Digest
if v, ok := variant[dgst]; ok {
dgst = v
}
var matched bool
for expected := range expectedContent {
if dgst == expected {
@ -1234,7 +1236,7 @@ func TestGetRemote(t *testing.T) {
break
}
}
require.True(t, matched, "match for blob: %s", info.Digest)
require.True(t, matched, "unexpected blob: %s", info.Digest)
checkInfo(ctx, t, co.cs, info)
return nil
})
@ -1316,31 +1318,36 @@ func checkDiskUsage(ctx context.Context, t *testing.T, cm Manager, inuse, unused
require.Equal(t, unused, unusedActual)
}
func esgzBlobDigest(uncompressedBlobBytes []byte) (digest.Digest, digest.Digest, error) {
func esgzBlobDigest(uncompressedBlobBytes []byte) (digest.Digest, error) {
buf := new(bytes.Buffer)
compressorFunc, _ := writeEStargz()
w, err := compressorFunc(buf, ocispecs.MediaTypeImageLayerGzip)
if err != nil {
return "", "", err
return "", err
}
if _, err := io.Copy(w, bytes.NewReader(uncompressedBlobBytes)); err != nil {
return "", "", err
return "", err
}
if err := w.Close(); err != nil {
return "", "", err
return "", err
}
b := buf.Bytes()
esgzDgst := digest.FromBytes(b)
ur, err := gzip.NewReader(bytes.NewReader(b))
if err != nil {
return "", "", err
return digest.FromBytes(b), nil
}
defer ur.Close()
uncompressedDgst, err := digest.FromReader(ur)
// zstdBlobDigest compresses uncompressedBlobBytes with zstd and returns
// the digest of the resulting compressed blob.
//
// NOTE(review): the original text contained stray deleted diff lines
// (a three-value `return "", "", err` and the old esgzBlobDigest
// return) interleaved into this function's body; they are removed here.
func zstdBlobDigest(uncompressedBlobBytes []byte) (digest.Digest, error) {
	b := bytes.NewBuffer(nil)
	w, err := zstd.NewWriter(b)
	if err != nil {
		return "", err
	}
	if _, err := w.Write(uncompressedBlobBytes); err != nil {
		return "", err
	}
	// Close flushes the zstd frame; digesting before Close would miss data.
	if err := w.Close(); err != nil {
		return "", err
	}
	return digest.FromBytes(b.Bytes()), nil
}
func checkNumBlobs(ctx context.Context, t *testing.T, cs content.Store, expected int) {

View File

@ -131,6 +131,8 @@ func TestIntegration(t *testing.T) {
testFileOpInputSwap,
testRelativeMountpoint,
testLocalSourceDiffer,
testBuildExportZstd,
testPullZstdImage,
}, mirrors)
integration.Run(t, []integration.Test{
@ -2165,6 +2167,156 @@ func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
require.Equal(t, []byte("gzip"), item.Data)
}
// testBuildExportZstd verifies that the OCI exporter can create
// zstd-compressed layers: once with OCI media types (default) and once
// with Docker media types (oci-mediatypes=false). The layer blob must
// be byte-identical in both cases — only the media type differs.
func testBuildExportZstd(t *testing.T, sb integration.Sandbox) {
	c, err := New(sb.Context(), sb.Address())
	require.NoError(t, err)
	defer c.Close()

	busybox := llb.Image("busybox:latest")
	cmd := `sh -e -c "echo -n zstd > data"`

	st := llb.Scratch()
	st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)

	def, err := st.Marshal(sb.Context())
	require.NoError(t, err)

	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)

	out := filepath.Join(destDir, "out.tar")
	outW, err := os.Create(out)
	require.NoError(t, err)

	_, err = c.Solve(sb.Context(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type:   ExporterOCI,
				Output: fixedWriteCloser(outW),
				Attrs: map[string]string{
					"compression": "zstd",
				},
			},
		},
	}, nil)
	require.NoError(t, err)

	dt, err := ioutil.ReadFile(out)
	require.NoError(t, err)

	m, err := testutil.ReadTarToMap(dt, false)
	require.NoError(t, err)

	var index ocispecs.Index
	err = json.Unmarshal(m["index.json"].Data, &index)
	require.NoError(t, err)

	var mfst ocispecs.Manifest
	err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
	require.NoError(t, err)

	lastLayer := mfst.Layers[len(mfst.Layers)-1]
	require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType)

	zstdLayerDigest := lastLayer.Digest.Hex()
	// A zstd frame starts with the magic number 0x28 0xB5 0x2F 0xFD.
	// require.Equal takes (expected, actual): expected magic goes first.
	require.Equal(t, []byte{0x28, 0xb5, 0x2f, 0xfd}, m["blobs/sha256/"+zstdLayerDigest].Data[:4])

	// repeat without oci mediatype
	outW, err = os.Create(out)
	require.NoError(t, err)

	_, err = c.Solve(sb.Context(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type:   ExporterOCI,
				Output: fixedWriteCloser(outW),
				Attrs: map[string]string{
					"compression":    "zstd",
					"oci-mediatypes": "false",
				},
			},
		},
	}, nil)
	require.NoError(t, err)

	dt, err = ioutil.ReadFile(out)
	require.NoError(t, err)

	m, err = testutil.ReadTarToMap(dt, false)
	require.NoError(t, err)

	err = json.Unmarshal(m["index.json"].Data, &index)
	require.NoError(t, err)

	err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
	require.NoError(t, err)

	lastLayer = mfst.Layers[len(mfst.Layers)-1]
	require.Equal(t, images.MediaTypeDockerSchema2Layer+".zstd", lastLayer.MediaType)

	// Same blob content as the OCI-mediatype export, only the media
	// type annotation changed (expected value first).
	require.Equal(t, zstdLayerDigest, lastLayer.Digest.Hex())
}
// testPullZstdImage pushes a zstd-compressed image to a local registry,
// then pulls it back as the source of a copy and verifies the file
// content survives the round trip.
func testPullZstdImage(t *testing.T, sb integration.Sandbox) {
	c, err := New(sb.Context(), sb.Address())
	require.NoError(t, err)
	defer c.Close()

	busybox := llb.Image("busybox:latest")
	cmd := `sh -e -c "echo -n zstd > data"`

	st := llb.Scratch()
	st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)

	def, err := st.Marshal(sb.Context())
	require.NoError(t, err)

	registry, err := sb.NewRegistry()
	if errors.Is(err, integration.ErrorRequirements) {
		t.Skip(err.Error())
	}
	require.NoError(t, err)

	target := registry + "/buildkit/build/exporter:zstd"

	_, err = c.Solve(sb.Context(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type: ExporterImage,
				Attrs: map[string]string{
					"name":        target,
					"push":        "true",
					"compression": "zstd",
				},
			},
		},
	}, nil)
	require.NoError(t, err)

	// Pull the pushed image back and copy /data out of it.
	st = llb.Scratch().File(llb.Copy(llb.Image(target), "/data", "/zdata"))

	def, err = st.Marshal(sb.Context())
	require.NoError(t, err)

	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)

	_, err = c.Solve(sb.Context(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type:      ExporterLocal,
				OutputDir: destDir,
			},
		},
	}, nil)
	require.NoError(t, err)

	dt, err := ioutil.ReadFile(filepath.Join(destDir, "zdata"))
	require.NoError(t, err)
	// require.Equal takes (expected, actual): expected content first.
	require.Equal(t, []byte("zstd"), dt)
}
func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) {
skipDockerd(t, sb)
requiresLinux(t)

View File

@ -144,6 +144,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
case "estargz":
i.layerCompression = compression.EStargz
esgz = true
case "zstd":
i.layerCompression = compression.Zstd
case "uncompressed":
i.layerCompression = compression.Uncompressed
default:

View File

@ -71,6 +71,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
case "estargz":
i.layerCompression = compression.EStargz
esgz = true
case "zstd":
i.layerCompression = compression.Zstd
case "uncompressed":
i.layerCompression = compression.Uncompressed
default:

2
go.mod
View File

@ -98,7 +98,7 @@ require (
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
github.com/klauspost/compress v1.12.3 // indirect
github.com/klauspost/compress v1.12.3
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/moby/sys/mount v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.4.1 // indirect

View File

@ -27,10 +27,18 @@ const (
// EStargz is used for estargz data.
EStargz
// Zstd is used for Zstandard data.
Zstd
// UnknownCompression means not supported yet.
UnknownCompression Type = -1
)
const (
mediaTypeDockerSchema2LayerZstd = images.MediaTypeDockerSchema2Layer + ".zstd"
mediaTypeImageLayerZstd = ocispecs.MediaTypeImageLayer + "+zstd" // unreleased image-spec#790
)
var Default = Gzip
func (ct Type) String() string {
@ -41,17 +49,34 @@ func (ct Type) String() string {
return "gzip"
case EStargz:
return "estargz"
case Zstd:
return "zstd"
default:
return "unknown"
}
}
// DefaultMediaType returns the OCI layer media type produced for this
// compression type.
func (ct Type) DefaultMediaType() string {
	switch ct {
	case Uncompressed:
		return ocispecs.MediaTypeImageLayer
	case Gzip, EStargz:
		// estargz output shares the gzip media type.
		return ocispecs.MediaTypeImageLayerGzip
	case Zstd:
		return mediaTypeImageLayerZstd
	}
	// Unrecognized types get a deliberately unrecognizable suffix
	// rather than silently mapping to a valid media type.
	return ocispecs.MediaTypeImageLayer + "+unknown"
}
func FromMediaType(mediaType string) Type {
switch toOCILayerType[mediaType] {
case ocispecs.MediaTypeImageLayer:
return Uncompressed
case ocispecs.MediaTypeImageLayerGzip:
return Gzip
case mediaTypeImageLayerZstd:
return Zstd
default:
return UnknownCompression
}
@ -81,6 +106,7 @@ func DetectLayerMediaType(ctx context.Context, cs content.Store, id digest.Diges
return ocispecs.MediaTypeImageLayerGzip, nil
}
return images.MediaTypeDockerSchema2LayerGzip, nil
default:
return "", errors.Errorf("failed to detect layer %v compression type", id)
}
@ -108,6 +134,7 @@ func detectCompressionType(cr *io.SectionReader) (Type, error) {
for c, m := range map[Type][]byte{
Gzip: {0x1F, 0x8B, 0x08},
Zstd: {0x28, 0xB5, 0x2F, 0xFD},
} {
if n < len(m) {
continue
@ -127,6 +154,8 @@ var toDockerLayerType = map[string]string{
images.MediaTypeDockerSchema2LayerGzip: images.MediaTypeDockerSchema2LayerGzip,
images.MediaTypeDockerSchema2LayerForeign: images.MediaTypeDockerSchema2Layer,
images.MediaTypeDockerSchema2LayerForeignGzip: images.MediaTypeDockerSchema2LayerGzip,
mediaTypeImageLayerZstd: mediaTypeDockerSchema2LayerZstd,
mediaTypeDockerSchema2LayerZstd: mediaTypeDockerSchema2LayerZstd,
}
var toOCILayerType = map[string]string{
@ -136,6 +165,8 @@ var toOCILayerType = map[string]string{
images.MediaTypeDockerSchema2LayerGzip: ocispecs.MediaTypeImageLayerGzip,
images.MediaTypeDockerSchema2LayerForeign: ocispecs.MediaTypeImageLayer,
images.MediaTypeDockerSchema2LayerForeignGzip: ocispecs.MediaTypeImageLayerGzip,
mediaTypeImageLayerZstd: mediaTypeImageLayerZstd,
mediaTypeDockerSchema2LayerZstd: mediaTypeImageLayerZstd,
}
func convertLayerMediaType(mediaType string, oci bool) string {

View File

@ -1,122 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package uncompress
import (
"compress/gzip"
"context"
"fmt"
"io"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/converter"
"github.com/containerd/containerd/labels"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// Compile-time check that LayerConvertFunc satisfies converter.ConvertFunc.
var _ converter.ConvertFunc = LayerConvertFunc

// LayerConvertFunc converts tar.gz layers into uncompressed tar layers.
// Media type is changed, e.g., "application/vnd.oci.image.layer.v1.tar+gzip" -> "application/vnd.oci.image.layer.v1.tar"
// It returns (nil, nil) when the descriptor needs no conversion, and a
// new descriptor (digest, size, media type updated) on success.
func LayerConvertFunc(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
	if !images.IsLayerType(desc.MediaType) || IsUncompressedType(desc.MediaType) {
		// No conversion. No need to return an error here.
		return nil, nil
	}
	// Info is fetched up front so the source blob's labels can be
	// carried over to the uncompressed blob at Commit time.
	info, err := cs.Info(ctx, desc.Digest)
	if err != nil {
		return nil, err
	}
	readerAt, err := cs.ReaderAt(ctx, desc)
	if err != nil {
		return nil, err
	}
	defer readerAt.Close()
	sr := io.NewSectionReader(readerAt, 0, desc.Size)
	// Only gzip-compressed sources are handled by this converter.
	newR, err := gzip.NewReader(sr)
	if err != nil {
		return nil, err
	}
	defer newR.Close()
	ref := fmt.Sprintf("convert-uncompress-from-%s", desc.Digest)
	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
	if err != nil {
		return nil, err
	}
	defer w.Close()
	// Reset the writing position
	// Old writer possibly remains without aborted
	// (e.g. conversion interrupted by a signal)
	if err := w.Truncate(0); err != nil {
		return nil, err
	}
	n, err := io.Copy(w, newR)
	if err != nil {
		return nil, err
	}
	// Close the gzip reader explicitly to surface any trailing
	// checksum/stream errors (the deferred Close is then a no-op).
	if err := newR.Close(); err != nil {
		return nil, err
	}
	// no need to retain "containerd.io/uncompressed" label, but retain other labels ("containerd.io/distribution.source.*")
	labelsMap := info.Labels
	delete(labelsMap, labels.LabelUncompressed)
	// Size 0 and empty expected digest let the content store fill them
	// in from what was actually written.
	if err = w.Commit(ctx, 0, "", content.WithLabels(labelsMap)); err != nil && !errdefs.IsAlreadyExists(err) {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	newDesc := desc
	newDesc.Digest = w.Digest()
	newDesc.Size = n
	newDesc.MediaType = convertMediaType(newDesc.MediaType)
	return &newDesc, nil
}
// IsUncompressedType returns whether the provided media type is
// considered an uncompressed layer type (Docker schema2 or OCI).
func IsUncompressedType(mt string) bool {
	uncompressedTypes := []string{
		images.MediaTypeDockerSchema2Layer,
		images.MediaTypeDockerSchema2LayerForeign,
		ocispec.MediaTypeImageLayer,
		ocispec.MediaTypeImageLayerNonDistributable,
	}
	for _, t := range uncompressedTypes {
		if mt == t {
			return true
		}
	}
	return false
}
// convertMediaType maps a gzip-compressed layer media type to its
// uncompressed equivalent; any other media type is returned unchanged.
func convertMediaType(mt string) string {
	uncompressed := map[string]string{
		images.MediaTypeDockerSchema2LayerGzip:          images.MediaTypeDockerSchema2Layer,
		images.MediaTypeDockerSchema2LayerForeignGzip:   images.MediaTypeDockerSchema2LayerForeign,
		ocispec.MediaTypeImageLayerGzip:                 ocispec.MediaTypeImageLayer,
		ocispec.MediaTypeImageLayerNonDistributableGzip: ocispec.MediaTypeImageLayerNonDistributable,
	}
	if converted, ok := uncompressed[mt]; ok {
		return converted
	}
	return mt
}

1
vendor/modules.txt vendored
View File

@ -93,7 +93,6 @@ github.com/containerd/containerd/identifiers
github.com/containerd/containerd/images
github.com/containerd/containerd/images/archive
github.com/containerd/containerd/images/converter
github.com/containerd/containerd/images/converter/uncompress
github.com/containerd/containerd/labels
github.com/containerd/containerd/leases
github.com/containerd/containerd/leases/proxy