Merge pull request #144 from tonistiigi/cache-export

Add support for build cache import/export
docker-18.09
Akihiro Suda 2017-10-17 18:57:22 +02:00 committed by GitHub
commit fac138e742
29 changed files with 2297 additions and 250 deletions
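In short: this lets a build's instruction cache be pushed to and pulled from a registry reference. The control API gains a CacheOptions message on SolveRequest (ExportRef/ImportRef), the new cache/blobs and cache/cacheimport packages serialize cache records into a manifest-list-shaped object on the registry, and buildctl build grows matching --export-cache/--import-cache flags.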

@ -1,5 +1,6 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// Code generated by protoc-gen-gogo.
// source: control.proto
// DO NOT EDIT!
/*
Package moby_buildkit_v1 is a generated protocol buffer package.
@ -12,6 +13,7 @@
DiskUsageResponse
UsageRecord
SolveRequest
CacheOptions
SolveResponse
StatusRequest
StatusResponse
@ -173,6 +175,7 @@ type SolveRequest struct {
Session string `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"`
Frontend string `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"`
FrontendAttrs map[string]string `protobuf:"bytes,7,rep,name=FrontendAttrs" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Cache CacheOptions `protobuf:"bytes,8,opt,name=Cache" json:"Cache"`
}
func (m *SolveRequest) Reset() { *m = SolveRequest{} }
@ -229,6 +232,37 @@ func (m *SolveRequest) GetFrontendAttrs() map[string]string {
return nil
}
func (m *SolveRequest) GetCache() CacheOptions {
if m != nil {
return m.Cache
}
return CacheOptions{}
}
type CacheOptions struct {
ExportRef string `protobuf:"bytes,1,opt,name=ExportRef,proto3" json:"ExportRef,omitempty"`
ImportRef string `protobuf:"bytes,2,opt,name=ImportRef,proto3" json:"ImportRef,omitempty"`
}
func (m *CacheOptions) Reset() { *m = CacheOptions{} }
func (m *CacheOptions) String() string { return proto.CompactTextString(m) }
func (*CacheOptions) ProtoMessage() {}
func (*CacheOptions) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{4} }
func (m *CacheOptions) GetExportRef() string {
if m != nil {
return m.ExportRef
}
return ""
}
func (m *CacheOptions) GetImportRef() string {
if m != nil {
return m.ImportRef
}
return ""
}
type SolveResponse struct {
Vtx []*Vertex `protobuf:"bytes,1,rep,name=vtx" json:"vtx,omitempty"`
}
@ -236,7 +270,7 @@ type SolveResponse struct {
func (m *SolveResponse) Reset() { *m = SolveResponse{} }
func (m *SolveResponse) String() string { return proto.CompactTextString(m) }
func (*SolveResponse) ProtoMessage() {}
func (*SolveResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{4} }
func (*SolveResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{5} }
func (m *SolveResponse) GetVtx() []*Vertex {
if m != nil {
@ -252,7 +286,7 @@ type StatusRequest struct {
func (m *StatusRequest) Reset() { *m = StatusRequest{} }
func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
func (*StatusRequest) ProtoMessage() {}
func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{5} }
func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{6} }
func (m *StatusRequest) GetRef() string {
if m != nil {
@ -270,7 +304,7 @@ type StatusResponse struct {
func (m *StatusResponse) Reset() { *m = StatusResponse{} }
func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
func (*StatusResponse) ProtoMessage() {}
func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{6} }
func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{7} }
func (m *StatusResponse) GetVertexes() []*Vertex {
if m != nil {
@ -306,7 +340,7 @@ type Vertex struct {
func (m *Vertex) Reset() { *m = Vertex{} }
func (m *Vertex) String() string { return proto.CompactTextString(m) }
func (*Vertex) ProtoMessage() {}
func (*Vertex) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{7} }
func (*Vertex) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{8} }
func (m *Vertex) GetName() string {
if m != nil {
@ -358,7 +392,7 @@ type VertexStatus struct {
func (m *VertexStatus) Reset() { *m = VertexStatus{} }
func (m *VertexStatus) String() string { return proto.CompactTextString(m) }
func (*VertexStatus) ProtoMessage() {}
func (*VertexStatus) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{8} }
func (*VertexStatus) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{9} }
func (m *VertexStatus) GetID() string {
if m != nil {
@ -419,7 +453,7 @@ type VertexLog struct {
func (m *VertexLog) Reset() { *m = VertexLog{} }
func (m *VertexLog) String() string { return proto.CompactTextString(m) }
func (*VertexLog) ProtoMessage() {}
func (*VertexLog) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{9} }
func (*VertexLog) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} }
func (m *VertexLog) GetTimestamp() time.Time {
if m != nil {
@ -449,7 +483,7 @@ type BytesMessage struct {
func (m *BytesMessage) Reset() { *m = BytesMessage{} }
func (m *BytesMessage) String() string { return proto.CompactTextString(m) }
func (*BytesMessage) ProtoMessage() {}
func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} }
func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{11} }
func (m *BytesMessage) GetData() []byte {
if m != nil {
@ -463,6 +497,7 @@ func init() {
proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse")
proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord")
proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest")
proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions")
proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.SolveResponse")
proto.RegisterType((*StatusRequest)(nil), "moby.buildkit.v1.StatusRequest")
proto.RegisterType((*StatusResponse)(nil), "moby.buildkit.v1.StatusResponse")
@ -924,6 +959,44 @@ func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) {
i += copy(dAtA[i:], v)
}
}
dAtA[i] = 0x42
i++
i = encodeVarintControl(dAtA, i, uint64(m.Cache.Size()))
n4, err := m.Cache.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n4
return i, nil
}
func (m *CacheOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CacheOptions) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ExportRef) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintControl(dAtA, i, uint64(len(m.ExportRef)))
i += copy(dAtA[i:], m.ExportRef)
}
if len(m.ImportRef) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintControl(dAtA, i, uint64(len(m.ImportRef)))
i += copy(dAtA[i:], m.ImportRef)
}
return i, nil
}
@ -1091,21 +1164,21 @@ func (m *Vertex) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
i++
i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started)))
n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i:])
n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i:])
if err != nil {
return 0, err
}
i += n4
i += n5
}
if m.Completed != nil {
dAtA[i] = 0x32
i++
i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed)))
n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i:])
n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i:])
if err != nil {
return 0, err
}
i += n5
i += n6
}
if len(m.Error) > 0 {
dAtA[i] = 0x3a
@ -1162,30 +1235,30 @@ func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x32
i++
i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
if err != nil {
return 0, err
}
i += n6
if m.Started != nil {
dAtA[i] = 0x3a
i++
i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started)))
n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i:])
n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
if err != nil {
return 0, err
}
i += n7
if m.Started != nil {
dAtA[i] = 0x3a
i++
i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started)))
n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i:])
if err != nil {
return 0, err
}
i += n8
}
if m.Completed != nil {
dAtA[i] = 0x42
i++
i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed)))
n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i:])
n9, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i:])
if err != nil {
return 0, err
}
i += n8
i += n9
}
return i, nil
}
@ -1214,11 +1287,11 @@ func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintControl(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
n9, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
if err != nil {
return 0, err
}
i += n9
i += n10
if m.Stream != 0 {
dAtA[i] = 0x18
i++
@ -1381,6 +1454,22 @@ func (m *SolveRequest) Size() (n int) {
n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
}
}
l = m.Cache.Size()
n += 1 + l + sovControl(uint64(l))
return n
}
func (m *CacheOptions) Size() (n int) {
var l int
_ = l
l = len(m.ExportRef)
if l > 0 {
n += 1 + l + sovControl(uint64(l))
}
l = len(m.ImportRef)
if l > 0 {
n += 1 + l + sovControl(uint64(l))
}
return n
}
@ -2389,6 +2478,144 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
m.FrontendAttrs[mapkey] = mapvalue
}
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthControl
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipControl(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthControl
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CacheOptions) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CacheOptions: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CacheOptions: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExportRef", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthControl
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ExportRef = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ImportRef", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthControl
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ImportRef = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipControl(dAtA[iNdEx:])
@ -3584,67 +3811,70 @@ var (
func init() { proto.RegisterFile("control.proto", fileDescriptorControl) }
var fileDescriptorControl = []byte{
// 978 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdf, 0x6e, 0xe3, 0xc4,
0x17, 0xfe, 0xd9, 0x4e, 0x9d, 0xe4, 0x34, 0xad, 0xfa, 0x1b, 0xa1, 0x95, 0x15, 0x44, 0x12, 0xcc,
0x4d, 0x54, 0xb1, 0xce, 0x6e, 0x01, 0x09, 0x15, 0x09, 0xed, 0xa6, 0x59, 0x44, 0xab, 0xad, 0x84,
0xa6, 0x5b, 0xb8, 0x76, 0x92, 0xa9, 0xd7, 0xaa, 0xe3, 0x31, 0x33, 0xe3, 0xa8, 0xe1, 0x29, 0x78,
0x17, 0x9e, 0x80, 0x0b, 0xc4, 0x5e, 0x72, 0x89, 0xb8, 0x28, 0xa8, 0x0f, 0xc0, 0x33, 0xa0, 0xf9,
0x63, 0xd7, 0xd9, 0x34, 0xdb, 0x3f, 0x7b, 0x95, 0x39, 0x93, 0xef, 0x7c, 0x73, 0xe6, 0x7c, 0x33,
0xdf, 0x18, 0xb6, 0x26, 0x34, 0x15, 0x8c, 0x26, 0x41, 0xc6, 0xa8, 0xa0, 0x68, 0x67, 0x46, 0xc7,
0x8b, 0x60, 0x9c, 0xc7, 0xc9, 0xf4, 0x3c, 0x16, 0xc1, 0xfc, 0x69, 0xfb, 0x71, 0x14, 0x8b, 0xd7,
0xf9, 0x38, 0x98, 0xd0, 0xd9, 0x20, 0xa2, 0x11, 0x1d, 0x28, 0xe0, 0x38, 0x3f, 0x53, 0x91, 0x0a,
0xd4, 0x48, 0x13, 0xb4, 0xbb, 0x11, 0xa5, 0x51, 0x42, 0xae, 0x51, 0x22, 0x9e, 0x11, 0x2e, 0xc2,
0x59, 0x66, 0x00, 0x9f, 0x56, 0xf8, 0xe4, 0x62, 0x83, 0x62, 0xb1, 0x01, 0xa7, 0xc9, 0x9c, 0xb0,
0x41, 0x36, 0x1e, 0xd0, 0x8c, 0x6b, 0xb4, 0xbf, 0x0b, 0x3b, 0xa3, 0x98, 0x9f, 0x9f, 0xf2, 0x30,
0x22, 0x98, 0xfc, 0x98, 0x13, 0x2e, 0xd0, 0x23, 0x70, 0xcf, 0xe2, 0x44, 0x10, 0xe6, 0x59, 0x3d,
0xab, 0xdf, 0xc4, 0x26, 0xf2, 0x8f, 0xe0, 0xff, 0x15, 0x2c, 0xcf, 0x68, 0xca, 0x09, 0xfa, 0x02,
0x5c, 0x46, 0x26, 0x94, 0x4d, 0x3d, 0xab, 0xe7, 0xf4, 0x37, 0xf7, 0x3e, 0x0a, 0xde, 0xde, 0x61,
0x60, 0x12, 0x24, 0x08, 0x1b, 0xb0, 0xff, 0xab, 0x0d, 0x9b, 0x95, 0x79, 0xb4, 0x0d, 0xf6, 0xe1,
0xc8, 0xac, 0x67, 0x1f, 0x8e, 0x90, 0x07, 0xf5, 0xe3, 0x5c, 0x84, 0xe3, 0x84, 0x78, 0x76, 0xcf,
0xea, 0x37, 0x70, 0x11, 0xa2, 0x0f, 0x60, 0xe3, 0x30, 0x3d, 0xe5, 0xc4, 0x73, 0xd4, 0xbc, 0x0e,
0x10, 0x82, 0xda, 0x49, 0xfc, 0x13, 0xf1, 0x6a, 0x3d, 0xab, 0xef, 0x60, 0x35, 0x96, 0xfb, 0xf8,
0x2e, 0x64, 0x24, 0x15, 0xde, 0x86, 0xde, 0x87, 0x8e, 0xd0, 0x10, 0x9a, 0x07, 0x8c, 0x84, 0x82,
0x4c, 0x9f, 0x0b, 0xcf, 0xed, 0x59, 0xfd, 0xcd, 0xbd, 0x76, 0xa0, 0xdb, 0x1a, 0x14, 0x6d, 0x0d,
0x5e, 0x15, 0x6d, 0x1d, 0x36, 0xde, 0x5c, 0x76, 0xff, 0xf7, 0xf3, 0xdf, 0x5d, 0x0b, 0x5f, 0xa7,
0xa1, 0x67, 0x00, 0x2f, 0x43, 0x2e, 0x4e, 0xb9, 0x22, 0xa9, 0xdf, 0x4a, 0x52, 0x53, 0x04, 0x95,
0x1c, 0xd4, 0x01, 0x50, 0x0d, 0x38, 0xa0, 0x79, 0x2a, 0xbc, 0x86, 0xaa, 0xbb, 0x32, 0x83, 0x7a,
0xb0, 0x39, 0x22, 0x7c, 0xc2, 0xe2, 0x4c, 0xc4, 0x34, 0xf5, 0x9a, 0x6a, 0x0b, 0xd5, 0x29, 0xff,
0x4f, 0x07, 0x5a, 0x27, 0x52, 0xd3, 0x42, 0xb8, 0x1d, 0x70, 0x30, 0x39, 0x33, 0x5d, 0x94, 0x43,
0x14, 0x00, 0x8c, 0xc8, 0x59, 0x9c, 0xc6, 0x8a, 0xc3, 0x56, 0x65, 0x6e, 0x07, 0xd9, 0x38, 0xb8,
0x9e, 0xc5, 0x15, 0x04, 0x6a, 0x43, 0xe3, 0xc5, 0x45, 0x46, 0x99, 0x14, 0xdf, 0x51, 0x34, 0x65,
0x8c, 0x7e, 0x80, 0xad, 0x62, 0xfc, 0x5c, 0x08, 0xc6, 0xbd, 0x9a, 0x12, 0xfc, 0xe9, 0xaa, 0xe0,
0xd5, 0xa2, 0x82, 0xa5, 0x9c, 0x17, 0xa9, 0x60, 0x0b, 0xbc, 0xcc, 0x23, 0xb5, 0x3e, 0x21, 0x9c,
0xcb, 0x0a, 0xb5, 0x50, 0x45, 0x28, 0xcb, 0xf9, 0x86, 0xd1, 0x54, 0x90, 0x74, 0xaa, 0x84, 0x6a,
0xe2, 0x32, 0x96, 0xe5, 0x14, 0x63, 0x5d, 0x4e, 0xfd, 0x4e, 0xe5, 0x2c, 0xe5, 0x98, 0x72, 0x96,
0xe6, 0xda, 0xcf, 0x00, 0xad, 0xd6, 0x2c, 0x7b, 0x7b, 0x4e, 0x16, 0x45, 0x6f, 0xcf, 0xc9, 0x42,
0x1e, 0xc4, 0x79, 0x98, 0xe4, 0xfa, 0x80, 0x36, 0xb1, 0x0e, 0xf6, 0xed, 0x2f, 0x2d, 0xc9, 0xb0,
0xba, 0xcc, 0x7d, 0x18, 0xfc, 0xaf, 0x60, 0xcb, 0x54, 0x6d, 0xae, 0xd9, 0x2e, 0x38, 0x73, 0x71,
0x61, 0xee, 0x98, 0xb7, 0xba, 0xc7, 0xef, 0x09, 0x13, 0xe4, 0x02, 0x4b, 0x90, 0xff, 0x31, 0x6c,
0x9d, 0x88, 0x50, 0xe4, 0x7c, 0xed, 0xb9, 0xf0, 0x7f, 0xb1, 0x60, 0xbb, 0xc0, 0x98, 0x15, 0x3e,
0x87, 0xc6, 0x5c, 0x91, 0x10, 0x7e, 0xeb, 0x32, 0x25, 0x12, 0xed, 0x43, 0x83, 0x2b, 0x1e, 0xc2,
0x3d, 0x5b, 0x65, 0x75, 0xd6, 0x65, 0x99, 0xf5, 0x4a, 0x3c, 0x1a, 0x40, 0x2d, 0xa1, 0x11, 0xf7,
0x1c, 0x95, 0xf7, 0xe1, 0xba, 0xbc, 0x97, 0x34, 0xc2, 0x0a, 0xe8, 0x5f, 0xda, 0xe0, 0xea, 0x39,
0x74, 0x04, 0xee, 0x34, 0x8e, 0x08, 0x17, 0x7a, 0x57, 0xc3, 0x3d, 0x79, 0x49, 0xff, 0xba, 0xec,
0xee, 0x56, 0xdc, 0x8f, 0x66, 0x24, 0x95, 0xee, 0x1b, 0xc6, 0x29, 0x61, 0x7c, 0x10, 0xd1, 0xc7,
0x3a, 0x25, 0x18, 0xa9, 0x1f, 0x6c, 0x18, 0x24, 0x57, 0x9c, 0x66, 0xb9, 0xd0, 0x3b, 0x78, 0x20,
0x97, 0x66, 0x90, 0x3e, 0x94, 0x86, 0x33, 0x62, 0x2e, 0x8f, 0x1a, 0x4b, 0x1f, 0x9a, 0x84, 0x93,
0xd7, 0x64, 0xaa, 0xdc, 0xa9, 0x81, 0x4d, 0x84, 0xf6, 0xa1, 0xce, 0x45, 0xc8, 0x04, 0x99, 0xaa,
0x73, 0x7f, 0x17, 0x03, 0x29, 0x12, 0xd0, 0xd7, 0xd0, 0x9c, 0xd0, 0x59, 0x96, 0x10, 0x99, 0xed,
0xde, 0x31, 0xfb, 0x3a, 0x45, 0x1e, 0x3d, 0xc2, 0x18, 0x65, 0xca, 0xba, 0x9a, 0x58, 0x07, 0xfe,
0xbf, 0x36, 0xb4, 0xaa, 0x62, 0xad, 0xd8, 0xf2, 0x11, 0xb8, 0x5a, 0x7a, 0x7d, 0x64, 0x1f, 0xd6,
0x2a, 0xcd, 0x70, 0x63, 0xab, 0x3c, 0xa8, 0x4f, 0x72, 0xa6, 0x3c, 0x5b, 0x3b, 0x79, 0x11, 0xca,
0x82, 0x05, 0x15, 0x61, 0xa2, 0x5a, 0xe5, 0x60, 0x1d, 0x48, 0x2b, 0x2f, 0xdf, 0xbf, 0xfb, 0x59,
0x79, 0x99, 0x56, 0x95, 0xa1, 0xfe, 0x5e, 0x32, 0x34, 0xee, 0x2d, 0x83, 0xff, 0x9b, 0x05, 0xcd,
0xf2, 0x94, 0x57, 0xba, 0x6b, 0xbd, 0x77, 0x77, 0x97, 0x3a, 0x63, 0x3f, 0xac, 0x33, 0x8f, 0xc0,
0xe5, 0x82, 0x91, 0x70, 0xa6, 0x34, 0x72, 0xb0, 0x89, 0xa4, 0x9f, 0xcc, 0x78, 0xa4, 0x14, 0x6a,
0x61, 0x39, 0xf4, 0x7d, 0x68, 0x0d, 0x17, 0x82, 0xf0, 0x63, 0xc2, 0xe5, 0x0b, 0x26, 0xb5, 0x9d,
0x86, 0x22, 0x54, 0xfb, 0x68, 0x61, 0x35, 0xde, 0xfb, 0xdd, 0x86, 0xfa, 0x81, 0xfe, 0x18, 0x42,
0xaf, 0xa0, 0x59, 0x7e, 0x4a, 0x20, 0x7f, 0xf5, 0xe6, 0xbf, 0xfd, 0x4d, 0xd2, 0xfe, 0xe4, 0x9d,
0x18, 0x63, 0x61, 0xdf, 0xc2, 0x86, 0x72, 0x4d, 0xd4, 0x79, 0xf7, 0x23, 0xd0, 0xee, 0xae, 0xfd,
0xdf, 0x30, 0x1d, 0x83, 0x6b, 0x6e, 0xc0, 0x4d, 0xd0, 0xaa, 0xb9, 0xb6, 0x7b, 0xeb, 0x01, 0x9a,
0xec, 0x89, 0x85, 0x8e, 0xcb, 0x17, 0xee, 0xa6, 0xd2, 0xaa, 0x9d, 0x6b, 0xdf, 0xf2, 0x7f, 0xdf,
0x7a, 0x62, 0x0d, 0x5b, 0x6f, 0xae, 0x3a, 0xd6, 0x1f, 0x57, 0x1d, 0xeb, 0x9f, 0xab, 0x8e, 0x35,
0x76, 0x95, 0x9c, 0x9f, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x66, 0x46, 0xe5, 0x6a, 0x0a,
0x00, 0x00,
// 1027 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x56, 0x5f, 0x6f, 0xe3, 0x44,
0x10, 0xc7, 0x4e, 0xea, 0xc4, 0xd3, 0xb4, 0x2a, 0x2b, 0x74, 0xb2, 0x02, 0xa4, 0xc1, 0xbc, 0x44,
0x15, 0xe7, 0xdc, 0x05, 0x90, 0x50, 0x91, 0xd0, 0x5d, 0x9a, 0x43, 0xb4, 0xba, 0x0a, 0xb4, 0xbd,
0xc2, 0xb3, 0x93, 0x6c, 0x7d, 0x56, 0x1d, 0xaf, 0xd9, 0x5d, 0x47, 0x0d, 0x9f, 0x82, 0x07, 0xbe,
0x09, 0x9f, 0x80, 0x07, 0xc4, 0x3d, 0xf2, 0xcc, 0x43, 0x41, 0xfd, 0x00, 0x7c, 0x86, 0xd3, 0xfe,
0x71, 0xe2, 0x5c, 0x9a, 0xeb, 0x9f, 0x7b, 0xf2, 0xce, 0xec, 0xcc, 0x6f, 0x67, 0xe7, 0x37, 0x3b,
0x63, 0xd8, 0x1a, 0xd1, 0x54, 0x30, 0x9a, 0x04, 0x19, 0xa3, 0x82, 0xa2, 0x9d, 0x09, 0x1d, 0xce,
0x82, 0x61, 0x1e, 0x27, 0xe3, 0xf3, 0x58, 0x04, 0xd3, 0xc7, 0xcd, 0x87, 0x51, 0x2c, 0x5e, 0xe6,
0xc3, 0x60, 0x44, 0x27, 0xdd, 0x88, 0x46, 0xb4, 0xab, 0x0c, 0x87, 0xf9, 0x99, 0x92, 0x94, 0xa0,
0x56, 0x1a, 0xa0, 0xb9, 0x1b, 0x51, 0x1a, 0x25, 0x64, 0x61, 0x25, 0xe2, 0x09, 0xe1, 0x22, 0x9c,
0x64, 0xc6, 0xe0, 0xb3, 0x12, 0x9e, 0x3c, 0xac, 0x5b, 0x1c, 0xd6, 0xe5, 0x34, 0x99, 0x12, 0xd6,
0xcd, 0x86, 0x5d, 0x9a, 0x71, 0x6d, 0xed, 0xef, 0xc1, 0xce, 0x20, 0xe6, 0xe7, 0xa7, 0x3c, 0x8c,
0x08, 0x26, 0x3f, 0xe7, 0x84, 0x0b, 0xf4, 0x00, 0x9c, 0xb3, 0x38, 0x11, 0x84, 0x79, 0x56, 0xdb,
0xea, 0xb8, 0xd8, 0x48, 0xfe, 0x11, 0xbc, 0x5f, 0xb2, 0xe5, 0x19, 0x4d, 0x39, 0x41, 0x5f, 0x82,
0xc3, 0xc8, 0x88, 0xb2, 0xb1, 0x67, 0xb5, 0x2b, 0x9d, 0xcd, 0xde, 0xc7, 0xc1, 0x9b, 0x37, 0x0c,
0x8c, 0x83, 0x34, 0xc2, 0xc6, 0xd8, 0xff, 0xc3, 0x86, 0xcd, 0x92, 0x1e, 0x6d, 0x83, 0x7d, 0x38,
0x30, 0xe7, 0xd9, 0x87, 0x03, 0xe4, 0x41, 0xed, 0x38, 0x17, 0xe1, 0x30, 0x21, 0x9e, 0xdd, 0xb6,
0x3a, 0x75, 0x5c, 0x88, 0xe8, 0x03, 0xd8, 0x38, 0x4c, 0x4f, 0x39, 0xf1, 0x2a, 0x4a, 0xaf, 0x05,
0x84, 0xa0, 0x7a, 0x12, 0xff, 0x42, 0xbc, 0x6a, 0xdb, 0xea, 0x54, 0xb0, 0x5a, 0xcb, 0x7b, 0xfc,
0x10, 0x32, 0x92, 0x0a, 0x6f, 0x43, 0xdf, 0x43, 0x4b, 0xa8, 0x0f, 0xee, 0x01, 0x23, 0xa1, 0x20,
0xe3, 0xa7, 0xc2, 0x73, 0xda, 0x56, 0x67, 0xb3, 0xd7, 0x0c, 0x74, 0x5a, 0x83, 0x22, 0xad, 0xc1,
0x8b, 0x22, 0xad, 0xfd, 0xfa, 0xab, 0xcb, 0xdd, 0xf7, 0x7e, 0xfd, 0x77, 0xd7, 0xc2, 0x0b, 0x37,
0xf4, 0x04, 0xe0, 0x79, 0xc8, 0xc5, 0x29, 0x57, 0x20, 0xb5, 0x1b, 0x41, 0xaa, 0x0a, 0xa0, 0xe4,
0x83, 0x5a, 0x00, 0x2a, 0x01, 0x07, 0x34, 0x4f, 0x85, 0x57, 0x57, 0x71, 0x97, 0x34, 0xa8, 0x0d,
0x9b, 0x03, 0xc2, 0x47, 0x2c, 0xce, 0x44, 0x4c, 0x53, 0xcf, 0x55, 0x57, 0x28, 0xab, 0xfc, 0xdf,
0xaa, 0xd0, 0x38, 0x91, 0x9c, 0x16, 0xc4, 0xed, 0x40, 0x05, 0x93, 0x33, 0x93, 0x45, 0xb9, 0x44,
0x01, 0xc0, 0x80, 0x9c, 0xc5, 0x69, 0xac, 0x30, 0x6c, 0x15, 0xe6, 0x76, 0x90, 0x0d, 0x83, 0x85,
0x16, 0x97, 0x2c, 0x50, 0x13, 0xea, 0xcf, 0x2e, 0x32, 0xca, 0x24, 0xf9, 0x15, 0x05, 0x33, 0x97,
0xd1, 0x4f, 0xb0, 0x55, 0xac, 0x9f, 0x0a, 0xc1, 0xb8, 0x57, 0x55, 0x84, 0x3f, 0x5e, 0x25, 0xbc,
0x1c, 0x54, 0xb0, 0xe4, 0xf3, 0x2c, 0x15, 0x6c, 0x86, 0x97, 0x71, 0x24, 0xd7, 0x27, 0x84, 0x73,
0x19, 0xa1, 0x26, 0xaa, 0x10, 0x65, 0x38, 0xdf, 0x32, 0x9a, 0x0a, 0x92, 0x8e, 0x15, 0x51, 0x2e,
0x9e, 0xcb, 0x32, 0x9c, 0x62, 0xad, 0xc3, 0xa9, 0xdd, 0x2a, 0x9c, 0x25, 0x1f, 0x13, 0xce, 0x92,
0x0e, 0xed, 0xc3, 0xc6, 0x41, 0x38, 0x7a, 0x49, 0x14, 0x27, 0x9b, 0xbd, 0xd6, 0x2a, 0xa0, 0xda,
0xfe, 0x5e, 0x91, 0xc0, 0xfb, 0x55, 0x59, 0x1e, 0x58, 0xbb, 0x34, 0x9f, 0x00, 0x5a, 0xbd, 0xaf,
0xe4, 0xe5, 0x9c, 0xcc, 0x0a, 0x5e, 0xce, 0xc9, 0x4c, 0x16, 0xf1, 0x34, 0x4c, 0x72, 0x5d, 0xdc,
0x2e, 0xd6, 0xc2, 0xbe, 0xfd, 0x95, 0x25, 0x11, 0x56, 0x43, 0xbc, 0x0b, 0x82, 0x7f, 0x04, 0x8d,
0x72, 0x80, 0xe8, 0x23, 0x70, 0x75, 0x4c, 0x8b, 0xda, 0x58, 0x28, 0xe4, 0xee, 0xe1, 0xa4, 0xd8,
0xd5, 0x58, 0x0b, 0x85, 0xff, 0x35, 0x6c, 0x99, 0xec, 0x99, 0xe7, 0xbe, 0x07, 0x95, 0xa9, 0xb8,
0x30, 0x6f, 0xdd, 0x5b, 0x4d, 0xcd, 0x8f, 0x84, 0x09, 0x72, 0x81, 0xa5, 0x91, 0xff, 0x09, 0x6c,
0x9d, 0x88, 0x50, 0xe4, 0x7c, 0x6d, 0x7d, 0xfa, 0xbf, 0x5b, 0xb0, 0x5d, 0xd8, 0x98, 0x13, 0xbe,
0x80, 0xfa, 0x54, 0x81, 0x10, 0x7e, 0xe3, 0x31, 0x73, 0x4b, 0xb4, 0x0f, 0x75, 0xae, 0x70, 0x08,
0xf7, 0x6c, 0xe5, 0xd5, 0x5a, 0xe7, 0x65, 0xce, 0x9b, 0xdb, 0xa3, 0x2e, 0x54, 0x13, 0x1a, 0x71,
0xaf, 0xa2, 0xfc, 0x3e, 0x5c, 0xe7, 0xf7, 0x9c, 0x46, 0x58, 0x19, 0xfa, 0x97, 0x36, 0x38, 0x5a,
0x87, 0x8e, 0xc0, 0x19, 0xc7, 0x11, 0xe1, 0x42, 0xdf, 0xaa, 0xdf, 0x93, 0xd5, 0xf0, 0xcf, 0xe5,
0xee, 0x5e, 0xa9, 0x0b, 0xd3, 0x8c, 0xa4, 0x72, 0x0a, 0x84, 0x71, 0x4a, 0x18, 0xef, 0x46, 0xf4,
0xa1, 0x76, 0x09, 0x06, 0xea, 0x83, 0x0d, 0x82, 0xc4, 0x8a, 0xd3, 0x2c, 0x17, 0xfa, 0x06, 0xf7,
0xc4, 0xd2, 0x08, 0xb2, 0x1f, 0xa6, 0xe1, 0x84, 0x98, 0x47, 0xac, 0xd6, 0xb2, 0x1f, 0x8e, 0x64,
0x61, 0x8c, 0x55, 0x97, 0xac, 0x63, 0x23, 0xa1, 0x7d, 0xa8, 0x71, 0x11, 0x32, 0x41, 0xc6, 0xea,
0xfd, 0xdd, 0xa6, 0x91, 0x15, 0x0e, 0xe8, 0x1b, 0x70, 0x47, 0x74, 0x92, 0x25, 0x44, 0x7a, 0x3b,
0xb7, 0xf4, 0x5e, 0xb8, 0xc8, 0x32, 0x26, 0x8c, 0x51, 0xa6, 0x5a, 0xa8, 0x8b, 0xb5, 0xe0, 0xff,
0x6f, 0x43, 0xa3, 0x4c, 0xd6, 0xca, 0x78, 0x38, 0x02, 0x47, 0x53, 0xaf, 0x4b, 0xf6, 0x7e, 0xa9,
0xd2, 0x08, 0xd7, 0xa6, 0xca, 0x83, 0xda, 0x28, 0x67, 0x6a, 0x76, 0xe8, 0x89, 0x52, 0x88, 0x32,
0x60, 0x41, 0x45, 0x98, 0xa8, 0x54, 0x55, 0xb0, 0x16, 0xe4, 0x48, 0x99, 0xcf, 0xe1, 0xbb, 0x8d,
0x94, 0xb9, 0x5b, 0x99, 0x86, 0xda, 0x3b, 0xd1, 0x50, 0xbf, 0x33, 0x0d, 0xfe, 0x9f, 0x16, 0xb8,
0xf3, 0x2a, 0x2f, 0x65, 0xd7, 0x7a, 0xe7, 0xec, 0x2e, 0x65, 0xc6, 0xbe, 0x5f, 0x66, 0x1e, 0x80,
0xc3, 0x05, 0x23, 0xe1, 0x44, 0x71, 0x54, 0xc1, 0x46, 0x92, 0xfd, 0x64, 0xc2, 0x23, 0xc5, 0x50,
0x03, 0xcb, 0xa5, 0xef, 0x43, 0xa3, 0x3f, 0x13, 0x84, 0x1f, 0x13, 0x2e, 0x27, 0xa9, 0xe4, 0x76,
0x1c, 0x8a, 0x50, 0xdd, 0xa3, 0x81, 0xd5, 0xba, 0xf7, 0x97, 0x0d, 0xb5, 0x03, 0xfd, 0x53, 0x86,
0x5e, 0x80, 0x3b, 0xff, 0xa5, 0x41, 0xfe, 0xea, 0xcb, 0x7f, 0xf3, 0xdf, 0xa8, 0xf9, 0xe9, 0x5b,
0x6d, 0x4c, 0x0b, 0xfb, 0x0e, 0x36, 0x54, 0xd7, 0x44, 0xad, 0xb7, 0x0f, 0xa3, 0xe6, 0xee, 0xda,
0x7d, 0x83, 0x74, 0x0c, 0x8e, 0x79, 0x01, 0xd7, 0x99, 0x96, 0x9b, 0x6b, 0xb3, 0xbd, 0xde, 0x40,
0x83, 0x3d, 0xb2, 0xd0, 0xf1, 0x7c, 0xd2, 0x5e, 0x17, 0x5a, 0x39, 0x73, 0xcd, 0x1b, 0xf6, 0x3b,
0xd6, 0x23, 0xab, 0xdf, 0x78, 0x75, 0xd5, 0xb2, 0xfe, 0xbe, 0x6a, 0x59, 0xff, 0x5d, 0xb5, 0xac,
0xa1, 0xa3, 0xe8, 0xfc, 0xfc, 0x75, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe9, 0xaa, 0xee, 0xfc, 0xf2,
0x0a, 0x00, 0x00,
}

@ -45,6 +45,12 @@ message SolveRequest {
string Session = 5;
string Frontend = 6;
map<string, string> FrontendAttrs = 7;
CacheOptions Cache = 8 [(gogoproto.nullable) = false];
}
message CacheOptions {
string ExportRef = 1;
string ImportRef = 2;
}
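Because Cache is declared with (gogoproto.nullable) = false, the generated Go field on SolveRequest is a value rather than a pointer; clients that do not use the feature simply send an empty CacheOptions, and empty ExportRef/ImportRef strings disable export and import respectively.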
message SolveResponse {

cache/blobs/blobs.go (new vendored file, 94 lines)

@ -0,0 +1,94 @@
package blobs
import (
gocontext "context"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/rootfs"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/flightcontrol"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
var g flightcontrol.Group
type DiffPair struct {
DiffID digest.Digest
Blobsum digest.Digest
}
type blobmapper interface {
GetBlob(ctx gocontext.Context, key string) (digest.Digest, digest.Digest, error)
SetBlob(ctx gocontext.Context, key string, diffID, blob digest.Digest) error
}
func GetDiffPairs(ctx context.Context, snapshotter snapshot.Snapshotter, differ rootfs.MountDiffer, ref cache.ImmutableRef) ([]DiffPair, error) {
blobmap, ok := snapshotter.(blobmapper)
if !ok {
return nil, errors.Errorf("image exporter requires snapshotter with blobs mapping support")
}
eg, ctx := errgroup.WithContext(ctx)
var diffPairs []DiffPair
var currentPair DiffPair
parent := ref.Parent()
if parent != nil {
defer parent.Release(context.TODO())
eg.Go(func() error {
dp, err := GetDiffPairs(ctx, snapshotter, differ, parent)
if err != nil {
return err
}
diffPairs = dp
return nil
})
}
eg.Go(func() error {
dp, err := g.Do(ctx, ref.ID(), func(ctx context.Context) (interface{}, error) {
diffID, blob, err := blobmap.GetBlob(ctx, ref.ID())
if err != nil {
return nil, err
}
if blob != "" {
return DiffPair{DiffID: diffID, Blobsum: blob}, nil
}
// reference needs to be committed
parent := ref.Parent()
var lower []mount.Mount
if parent != nil {
defer parent.Release(context.TODO())
lower, err = parent.Mount(ctx, true)
if err != nil {
return nil, err
}
}
upper, err := ref.Mount(ctx, true)
if err != nil {
return nil, err
}
descr, err := differ.DiffMounts(ctx, lower, upper, ocispec.MediaTypeImageLayer, ref.ID())
if err != nil {
return nil, err
}
if err := blobmap.SetBlob(ctx, ref.ID(), descr.Digest, descr.Digest); err != nil {
return nil, err
}
return DiffPair{DiffID: descr.Digest, Blobsum: descr.Digest}, nil
})
if err != nil {
return err
}
currentPair = dp.(DiffPair)
return nil
})
err := eg.Wait()
if err != nil {
return nil, err
}
return append(diffPairs, currentPair), nil
}
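A minimal sketch (not part of the diff) of the call pattern this helper expects; the function name and setup below are hypothetical, while GetDiffPairs and DiffPair come from the file above:

package main

import (
	"fmt"

	"github.com/containerd/containerd/rootfs"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/blobs"
	"github.com/moby/buildkit/snapshot"
	"golang.org/x/net/context"
)

// printLayerChain walks ref's parent chain via GetDiffPairs and prints one
// DiffID/Blobsum pair per layer, base layer first.
func printLayerChain(ctx context.Context, sn snapshot.Snapshotter, differ rootfs.MountDiffer, ref cache.ImmutableRef) error {
	pairs, err := blobs.GetDiffPairs(ctx, sn, differ, ref)
	if err != nil {
		return err
	}
	for _, dp := range pairs {
		// DiffID is the uncompressed layer digest; Blobsum is the digest
		// of the blob recorded in the content store.
		fmt.Printf("%s -> %s\n", dp.DiffID, dp.Blobsum)
	}
	return nil
}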

cache/cacheimport/export.go (new vendored file, 177 lines)

@ -0,0 +1,177 @@
package cacheimport
import (
"bytes"
gocontext "context"
"encoding/json"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/rootfs"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/schema2"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/blobs"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/push"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
const mediaTypeConfig = "application/vnd.buildkit.cacheconfig.v0"
type blobmapper interface {
GetBlob(ctx gocontext.Context, key string) (digest.Digest, digest.Digest, error)
SetBlob(ctx gocontext.Context, key string, diffID, blob digest.Digest) error
}
type CacheRecord struct {
CacheKey digest.Digest
Reference cache.ImmutableRef
ContentKey digest.Digest
}
type ExporterOpt struct {
Snapshotter snapshot.Snapshotter
ContentStore content.Store
Differ rootfs.MountDiffer
}
func NewCacheExporter(opt ExporterOpt) *CacheExporter {
return &CacheExporter{opt: opt}
}
type CacheExporter struct {
opt ExporterOpt
}
func (ce *CacheExporter) Export(ctx context.Context, rec []CacheRecord, target string) error {
allBlobs := map[digest.Digest][]blobs.DiffPair{}
currentBlobs := map[digest.Digest]struct{}{}
type cr struct {
CacheRecord
dgst digest.Digest
}
list := make([]cr, 0, len(rec))
for _, r := range rec {
ref := r.Reference
if ref == nil {
list = append(list, cr{CacheRecord: r})
continue
}
dpairs, err := blobs.GetDiffPairs(ctx, ce.opt.Snapshotter, ce.opt.Differ, ref)
if err != nil {
return err
}
for i, dp := range dpairs {
allBlobs[dp.Blobsum] = dpairs[:i+1]
}
dgst := dpairs[len(dpairs)-1].Blobsum
list = append(list, cr{CacheRecord: r, dgst: dgst})
currentBlobs[dgst] = struct{}{}
}
for b := range allBlobs {
if _, ok := currentBlobs[b]; !ok {
list = append(list, cr{dgst: b})
}
}
// own type because oci type can't be pushed and docker type doesn't have annotations
type manifestList struct {
manifest.Versioned
// Manifests references platform specific manifests.
Manifests []ocispec.Descriptor `json:"manifests"`
}
var config cacheConfig
var mfst manifestList
mfst.SchemaVersion = 2
mfst.MediaType = images.MediaTypeDockerSchema2ManifestList
for _, l := range list {
var size int64
var parent digest.Digest
var diffID digest.Digest
if l.dgst != "" {
info, err := ce.opt.ContentStore.Info(ctx, l.dgst)
if err != nil {
return err
}
size = info.Size
chain := allBlobs[l.dgst]
if len(chain) > 1 {
parent = chain[len(chain)-2].Blobsum
}
diffID = chain[len(chain)-1].DiffID
mfst.Manifests = append(mfst.Manifests, ocispec.Descriptor{
MediaType: schema2.MediaTypeLayer,
Size: size,
Digest: l.dgst,
})
}
config.Items = append(config.Items, configItem{
Blobsum: l.dgst,
CacheKey: l.CacheKey,
ContentKey: l.ContentKey,
Parent: parent,
DiffID: diffID,
})
}
dt, err := json.Marshal(config)
if err != nil {
return err
}
dgst := digest.FromBytes(dt)
if err := content.WriteBlob(ctx, ce.opt.ContentStore, dgst.String(), bytes.NewReader(dt), int64(len(dt)), dgst); err != nil {
return errors.Wrap(err, "error writing config blob")
}
mfst.Manifests = append(mfst.Manifests, ocispec.Descriptor{
MediaType: mediaTypeConfig,
Size: int64(len(dt)),
Digest: dgst,
})
dt, err = json.Marshal(mfst)
if err != nil {
return errors.Wrap(err, "failed to marshal manifest")
}
dgst = digest.FromBytes(dt)
if err := content.WriteBlob(ctx, ce.opt.ContentStore, dgst.String(), bytes.NewReader(dt), int64(len(dt)), dgst); err != nil {
return errors.Wrap(err, "error writing manifest blob")
}
logrus.Debugf("cache-manifest: %s", dgst)
return push.Push(ctx, ce.opt.ContentStore, dgst, target)
}
type configItem struct {
Blobsum digest.Digest
CacheKey digest.Digest
ContentKey digest.Digest
Parent digest.Digest
DiffID digest.Digest
}
type cacheConfig struct {
Items []configItem
}
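For orientation, a hedged sketch (not part of the diff) of driving the exporter; sn, cs, differ and records are assumed to come from the worker and solver setup, and the target reference is only an example:

ce := cacheimport.NewCacheExporter(cacheimport.ExporterOpt{
	Snapshotter:  sn,     // snapshotter with blob-mapping support
	ContentStore: cs,     // content store holding the layer blobs
	Differ:       differ, // rootfs.MountDiffer
})
// records is the []cacheimport.CacheRecord collected from the finished build.
if err := ce.Export(ctx, records, "docker.io/example/app:buildcache"); err != nil {
	return err
}

The pushed object is a Docker manifest list whose entries are the layer blobs plus a single application/vnd.buildkit.cacheconfig.v0 blob carrying the configItem records above.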

cache/cacheimport/import.go (new vendored file, 332 lines)

@ -0,0 +1,332 @@
package cacheimport
import (
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/rootfs"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/blobs"
"github.com/moby/buildkit/client"
buildkitidentity "github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
type ImportOpt struct {
ContentStore content.Store
Snapshotter snapshot.Snapshotter
Applier rootfs.Applier
CacheAccessor cache.Accessor
}
func NewCacheImporter(opt ImportOpt) *CacheImporter {
return &CacheImporter{opt: opt}
}
type CacheImporter struct {
opt ImportOpt
}
func (ci *CacheImporter) pull(ctx context.Context, ref string) (*ocispec.Descriptor, remotes.Fetcher, error) {
resolver := docker.NewResolver(docker.ResolverOptions{
Client: http.DefaultClient,
})
ref, desc, err := resolver.Resolve(ctx, ref)
if err != nil {
return nil, nil, err
}
fetcher, err := resolver.Fetcher(ctx, ref)
if err != nil {
return nil, nil, err
}
if _, err := remotes.FetchHandler(ci.opt.ContentStore, fetcher)(ctx, desc); err != nil {
return nil, nil, err
}
return &desc, fetcher, err
}
func (ci *CacheImporter) Import(ctx context.Context, ref string) (InstructionCache, error) {
desc, fetcher, err := ci.pull(ctx, ref)
if err != nil {
return nil, err
}
dt, err := content.ReadBlob(ctx, ci.opt.ContentStore, desc.Digest)
if err != nil {
return nil, err
}
var mfst ocispec.Index
if err := json.Unmarshal(dt, &mfst); err != nil {
return nil, err
}
allDesc := map[digest.Digest]ocispec.Descriptor{}
allBlobs := map[digest.Digest]configItem{}
byCacheKey := map[digest.Digest]configItem{}
byContentKey := map[digest.Digest][]digest.Digest{}
var configDesc ocispec.Descriptor
for _, m := range mfst.Manifests {
if m.MediaType == mediaTypeConfig {
configDesc = m
continue
}
allDesc[m.Digest] = m
}
if configDesc.Digest == "" {
return nil, errors.Errorf("invalid build cache: %s", ref)
}
if _, err := remotes.FetchHandler(ci.opt.ContentStore, fetcher)(ctx, configDesc); err != nil {
return nil, err
}
dt, err = content.ReadBlob(ctx, ci.opt.ContentStore, configDesc.Digest)
if err != nil {
return nil, err
}
var cfg cacheConfig
if err := json.Unmarshal(dt, &cfg); err != nil {
return nil, err
}
for _, ci := range cfg.Items {
if ci.Blobsum != "" {
allBlobs[ci.Blobsum] = ci
}
if ci.CacheKey != "" {
byCacheKey[ci.CacheKey] = ci
if ci.ContentKey != "" {
byContentKey[ci.ContentKey] = append(byContentKey[ci.ContentKey], ci.CacheKey)
}
}
}
return &importInfo{
CacheImporter: ci,
byCacheKey: byCacheKey,
byContentKey: byContentKey,
allBlobs: allBlobs,
allDesc: allDesc,
fetcher: fetcher,
ref: ref,
}, nil
}
type importInfo struct {
*CacheImporter
fetcher remotes.Fetcher
byCacheKey map[digest.Digest]configItem
byContentKey map[digest.Digest][]digest.Digest
allDesc map[digest.Digest]ocispec.Descriptor
allBlobs map[digest.Digest]configItem
ref string
}
func (ii *importInfo) Probe(ctx context.Context, key digest.Digest) (bool, error) {
_, ok := ii.byCacheKey[key]
return ok, nil
}
func (ii *importInfo) getChain(dgst digest.Digest) ([]blobs.DiffPair, error) {
cfg, ok := ii.allBlobs[dgst]
if !ok {
return nil, errors.Errorf("blob %s not found in cache", dgst)
}
parent := cfg.Parent
var out []blobs.DiffPair
if parent != "" {
parentChain, err := ii.getChain(parent)
if err != nil {
return nil, err
}
out = parentChain
}
return append(out, blobs.DiffPair{Blobsum: dgst, DiffID: cfg.DiffID}), nil
}
func (ii *importInfo) Lookup(ctx context.Context, key digest.Digest, msg string) (interface{}, error) {
desc, ok := ii.byCacheKey[key]
if !ok || desc.Blobsum == "" {
return nil, nil
}
var out interface{}
if err := inVertexContext(ctx, fmt.Sprintf("cache from %s for %s", ii.ref, msg), func(ctx context.Context) error {
ch, err := ii.getChain(desc.Blobsum)
if err != nil {
return err
}
res, err := ii.fetch(ctx, ch)
if err != nil {
return err
}
out = res
return nil
}); err != nil {
return nil, err
}
return out, nil
}
func (ii *importInfo) Set(key digest.Digest, ref interface{}) error {
return nil
}
func (ii *importInfo) SetContentMapping(contentKey, key digest.Digest) error {
return nil
}
func (ii *importInfo) GetContentMapping(dgst digest.Digest) ([]digest.Digest, error) {
dgsts, ok := ii.byContentKey[dgst]
if !ok {
return nil, nil
}
return dgsts, nil
}
func (ii *importInfo) fetch(ctx context.Context, chain []blobs.DiffPair) (cache.ImmutableRef, error) {
eg, ctx := errgroup.WithContext(ctx)
for _, dp := range chain {
func(dp blobs.DiffPair) {
eg.Go(func() error {
desc, ok := ii.allDesc[dp.Blobsum]
if !ok {
return errors.Errorf("failed to find %s for fetch", dp.Blobsum)
}
if _, err := remotes.FetchHandler(ii.opt.ContentStore, ii.fetcher)(ctx, desc); err != nil {
return err
}
return nil
})
}(dp)
}
if err := eg.Wait(); err != nil {
return nil, err
}
chainid, err := ii.unpack(ctx, chain)
if err != nil {
return nil, err
}
return ii.opt.CacheAccessor.Get(ctx, chainid, cache.WithDescription("imported cache")) // TODO: more descriptive name
}
func (ii *importInfo) unpack(ctx context.Context, dpairs []blobs.DiffPair) (string, error) {
layers, err := ii.getLayers(ctx, dpairs)
if err != nil {
return "", err
}
chainID, err := rootfs.ApplyLayers(ctx, layers, ii.opt.Snapshotter, ii.opt.Applier)
if err != nil {
return "", err
}
if err := ii.fillBlobMapping(ctx, layers); err != nil {
return "", err
}
return string(chainID), nil
}
func (ii *importInfo) fillBlobMapping(ctx context.Context, layers []rootfs.Layer) error {
var chain []digest.Digest
for _, l := range layers {
chain = append(chain, l.Diff.Digest)
chainID := identity.ChainID(chain)
if err := ii.opt.Snapshotter.(blobmapper).SetBlob(ctx, string(chainID), l.Diff.Digest, l.Blob.Digest); err != nil {
return err
}
}
return nil
}
func (ii *importInfo) getLayers(ctx context.Context, dpairs []blobs.DiffPair) ([]rootfs.Layer, error) {
layers := make([]rootfs.Layer, len(dpairs))
for i := range dpairs {
layers[i].Diff = ocispec.Descriptor{
// TODO: derive media type from compressed type
MediaType: ocispec.MediaTypeImageLayer,
Digest: dpairs[i].DiffID,
}
info, err := ii.opt.ContentStore.Info(ctx, dpairs[i].Blobsum)
if err != nil {
return nil, err
}
layers[i].Blob = ocispec.Descriptor{
// TODO: derive media type from compressed type
MediaType: ocispec.MediaTypeImageLayerGzip,
Digest: dpairs[i].Blobsum,
Size: info.Size,
}
}
return layers, nil
}
type InstructionCache interface {
Probe(ctx context.Context, key digest.Digest) (bool, error)
Lookup(ctx context.Context, key digest.Digest, msg string) (interface{}, error) // TODO: regular ref
Set(key digest.Digest, ref interface{}) error
SetContentMapping(contentKey, key digest.Digest) error
GetContentMapping(dgst digest.Digest) ([]digest.Digest, error)
}
func inVertexContext(ctx context.Context, name string, f func(ctx context.Context) error) error {
v := client.Vertex{
Digest: digest.FromBytes([]byte(buildkitidentity.NewID())),
Name: name,
}
pw, _, ctx := progress.FromContext(ctx, progress.WithMetadata("vertex", v.Digest))
notifyStarted(ctx, &v)
defer pw.Close()
err := f(ctx)
notifyCompleted(ctx, &v, err)
return err
}
func notifyStarted(ctx context.Context, v *client.Vertex) {
pw, _, _ := progress.FromContext(ctx)
defer pw.Close()
now := time.Now()
v.Started = &now
v.Completed = nil
pw.Write(v.Digest.String(), *v)
}
func notifyCompleted(ctx context.Context, v *client.Vertex, err error) {
pw, _, _ := progress.FromContext(ctx)
defer pw.Close()
now := time.Now()
if v.Started == nil {
v.Started = &now
}
v.Completed = &now
v.Cached = false
if err != nil {
v.Error = err.Error()
}
pw.Write(v.Digest.String(), *v)
}
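The consuming side mirrors this: Import fetches and indexes the manifest, returning an InstructionCache that the solver can probe by cache key before any layer blob is pulled. A sketch under the same assumptions (cacheKey here is an illustrative digest.Digest):

ci := cacheimport.NewCacheImporter(cacheimport.ImportOpt{
	ContentStore:  cs,
	Snapshotter:   sn,
	Applier:       applier, // used by rootfs.ApplyLayers during unpack
	CacheAccessor: cm,
})
ic, err := ci.Import(ctx, "docker.io/example/app:buildcache")
if err != nil {
	return err
}
ok, err := ic.Probe(ctx, cacheKey) // cheap membership test; no blobs fetched yet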

@ -41,7 +41,7 @@ func (ls *LocalStore) Probe(ctx context.Context, key digest.Digest) (bool, error
return ls.MetadataStore.Probe(index(key.String()))
}
func (ls *LocalStore) Lookup(ctx context.Context, key digest.Digest) (interface{}, error) {
func (ls *LocalStore) Lookup(ctx context.Context, key digest.Digest, msg string) (interface{}, error) {
snaps, err := ls.MetadataStore.Search(index(key.String()))
if err != nil {
return nil, err

@ -27,6 +27,8 @@ type SolveOpt struct {
SharedKey string
Frontend string
FrontendAttrs map[string]string
ExportCache string
ImportCache string
// Session string
}
@ -99,6 +101,10 @@ func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, s
Session: s.ID(),
Frontend: opt.Frontend,
FrontendAttrs: opt.FrontendAttrs,
Cache: controlapi.CacheOptions{
ExportRef: opt.ExportCache,
ImportRef: opt.ImportCache,
},
})
if err != nil {
return errors.Wrap(err, "failed to solve")
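On the client the two knobs surface as plain strings on SolveOpt; a minimal, hypothetical call (c, def and ch as in the surrounding code, the ref illustrative):

err := c.Solve(ctx, def, client.SolveOpt{
	ExportCache: "docker.io/example/app:buildcache",
	ImportCache: "docker.io/example/app:buildcache",
}, ch)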

@ -57,6 +57,14 @@ var buildCommand = cli.Command{
Name: "no-cache",
Usage: "Disable cache for all the vertices. (Not yet implemented.) Frontend is not supported.",
},
cli.StringFlag{
Name: "export-cache",
Usage: "Reference to export build cache to",
},
cli.StringFlag{
Name: "import-cache",
Usage: "Reference to import build cache from",
},
},
}
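A round trip with the new flags then looks like buildctl build ... --export-cache docker.io/example/app:buildcache on one machine and buildctl build ... --import-cache docker.io/example/app:buildcache on another; both take ordinary image references (the ref here is illustrative).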
@ -143,6 +151,8 @@ func build(clicontext *cli.Context) error {
LocalDirs: localDirs,
Frontend: clicontext.String("frontend"),
FrontendAttrs: frontendAttrs,
ExportCache: clicontext.String("export-cache"),
ImportCache: clicontext.String("import-cache"),
}, ch)
})

@ -2,8 +2,10 @@ package control
import (
"github.com/containerd/containerd/snapshot"
"github.com/docker/distribution/reference"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/cacheimport"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
@ -29,6 +31,8 @@ type Opt struct {
SessionManager *session.Manager
Frontends map[string]frontend.Frontend
ImageSource source.Source
CacheExporter *cacheimport.CacheExporter
CacheImporter *cacheimport.CacheImporter
}
type Controller struct { // TODO: ControlService
@ -46,6 +50,8 @@ func NewController(opt Opt) (*Controller, error) {
InstructionCache: opt.InstructionCache,
ImageSource: opt.ImageSource,
Frontends: opt.Frontends,
CacheExporter: opt.CacheExporter,
CacheImporter: opt.CacheImporter,
}),
}
return c, nil
@ -106,11 +112,31 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
}
}
exportCacheRef := ""
if ref := req.Cache.ExportRef; ref != "" {
parsed, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, err
}
exportCacheRef = reference.TagNameOnly(parsed).String()
}
importCacheRef := ""
if ref := req.Cache.ImportRef; ref != "" {
parsed, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, err
}
importCacheRef = reference.TagNameOnly(parsed).String()
}
if err := c.solver.Solve(ctx, req.Ref, solver.SolveRequest{
Frontend: frontend,
Definition: req.Definition,
Exporter: expi,
FrontendOpt: req.FrontendAttrs,
ExportCacheRef: exportCacheRef,
ImportCacheRef: importCacheRef,
}); err != nil {
return nil, err
}
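ParseNormalizedNamed plus TagNameOnly applies the usual Docker shorthand rules before the ref reaches the solver: for example, example/app normalizes to docker.io/example/app:latest.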

@ -10,6 +10,7 @@ import (
"github.com/containerd/containerd/rootfs"
ctdsnapshot "github.com/containerd/containerd/snapshot"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/cacheimport"
"github.com/moby/buildkit/cache/instructioncache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
@ -111,7 +112,6 @@ func defaultControllerOpts(root string, pd pullDeps) (*Opt, error) {
Snapshotter: snapshotter,
ContentStore: pd.ContentStore,
Differ: pd.Differ,
CacheAccessor: cm,
Images: pd.Images,
})
if err != nil {
@ -131,6 +131,19 @@ func defaultControllerOpts(root string, pd pullDeps) (*Opt, error) {
frontends["dockerfile.v0"] = dockerfile.NewDockerfileFrontend()
frontends["gateway.v0"] = gateway.NewGatewayFrontend()
ce := cacheimport.NewCacheExporter(cacheimport.ExporterOpt{
Snapshotter: snapshotter,
ContentStore: pd.ContentStore,
Differ: pd.Differ,
})
ci := cacheimport.NewCacheImporter(cacheimport.ImportOpt{
Snapshotter: snapshotter,
ContentStore: pd.ContentStore,
Applier: pd.Applier,
CacheAccessor: cm,
})
return &Opt{
Snapshotter: snapshotter,
CacheManager: cm,
@ -140,5 +153,7 @@ func defaultControllerOpts(root string, pd pullDeps) (*Opt, error) {
SessionManager: sessm,
Frontends: frontends,
ImageSource: is,
CacheExporter: ce,
CacheImporter: ci,
}, nil
}

@ -2,7 +2,6 @@ package containerimage
import (
"bytes"
gocontext "context"
"encoding/json"
"runtime"
"time"
@ -10,25 +9,26 @@ import (
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/rootfs"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema2"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/cache/blobs"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/push"
"github.com/moby/buildkit/util/system"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
const (
keyImageName = "name"
keyPush = "push"
exporterImageConfig = "containerimage.config"
)
@ -36,34 +36,15 @@ type Opt struct {
Snapshotter snapshot.Snapshotter
ContentStore content.Store
Differ rootfs.MountDiffer
CacheAccessor cache.Accessor
MetadataStore metadata.Store
Images images.Store
}
type imageExporter struct {
blobmap blobmapper
opt Opt
g flightcontrol.Group
}
type diffPair struct {
diffID digest.Digest
blobsum digest.Digest
}
type blobmapper interface {
GetBlob(ctx gocontext.Context, key string) (digest.Digest, error)
SetBlob(ctx gocontext.Context, key string, blob digest.Digest) error
}
func New(opt Opt) (exporter.Exporter, error) {
blobmap, ok := opt.Snapshotter.(blobmapper)
if !ok {
return nil, errors.Errorf("image exporter requires snapshotter with blobs mapping support")
}
im := &imageExporter{opt: opt, blobmap: blobmap}
im := &imageExporter{opt: opt}
return im, nil
}
@ -73,6 +54,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
switch k {
case keyImageName:
i.targetName = v
case keyPush:
i.push = true
default:
logrus.Warnf("unknown exporter option %s", k)
}
@ -80,74 +63,10 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
return i, nil
}
func (e *imageExporter) getBlobs(ctx context.Context, ref cache.ImmutableRef) ([]diffPair, error) {
eg, ctx := errgroup.WithContext(ctx)
var diffPairs []diffPair
var currentPair diffPair
parent := ref.Parent()
if parent != nil {
defer parent.Release(context.TODO())
eg.Go(func() error {
dp, err := e.getBlobs(ctx, parent)
if err != nil {
return err
}
diffPairs = dp
return nil
})
}
eg.Go(func() error {
dp, err := e.g.Do(ctx, ref.ID(), func(ctx context.Context) (interface{}, error) {
blob, err := e.blobmap.GetBlob(ctx, ref.ID())
if err != nil {
return nil, err
}
if blob != "" {
diffID, err := digest.Parse(ref.ID())
if err != nil {
diffID = blob
}
return diffPair{diffID: diffID, blobsum: blob}, nil
}
// reference needs to be committed
parent := ref.Parent()
var lower []mount.Mount
if parent != nil {
defer parent.Release(context.TODO())
lower, err = parent.Mount(ctx, true)
if err != nil {
return nil, err
}
}
upper, err := ref.Mount(ctx, true)
if err != nil {
return nil, err
}
descr, err := e.opt.Differ.DiffMounts(ctx, lower, upper, ocispec.MediaTypeImageLayer, ref.ID())
if err != nil {
return nil, err
}
if err := e.blobmap.SetBlob(ctx, ref.ID(), descr.Digest); err != nil {
return nil, err
}
return diffPair{diffID: descr.Digest, blobsum: descr.Digest}, nil
})
if err != nil {
return err
}
currentPair = dp.(diffPair)
return nil
})
err := eg.Wait()
if err != nil {
return nil, err
}
return append(diffPairs, currentPair), nil
}
type imageExporterInstance struct {
*imageExporter
targetName string
push bool
}
func (e *imageExporterInstance) Name() string {
@ -156,7 +75,7 @@ func (e *imageExporterInstance) Name() string {
func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableRef, opt map[string][]byte) error {
layersDone := oneOffProgress(ctx, "exporting layers")
diffPairs, err := e.getBlobs(ctx, ref)
diffPairs, err := blobs.GetDiffPairs(ctx, e.opt.Snapshotter, e.opt.Differ, ref)
if err != nil {
return err
}
@ -164,7 +83,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableR
diffIDs := make([]digest.Digest, 0, len(diffPairs))
for _, dp := range diffPairs {
diffIDs = append(diffIDs, dp.diffID)
diffIDs = append(diffIDs, dp.DiffID)
}
var dt []byte
@ -188,24 +107,25 @@ func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableR
}
configDone(nil)
mfst := ocispec.Manifest{
Config: ocispec.Descriptor{
mfst := schema2.Manifest{
Config: distribution.Descriptor{
Digest: dgst,
Size: int64(len(dt)),
MediaType: ocispec.MediaTypeImageConfig,
MediaType: schema2.MediaTypeImageConfig,
},
}
mfst.SchemaVersion = 2
mfst.MediaType = schema2.MediaTypeManifest
for _, dp := range diffPairs {
info, err := e.opt.ContentStore.Info(ctx, dp.blobsum)
info, err := e.opt.ContentStore.Info(ctx, dp.Blobsum)
if err != nil {
return configDone(errors.Wrapf(err, "could not get blob %s", dp.blobsum))
return configDone(errors.Wrapf(err, "could not get blob %s", dp.Blobsum))
}
mfst.Layers = append(mfst.Layers, ocispec.Descriptor{
Digest: dp.blobsum,
mfst.Layers = append(mfst.Layers, distribution.Descriptor{
Digest: dp.Blobsum,
Size: info.Size,
MediaType: ocispec.MediaTypeImageLayerGzip,
MediaType: schema2.MediaTypeLayer,
})
}
@ -223,7 +143,8 @@ func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableR
mfstDone(nil)
if e.opt.Images != nil && e.targetName != "" {
if e.targetName != "" {
if e.opt.Images != nil {
tagDone := oneOffProgress(ctx, "naming to "+e.targetName)
imgrec := images.Image{
Name: e.targetName,
@ -247,6 +168,10 @@ func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableR
}
tagDone(nil)
}
if e.push {
return push.Push(ctx, e.opt.ContentStore, dgst, e.targetName)
}
}
return err
}

@ -1,3 +1,3 @@
package moby_buildkit_v1_frontend
//go:generate protoc -I=. -I=../../../vendor/ -I=/Users/tonistiigi/gocode/src --gogo_out=plugins=grpc:. gateway.proto
//go:generate protoc -I=. -I=../../../vendor/ --gogo_out=plugins=grpc:. gateway.proto

@ -24,6 +24,11 @@ type Info struct {
Blob string
}
type DiffPair struct {
Blobsum digest.Digest
DiffID digest.Digest
}
// this snapshotter keeps an internal mapping between a snapshot and a blob
type Snapshotter struct {
@ -43,7 +48,7 @@ func NewSnapshotter(opt Opt) (*Snapshotter, error) {
// Remove also removes a reference to a blob. If it is the last reference then it deletes the blob as well
// Remove is not safe to be called concurrently
func (s *Snapshotter) Remove(ctx context.Context, key string) error {
blob, err := s.GetBlob(ctx, key)
_, blob, err := s.GetBlob(ctx, key)
if err != nil {
return err
}
@ -70,7 +75,7 @@ func (s *Snapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, er
if err != nil {
return snapshot.Usage{}, err
}
blob, err := s.GetBlob(ctx, key)
_, blob, err := s.GetBlob(ctx, key)
if err != nil {
return u, err
}
@ -84,34 +89,34 @@ func (s *Snapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, er
return u, nil
}
func (s *Snapshotter) GetBlob(ctx context.Context, key string) (digest.Digest, error) {
func (s *Snapshotter) GetBlob(ctx context.Context, key string) (digest.Digest, digest.Digest, error) {
md, _ := s.opt.MetadataStore.Get(key)
v := md.Get(blobKey)
if v == nil {
return "", nil
return "", "", nil
}
var blob digest.Digest
var blob DiffPair
if err := v.Unmarshal(&blob); err != nil {
return "", err
return "", "", err
}
return blob, nil
return blob.DiffID, blob.Blobsum, nil
}
// Validates that there is no blob associated with the snapshot.
// Checks that there is a blob in the content store.
// If the same blob has already been set then this is a no-op.
func (s *Snapshotter) SetBlob(ctx context.Context, key string, blob digest.Digest) error {
_, err := s.opt.Content.Info(ctx, blob)
func (s *Snapshotter) SetBlob(ctx context.Context, key string, diffID, blobsum digest.Digest) error {
_, err := s.opt.Content.Info(ctx, blobsum)
if err != nil {
return err
}
md, _ := s.opt.MetadataStore.Get(key)
v, err := metadata.NewValue(blob)
v, err := metadata.NewValue(DiffPair{DiffID: diffID, Blobsum: blobsum})
if err != nil {
return err
}
v.Index = index(blob)
v.Index = index(blobsum)
return md.Update(func(b *bolt.Bucket) error {
return md.SetValue(b, blobKey, v)
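The mapping now stores a DiffPair per snapshot instead of a single digest, letting a layer's uncompressed identity (DiffID) diverge from its stored blob (Blobsum), which imported compressed layers need. A hypothetical round trip (not part of the diff), with sn a *Snapshotter and the digests taken from a real layer:

if err := sn.SetBlob(ctx, key, diffID, blobsum); err != nil {
	return err
}
gotDiffID, gotBlobsum, err := sn.GetBlob(ctx, key) // returns "", "", nil if unset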

@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"path"
"runtime"
"sort"
"strings"
@ -38,9 +39,13 @@ func (e *execOp) CacheKey(ctx context.Context) (digest.Digest, error) {
dt, err := json.Marshal(struct {
Type string
Exec *pb.ExecOp
OS string
Arch string
}{
Type: execCacheType,
Exec: e.op,
OS: runtime.GOOS,
Arch: runtime.GOARCH,
})
if err != nil {
return "", err
@ -175,9 +180,13 @@ func (e *execOp) ContentMask(ctx context.Context) (digest.Digest, [][]string, er
dt, err := json.Marshal(struct {
Type string
Exec *pb.ExecOp
OS string
Arch string
}{
Type: execCacheType,
Exec: &ecopy,
OS: runtime.GOOS,
Arch: runtime.GOARCH,
})
if err != nil {
return "", nil, err

@ -52,7 +52,7 @@ func (jl *jobList) new(ctx context.Context, id string, pr progress.Reader, cache
pw, _, _ := progress.FromContext(ctx) // TODO: remove this
sid := session.FromContext(ctx)
j := &job{l: jl, pr: progress.NewMultiReader(pr), pw: pw, session: sid, cache: cache}
j := &job{l: jl, pr: progress.NewMultiReader(pr), pw: pw, session: sid, cache: cache, cached: map[string]*cacheRecord{}}
jl.refs[id] = j
jl.updateCond.Broadcast()
go func() {
@ -97,6 +97,13 @@ type job struct {
pw progress.Writer
session string
cache InstructionCache
cached map[string]*cacheRecord
}
type cacheRecord struct {
VertexSolver
index Index
ref Reference
}
func (j *job) load(def *pb.Definition, resolveOp ResolveOpFunc) (*Input, error) {
@ -183,7 +190,31 @@ func (j *job) getRef(ctx context.Context, v *vertex, index Index) (Reference, er
if err != nil {
return nil, err
}
return getRef(s, ctx, v, index, j.cache)
ref, err := getRef(s, ctx, v, index, j.cache)
if err != nil {
return nil, err
}
j.keepCacheRef(s, index, ref)
return ref, nil
}
func (j *job) keepCacheRef(s VertexSolver, index Index, ref Reference) {
immutable, ok := toImmutableRef(ref)
if ok {
j.cached[immutable.ID()] = &cacheRecord{s, index, ref}
}
}
func (j *job) cacheExporter(ref Reference) (CacheExporter, error) {
immutable, ok := toImmutableRef(ref)
if !ok {
return nil, errors.Errorf("invalid reference")
}
cr, ok := j.cached[immutable.ID()]
if !ok {
return nil, errors.Errorf("invalid cache exporter")
}
return cr.Cache(cr.index, cr.ref), nil
}
func getRef(s VertexSolver, ctx context.Context, v *vertex, index Index, cache InstructionCache) (Reference, error) {
@ -194,7 +225,7 @@ func getRef(s VertexSolver, ctx context.Context, v *vertex, index Index, cache I
if err != nil {
return nil, err
}
ref, err := cache.Lookup(ctx, k)
ref, err := cache.Lookup(ctx, k, s.(*vertexSolver).v.Name())
if err != nil {
return nil, err
}
@ -215,7 +246,7 @@ func getRef(s VertexSolver, ctx context.Context, v *vertex, index Index, cache I
return nil, err
}
if r.CacheKey != "" {
ref, err := cache.Lookup(ctx, r.CacheKey)
ref, err := cache.Lookup(ctx, r.CacheKey, s.(*vertexSolver).v.Name())
if err != nil {
return nil, err
}
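The extra msg argument threaded through Lookup is the vertex name: the local store ignores it, while the remote cache importer uses it for the progress line ("cache from <ref> for <msg>").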

@ -8,6 +8,7 @@ import (
"time"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/cacheimport"
"github.com/moby/buildkit/cache/contenthash"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter"
@ -31,6 +32,8 @@ type LLBOpt struct {
InstructionCache InstructionCache
ImageSource source.Source
Frontends map[string]frontend.Frontend // used by nested invocations
CacheExporter *cacheimport.CacheExporter
CacheImporter *cacheimport.CacheImporter
}
func NewLLBSolver(opt LLBOpt) *Solver {
@ -46,7 +49,7 @@ func NewLLBSolver(opt LLBOpt) *Solver {
default:
return nil, nil
}
}, opt.InstructionCache, opt.ImageSource, opt.Worker, opt.CacheManager, opt.Frontends)
}, opt.InstructionCache, opt.ImageSource, opt.Worker, opt.CacheManager, opt.Frontends, opt.CacheExporter, opt.CacheImporter)
return s
}
@ -72,7 +75,7 @@ type Op interface {
type InstructionCache interface {
Probe(ctx context.Context, key digest.Digest) (bool, error)
Lookup(ctx context.Context, key digest.Digest) (interface{}, error) // TODO: regular ref
Lookup(ctx context.Context, key digest.Digest, msg string) (interface{}, error) // TODO: regular ref
Set(key digest.Digest, ref interface{}) error
SetContentMapping(contentKey, key digest.Digest) error
GetContentMapping(dgst digest.Digest) ([]digest.Digest, error)
@ -86,10 +89,12 @@ type Solver struct {
worker worker.Worker
cm cache.Manager // TODO: remove with immutableRef.New()
frontends map[string]frontend.Frontend
ce *cacheimport.CacheExporter
ci *cacheimport.CacheImporter
}
func New(resolve ResolveOpFunc, cache InstructionCache, imageSource source.Source, worker worker.Worker, cm cache.Manager, f map[string]frontend.Frontend) *Solver {
return &Solver{resolve: resolve, jobs: newJobList(), cache: cache, imageSource: imageSource, worker: worker, cm: cm, frontends: f}
func New(resolve ResolveOpFunc, cache InstructionCache, imageSource source.Source, worker worker.Worker, cm cache.Manager, f map[string]frontend.Frontend, ce *cacheimport.CacheExporter, ci *cacheimport.CacheImporter) *Solver {
return &Solver{resolve: resolve, jobs: newJobList(), cache: cache, imageSource: imageSource, worker: worker, cm: cm, frontends: f, ce: ce, ci: ci}
}
type SolveRequest struct {
@ -97,6 +102,8 @@ type SolveRequest struct {
Frontend frontend.Frontend
Exporter exporter.ExporterInstance
FrontendOpt map[string]string
ExportCacheRef string
ImportCacheRef string
}
func (s *Solver) solve(ctx context.Context, j *job, req SolveRequest) (Reference, map[string][]byte, error) {
@ -126,6 +133,14 @@ func (s *Solver) Solve(ctx context.Context, id string, req SolveRequest) error {
pr, ctx, closeProgressWriter := progress.NewContext(ctx)
defer closeProgressWriter()
if importRef := req.ImportCacheRef; importRef != "" {
cache, err := s.ci.Import(ctx, importRef)
if err != nil {
return err
}
s.cache = mergeRemoteCache(s.cache, cache)
}
// register a build job. vertex needs to be loaded to a job to run
ctx, j, err := s.jobs.new(ctx, id, pr, s.cache)
if err != nil {
@ -151,10 +166,31 @@ func (s *Solver) Solve(ctx context.Context, id string, req SolveRequest) error {
}
if exp := req.Exporter; exp != nil {
return inVertexContext(ctx, exp.Name(), func(ctx context.Context) error {
if err := inVertexContext(ctx, exp.Name(), func(ctx context.Context) error {
return exp.Export(ctx, immutable, exporterOpt)
})
}); err != nil {
return err
}
}
if exportName := req.ExportCacheRef; exportName != "" {
if err := inVertexContext(ctx, "exporting build cache", func(ctx context.Context) error {
cache, err := j.cacheExporter(ref)
if err != nil {
return err
}
records, err := cache.Export(ctx)
if err != nil {
return err
}
return s.ce.Export(ctx, records, exportName)
}); err != nil {
return err
}
}
return err
}
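Taken together, the two new SolveRequest fields drive both halves of the feature: ImportCacheRef merges a remote cache into the local InstructionCache before the solve, and ExportCacheRef pushes the collected cache records after it. A minimal in-package sketch, assuming the exporter and definition are set up elsewhere (solveWithCache and the ref values are illustrative, not part of this change):

func solveWithCache(ctx context.Context, s *Solver, exp exporter.ExporterInstance) error {
	return s.Solve(ctx, "build1", SolveRequest{
		Exporter:       exp,
		ExportCacheRef: "example.com/myrepo:buildcache", // cache records are pushed here after the build
		ImportCacheRef: "example.com/myrepo:buildcache", // remote cache is merged in before solving
	})
}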
@ -188,13 +224,14 @@ func (s *Solver) subBuild(ctx context.Context, dgst digest.Digest, req SolveRequ
st = jl.actives[inp.Vertex.Digest()]
jl.mu.Unlock()
return getRef(st.solver, ctx, inp.Vertex.(*vertex), inp.Index, s.cache) // TODO: combine to pass single input
return getRef(st.solver, ctx, inp.Vertex.(*vertex), inp.Index, s.cache) // TODO: combine to pass single input // TODO: export cache for subbuilds
}
type VertexSolver interface {
CacheKey(ctx context.Context, index Index) (digest.Digest, error)
OutputEvaluator(Index) (VertexEvaluator, error)
Release() error
Cache(Index, Reference) CacheExporter
}
type vertexInput struct {
@ -218,6 +255,7 @@ type vertexSolver struct {
mu sync.Mutex
results []digest.Digest
markCachedOnce sync.Once
contentKey digest.Digest
signal *signal // used to notify that there are callers who need more data
}
@ -265,6 +303,78 @@ func markCached(ctx context.Context, cv client.Vertex) {
pw.Write(cv.Digest.String(), cv)
}
type CacheExporter interface {
Export(context.Context) ([]cacheimport.CacheRecord, error)
}
func (vs *vertexSolver) Cache(index Index, ref Reference) CacheExporter {
return &cacheExporter{vertexSolver: vs, index: index, ref: ref}
}
type cacheExporter struct {
*vertexSolver
index Index
ref Reference
}
func (ce *cacheExporter) Export(ctx context.Context) ([]cacheimport.CacheRecord, error) {
return ce.vertexSolver.Export(ctx, ce.index, ce.ref)
}
func (vs *vertexSolver) Export(ctx context.Context, index Index, ref Reference) ([]cacheimport.CacheRecord, error) {
mp := map[digest.Digest]cacheimport.CacheRecord{}
if err := vs.appendInputCache(ctx, mp); err != nil {
return nil, err
}
dgst, err := vs.mainCacheKey()
if err != nil {
return nil, err
}
immutable, ok := toImmutableRef(ref)
if !ok {
return nil, errors.Errorf("invalid reference")
}
dgst = cacheKeyForIndex(dgst, index)
mp[dgst] = cacheimport.CacheRecord{CacheKey: dgst, Reference: immutable}
out := make([]cacheimport.CacheRecord, 0, len(mp))
for _, cr := range mp {
out = append(out, cr)
}
return out, nil
}
func (vs *vertexSolver) appendInputCache(ctx context.Context, mp map[digest.Digest]cacheimport.CacheRecord) error {
for i, inp := range vs.inputs {
mainDgst, err := inp.solver.(*vertexSolver).mainCacheKey()
if err != nil {
return err
}
dgst := cacheKeyForIndex(mainDgst, vs.v.inputs[i].index)
if cr, ok := mp[dgst]; !ok || (cr.Reference == nil && inp.ref != nil) {
if err := inp.solver.(*vertexSolver).appendInputCache(ctx, mp); err != nil {
return err
}
if inp.ref != nil && len(inp.solver.(*vertexSolver).inputs) > 0 { // Ignore pushing the refs for sources
ref, ok := toImmutableRef(inp.ref)
if !ok {
return errors.Errorf("invalid reference")
}
mp[dgst] = cacheimport.CacheRecord{CacheKey: dgst, Reference: ref}
} else {
mp[dgst] = cacheimport.CacheRecord{CacheKey: dgst}
}
}
}
if ck := vs.contentKey; ck != "" {
mainDgst, err := vs.mainCacheKey()
if err != nil {
return err
}
mp[ck] = cacheimport.CacheRecord{CacheKey: mainDgst, ContentKey: ck}
}
return nil
}
func (vs *vertexSolver) CacheKey(ctx context.Context, index Index) (digest.Digest, error) {
vs.mu.Lock()
defer vs.mu.Unlock()
@ -408,7 +518,7 @@ func (vs *vertexSolver) run(ctx context.Context, signal func()) (retErr error) {
// check if current cache key is in cache
if len(inp.cacheKeys) > 0 {
ref, err := vs.cache.Lookup(ctx2, inp.cacheKeys[len(inp.cacheKeys)-1])
ref, err := vs.cache.Lookup(ctx2, inp.cacheKeys[len(inp.cacheKeys)-1], inp.solver.(*vertexSolver).v.Name())
if err != nil {
return err
}
@ -490,6 +600,7 @@ func (vs *vertexSolver) run(ctx context.Context, signal func()) (retErr error) {
if err != nil {
return err
}
vs.contentKey = contentKey
var extraKeys []digest.Digest
cks, err := vs.cache.GetContentMapping(contentKey)
@ -703,3 +814,51 @@ func (s *llbBridge) Exec(ctx context.Context, meta worker.Meta, rootFS cache.Imm
func cacheKeyForIndex(dgst digest.Digest, index Index) digest.Digest {
return digest.FromBytes([]byte(fmt.Sprintf("%s.%d", dgst, index)))
}
func mergeRemoteCache(local, remote InstructionCache) InstructionCache {
return &mergedCache{local: local, remote: remote}
}
type mergedCache struct {
local InstructionCache
remote InstructionCache
}
func (mc *mergedCache) Probe(ctx context.Context, key digest.Digest) (bool, error) {
v, err := mc.local.Probe(ctx, key)
if err != nil {
return false, err
}
if v {
return v, nil
}
return mc.remote.Probe(ctx, key)
}
func (mc *mergedCache) Lookup(ctx context.Context, key digest.Digest, msg string) (interface{}, error) {
v, err := mc.local.Probe(ctx, key)
if err != nil {
return nil, err
}
if v {
return mc.local.Lookup(ctx, key, msg)
}
return mc.remote.Lookup(ctx, key, msg)
}
func (mc *mergedCache) Set(key digest.Digest, ref interface{}) error {
return mc.local.Set(key, ref)
}
func (mc *mergedCache) SetContentMapping(contentKey, key digest.Digest) error {
return mc.local.SetContentMapping(contentKey, key)
}
func (mc *mergedCache) GetContentMapping(dgst digest.Digest) ([]digest.Digest, error) {
localKeys, err := mc.local.GetContentMapping(dgst)
if err != nil {
return nil, err
}
remoteKeys, err := mc.remote.GetContentMapping(dgst)
if err != nil {
return nil, err
}
return append(localKeys, remoteKeys...), nil
}
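The mergedCache above is a straightforward two-level read-through: probes and lookups prefer the local cache and fall back to the remote one, while all writes stay local so an imported cache is never mutated. The same pattern, reduced to a standalone sketch over plain maps (all names here are illustrative):

package main

import "fmt"

// twoLevel resolves keys against a local map first and falls back to a
// read-only remote map, mirroring mergedCache's lookup order.
type twoLevel struct {
	local, remote map[string]string
}

func (c *twoLevel) lookup(key string) (string, bool) {
	if v, ok := c.local[key]; ok {
		return v, true
	}
	v, ok := c.remote[key]
	return v, ok
}

func (c *twoLevel) set(key, val string) {
	c.local[key] = val // writes never touch the remote side
}

func main() {
	c := &twoLevel{local: map[string]string{}, remote: map[string]string{"a": "imported"}}
	c.set("b", "local")
	fmt.Println(c.lookup("a")) // imported true
	fmt.Println(c.lookup("b")) // local true
}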

View File

@ -38,8 +38,8 @@ type SourceOpt struct {
}
type blobmapper interface {
GetBlob(ctx gocontext.Context, key string) (digest.Digest, error)
SetBlob(ctx gocontext.Context, key string, blob digest.Digest) error
GetBlob(ctx gocontext.Context, key string) (digest.Digest, digest.Digest, error)
SetBlob(ctx gocontext.Context, key string, diffID, blob digest.Digest) error
}
type resolveRecord struct {
@ -211,7 +211,7 @@ func (is *imageSource) fillBlobMapping(ctx context.Context, layers []rootfs.Laye
for _, l := range layers {
chain = append(chain, l.Diff.Digest)
chainID := identity.ChainID(chain)
if err := is.SourceOpt.Snapshotter.(blobmapper).SetBlob(ctx, string(chainID), l.Blob.Digest); err != nil {
if err := is.SourceOpt.Snapshotter.(blobmapper).SetBlob(ctx, string(chainID), l.Diff.Digest, l.Blob.Digest); err != nil {
return err
}
}

View File

@ -133,7 +133,7 @@ func DetectManifestMediaType(ra content.ReaderAt) (string, error) {
}
if mfst.Config != nil {
return ocispec.MediaTypeImageManifest, nil
return images.MediaTypeDockerSchema2Manifest, nil
}
return ocispec.MediaTypeImageIndex, nil
return images.MediaTypeDockerSchema2ManifestList, nil
}

157
util/push/push.go Normal file
View File

@ -0,0 +1,157 @@
package push
import (
"context"
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
func Push(ctx context.Context, cs content.Store, dgst digest.Digest, ref string) error {
resolver := docker.NewResolver(docker.ResolverOptions{
Client: http.DefaultClient,
})
pusher, err := resolver.Pusher(ctx, ref)
if err != nil {
return err
}
var m sync.Mutex
manifestStack := []ocispec.Descriptor{}
filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
m.Lock()
manifestStack = append(manifestStack, desc)
m.Unlock()
return nil, images.StopHandler
default:
return nil, nil
}
})
pushHandler := remotes.PushHandler(cs, pusher)
handlers := append([]images.Handler{},
childrenHandler(cs),
filterHandler,
pushHandler,
)
info, err := cs.Info(ctx, dgst)
if err != nil {
return err
}
ra, err := cs.ReaderAt(ctx, dgst)
if err != nil {
return err
}
mtype, err := imageutil.DetectManifestMediaType(ra)
if err != nil {
return err
}
layersDone := oneOffProgress(ctx, "pushing layers")
err = images.Dispatch(ctx, images.Handlers(handlers...), ocispec.Descriptor{
Digest: dgst,
Size: info.Size,
MediaType: mtype,
})
layersDone(err)
if err != nil {
return err
}
mfstDone := oneOffProgress(ctx, fmt.Sprintf("pushing manifest for %s", ref))
for i := len(manifestStack) - 1; i >= 0; i-- {
_, err := pushHandler(ctx, manifestStack[i])
if err != nil {
mfstDone(err)
return err
}
}
mfstDone(nil)
return nil
}
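Push walks the content graph from the root digest, pushing layers and config first and the collected manifests last, unwinding the manifest stack in reverse so children exist before their parents. A hedged usage sketch; obtaining an initialized containerd content.Store is assumed to happen elsewhere:

package main

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/moby/buildkit/util/push"
	digest "github.com/opencontainers/go-digest"
)

// pushImage pushes the image rooted at dgst from cs to the registry ref,
// e.g. "example.com/myrepo:latest". cs must already contain all blobs.
func pushImage(ctx context.Context, cs content.Store, dgst digest.Digest, ref string) error {
	return push.Push(ctx, cs, dgst, ref)
}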
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
pw.Write(id, st)
pw.Close()
return err
}
}
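oneOffProgress brackets a unit of work with a started/completed progress record; the returned closure is called with the work's error so it can be used in a single return expression. A small in-package sketch (timedStep and the work callback are illustrative):

// timedStep reports name as an in-progress item, runs work, and marks the
// item completed, passing the error through unchanged.
func timedStep(ctx context.Context, name string, work func(context.Context) error) error {
	done := oneOffProgress(ctx, name)
	return done(work(ctx))
}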
func childrenHandler(provider content.Provider) images.HandlerFunc {
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
var descs []ocispec.Descriptor
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
p, err := content.ReadBlob(ctx, provider, desc.Digest)
if err != nil {
return nil, err
}
// TODO(stevvooe): We just assume oci manifest, for now. There may be
// subtle differences from the docker version.
var manifest ocispec.Manifest
if err := json.Unmarshal(p, &manifest); err != nil {
return nil, err
}
descs = append(descs, manifest.Config)
descs = append(descs, manifest.Layers...)
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
p, err := content.ReadBlob(ctx, provider, desc.Digest)
if err != nil {
return nil, err
}
var index ocispec.Index
if err := json.Unmarshal(p, &index); err != nil {
return nil, err
}
for _, m := range index.Manifests {
if m.Digest != "" {
descs = append(descs, m)
}
}
case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip:
// childless data types.
return nil, nil
default:
logrus.Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
}
return descs, nil
}
}

257
vendor/github.com/docker/distribution/blobs.go generated vendored Normal file
View File

@ -0,0 +1,257 @@
package distribution
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
)
var (
// ErrBlobExists returned when blob already exists
ErrBlobExists = errors.New("blob exists")
// ErrBlobDigestUnsupported when blob digest is an unsupported version.
ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
// ErrBlobUnknown when blob is not found.
ErrBlobUnknown = errors.New("unknown blob")
// ErrBlobUploadUnknown returned when upload is not found.
ErrBlobUploadUnknown = errors.New("blob upload unknown")
// ErrBlobInvalidLength returned when the blob's length on commit does not
// match the descriptor, or is an invalid value.
ErrBlobInvalidLength = errors.New("blob invalid length")
)
// ErrBlobInvalidDigest returned when digest check fails.
type ErrBlobInvalidDigest struct {
Digest digest.Digest
Reason error
}
func (err ErrBlobInvalidDigest) Error() string {
return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
err.Digest, err.Reason)
}
// ErrBlobMounted returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
From reference.Canonical
Descriptor Descriptor
}
func (err ErrBlobMounted) Error() string {
return fmt.Sprintf("blob mounted from: %v to: %v",
err.From, err.Descriptor)
}
// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
type Descriptor struct {
// MediaType describes the type of the content. All text-based formats are
// encoded as utf-8.
MediaType string `json:"mediaType,omitempty"`
// Size in bytes of content.
Size int64 `json:"size,omitempty"`
// Digest uniquely identifies the content. A byte stream can be verified
// against this digest.
Digest digest.Digest `json:"digest,omitempty"`
// URLs contains the source URLs of this content.
URLs []string `json:"urls,omitempty"`
// NOTE: Before adding a field here, please ensure that all
// other options have been exhausted. Much of the type relationships
// depend on the simplicity of this type.
}
// Descriptor returns the descriptor, to make it satisfy the Describable
// interface. Note that implementations of Describable are generally objects
// which can be described, not simply descriptors; this exception is in place
// to make it more convenient to pass actual descriptors to functions that
// expect Describable objects.
func (d Descriptor) Descriptor() Descriptor {
return d
}
// BlobStatter makes blob descriptors available by digest. The service may
// provide a descriptor of a different digest if the provided digest is not
// canonical.
type BlobStatter interface {
// Stat provides metadata about a blob identified by the digest. If the
// blob is unknown to the describer, ErrBlobUnknown will be returned.
Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
}
// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
Delete(ctx context.Context, dgst digest.Digest) error
}
// BlobEnumerator enables iterating over blobs from storage
type BlobEnumerator interface {
Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
}
// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
// left off of BlobService and BlobStore.
type BlobDescriptorService interface {
BlobStatter
// SetDescriptor assigns the descriptor to the digest. The provided digest and
// the digest in the descriptor must map to identical content but they may
// differ on their algorithm. The descriptor must have the canonical
// digest of the content and the digest algorithm must match the
// annotator's canonical algorithm.
//
// Such a facility can be used to map blobs between digest domains, with
// the restriction that the algorithm of the descriptor must match the
// canonical algorithm (i.e. sha256) of the annotator.
SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
// Clear enables descriptors to be unlinked
Clear(ctx context.Context, dgst digest.Digest) error
}
// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
type BlobDescriptorServiceFactory interface {
BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
}
// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
io.ReadSeeker
io.Closer
}
// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
// Get returns the entire blob identified by digest along with the descriptor.
Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
// Open provides a ReadSeekCloser to the blob identified by the provided
// descriptor. If the blob is not known to the service, an error will be
// returned.
Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
}
// BlobServer can serve blobs via http.
type BlobServer interface {
// ServeBlob attempts to serve the blob, identified by dgst, via http. The
// service may decide to redirect the client elsewhere or serve the data
// directly.
//
// This handler only issues successful responses, such as 2xx or 3xx,
// meaning it serves data or issues a redirect. If the blob is not
// available, an error will be returned and the caller may still issue a
// response.
//
// The implementation may serve the same blob from a different digest
// domain. The appropriate headers will be set for the blob, unless they
// have already been set by the caller.
ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
}
// BlobIngester ingests blob data.
type BlobIngester interface {
// Put inserts the content p into the blob service, returning a descriptor
// or an error.
Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
// Create allocates a new blob writer to add a blob to this service. The
// returned handle can be written to and later resumed using an opaque
// identifier. With this approach, one can Close and Resume a BlobWriter
// multiple times until the BlobWriter is committed or cancelled.
Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
// Resume attempts to resume a write to a blob, identified by an id.
Resume(ctx context.Context, id string) (BlobWriter, error)
}
// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
Apply(interface{}) error
}
// CreateOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type CreateOptions struct {
Mount struct {
ShouldMount bool
From reference.Canonical
// Stat allows passing a precalculated descriptor to link and return.
// Blob access check will be skipped if set.
Stat *Descriptor
}
}
// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
io.WriteCloser
io.ReaderFrom
// Size returns the number of bytes written to this blob.
Size() int64
// ID returns the identifier for this writer. The ID can be used with the
// Blob service to later resume the write.
ID() string
// StartedAt returns the time this blob write was started.
StartedAt() time.Time
// Commit completes the blob writer process. The content is verified
// against the provided provisional descriptor, which may result in an
// error. Depending on the implementation, written data may be validated
// against the provisional descriptor fields. If MediaType is not present,
// the implementation may reject the commit or assign "application/octet-
// stream" to the blob. The returned descriptor may have a different
// digest depending on the blob store, referred to as the canonical
// descriptor.
Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
// Cancel ends the blob write without storing any data and frees any
// associated resources. Any data written thus far will be lost. Cancel
// implementations should allow multiple calls even after a commit that
// result in a no-op. This allows use of Cancel in a defer statement,
// increasing the assurance that it is correctly called.
Cancel(ctx context.Context) error
}
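The Create/Write/Commit sequence is the intended way to add content through a BlobIngester: write the bytes, then commit against a provisional descriptor whose digest the store verifies. A sketch under those assumptions (putBlob is illustrative, not part of the package):

package main

import (
	"context"

	"github.com/docker/distribution"
	digest "github.com/opencontainers/go-digest"
)

// putBlob streams p into the blob store and commits it against its own
// digest; Commit fails if the written data does not match.
func putBlob(ctx context.Context, bs distribution.BlobIngester, mediaType string, p []byte) (distribution.Descriptor, error) {
	w, err := bs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	if _, err := w.Write(p); err != nil {
		w.Cancel(ctx) // safe to call even after partial writes
		return distribution.Descriptor{}, err
	}
	return w.Commit(ctx, distribution.Descriptor{
		MediaType: mediaType,
		Size:      int64(len(p)),
		Digest:    digest.FromBytes(p),
	})
}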
// BlobService combines the operations to access, read and write blobs. This
// can be used to describe remote blob services.
type BlobService interface {
BlobStatter
BlobProvider
BlobIngester
}
// BlobStore represent the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
BlobService
BlobServer
BlobDeleter
}

7
vendor/github.com/docker/distribution/doc.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
// Package distribution will define the interfaces for the components of
// docker distribution. The goal is to allow users to reliably package, ship
// and store content related to docker images.
//
// This is currently a work in progress. More details are available in the
// README.md.
package distribution

115
vendor/github.com/docker/distribution/errors.go generated vendored Normal file
View File

@ -0,0 +1,115 @@
package distribution
import (
"errors"
"fmt"
"strings"
"github.com/opencontainers/go-digest"
)
// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")
// ErrManifestNotModified is returned when a conditional manifest GetByTag
// returns nil due to the client indicating it has the latest version
var ErrManifestNotModified = errors.New("manifest not modified")
// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed
var ErrUnsupported = errors.New("operation unsupported")
// ErrTagUnknown is returned if the given tag is not known by the tag service
type ErrTagUnknown struct {
Tag string
}
func (err ErrTagUnknown) Error() string {
return fmt.Sprintf("unknown tag=%s", err.Tag)
}
// ErrRepositoryUnknown is returned if the named repository is not known by
// the registry.
type ErrRepositoryUnknown struct {
Name string
}
func (err ErrRepositoryUnknown) Error() string {
return fmt.Sprintf("unknown repository name=%s", err.Name)
}
// ErrRepositoryNameInvalid should be used to denote an invalid repository
// name. Reason may be set, indicating the cause of invalidity.
type ErrRepositoryNameInvalid struct {
Name string
Reason error
}
func (err ErrRepositoryNameInvalid) Error() string {
return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
}
// ErrManifestUnknown is returned if the manifest is not known by the
// registry.
type ErrManifestUnknown struct {
Name string
Tag string
}
func (err ErrManifestUnknown) Error() string {
return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
}
// ErrManifestUnknownRevision is returned when a manifest cannot be found by
// revision within a repository.
type ErrManifestUnknownRevision struct {
Name string
Revision digest.Digest
}
func (err ErrManifestUnknownRevision) Error() string {
return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}
// ErrManifestUnverified is returned when the registry is unable to verify
// the manifest.
type ErrManifestUnverified struct{}
func (ErrManifestUnverified) Error() string {
return "unverified manifest"
}
// ErrManifestVerification provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ErrManifestVerification []error
func (errs ErrManifestVerification) Error() string {
var parts []string
for _, err := range errs {
parts = append(parts, err.Error())
}
return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}
// ErrManifestBlobUnknown returned when a referenced blob cannot be found.
type ErrManifestBlobUnknown struct {
Digest digest.Digest
}
func (err ErrManifestBlobUnknown) Error() string {
return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}
// ErrManifestNameInvalid should be used to denote an invalid manifest
// name. Reason may be set, indicating the cause of invalidity.
type ErrManifestNameInvalid struct {
Name string
Reason error
}
func (err ErrManifestNameInvalid) Error() string {
return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}

View File

@ -0,0 +1 @@
package manifest

View File

@ -0,0 +1,85 @@
package schema2
import (
"context"
"github.com/docker/distribution"
"github.com/opencontainers/go-digest"
)
// builder is a type for constructing manifests.
type builder struct {
// bs is a BlobService used to publish the configuration blob.
bs distribution.BlobService
// configMediaType is media type used to describe configuration
configMediaType string
// configJSON is the raw configuration blob to publish.
configJSON []byte
// dependencies is a list of descriptors built up by successive calls to
// AppendReference. For an image manifest these are the layers.
dependencies []distribution.Descriptor
}
// NewManifestBuilder is used to build new manifests for the current schema
// version. It takes a BlobService so it can publish the configuration blob
// as part of the Build process.
func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder {
mb := &builder{
bs: bs,
configMediaType: configMediaType,
configJSON: make([]byte, len(configJSON)),
}
copy(mb.configJSON, configJSON)
return mb
}
// Build produces a final manifest from the given references.
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
m := Manifest{
Versioned: SchemaVersion,
Layers: make([]distribution.Descriptor, len(mb.dependencies)),
}
copy(m.Layers, mb.dependencies)
configDigest := digest.FromBytes(mb.configJSON)
var err error
m.Config, err = mb.bs.Stat(ctx, configDigest)
switch err {
case nil:
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = mb.configMediaType
return FromStruct(m)
case distribution.ErrBlobUnknown:
// nop
default:
return nil, err
}
// Add config to the blob store
m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON)
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = mb.configMediaType
if err != nil {
return nil, err
}
return FromStruct(m)
}
// AppendReference adds a reference to the current ManifestBuilder.
func (mb *builder) AppendReference(d distribution.Describable) error {
mb.dependencies = append(mb.dependencies, d.Descriptor())
return nil
}
// References returns the current references added to this builder.
func (mb *builder) References() []distribution.Descriptor {
return mb.dependencies
}
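End to end, the builder is fed the config JSON up front, collects layer descriptors through AppendReference, and publishes the config blob during Build. A hedged sketch (buildManifest and its arguments are illustrative):

package main

import (
	"context"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"
)

// buildManifest assembles a schema2 manifest from a config blob and an
// ordered list of layer descriptors (base layer first).
func buildManifest(ctx context.Context, bs distribution.BlobService, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	mb := schema2.NewManifestBuilder(bs, schema2.MediaTypeImageConfig, configJSON)
	for _, l := range layers {
		if err := mb.AppendReference(l); err != nil {
			return nil, err
		}
	}
	return mb.Build(ctx)
}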

View File

@ -0,0 +1,138 @@
package schema2
import (
"encoding/json"
"errors"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/opencontainers/go-digest"
)
const (
// MediaTypeManifest specifies the mediaType for the current version.
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
// MediaTypeImageConfig specifies the mediaType for the image configuration.
MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"
// MediaTypePluginConfig specifies the mediaType for plugin configuration.
MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
// MediaTypeLayer is the mediaType used for layers referenced by the
// manifest.
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// MediaTypeForeignLayer is the mediaType used for layers that must be
// downloaded from foreign URLs.
MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
// MediaTypeUncompressedLayer is the mediaType used for layers which
// are not compressed.
MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifest,
}
)
func init() {
schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
m := new(DeserializedManifest)
err := m.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
dgst := digest.FromBytes(b)
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
}
err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
// Manifest defines a schema2 manifest.
type Manifest struct {
manifest.Versioned
// Config references the image configuration as a blob.
Config distribution.Descriptor `json:"config"`
// Layers lists descriptors for the layers referenced by the
// configuration.
Layers []distribution.Descriptor `json:"layers"`
}
// References returns the descriptors of this manifest's references.
func (m Manifest) References() []distribution.Descriptor {
references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
references = append(references, m.Config)
references = append(references, m.Layers...)
return references
}
// Target returns the target of this signed manifest.
func (m Manifest) Target() distribution.Descriptor {
return m.Config
}
// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
Manifest
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
var deserialized DeserializedManifest
deserialized.Manifest = m
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new Manifest struct from JSON data.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store manifest in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(m.canonical, &manifest); err != nil {
return err
}
m.Manifest = manifest
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// an error is returned instead of marshaling the inner contents.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}
// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
return m.MediaType, m.canonical, nil
}

View File

@ -0,0 +1,12 @@
package manifest
// Versioned provides a struct with the manifest schemaVersion and mediaType.
// Incoming content with unknown schema version can be decoded against this
// struct to check the version.
type Versioned struct {
// SchemaVersion is the image manifest schema that this image follows
SchemaVersion int `json:"schemaVersion"`
// MediaType is the media type of this schema.
MediaType string `json:"mediaType,omitempty"`
}
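Because Versioned carries only the two discriminating fields, it can be decoded from any manifest payload to decide how to parse the rest. A sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/manifest"
)

// sniffVersion decodes just schemaVersion/mediaType from an unknown payload.
func sniffVersion(raw []byte) (manifest.Versioned, error) {
	var v manifest.Versioned
	if err := json.Unmarshal(raw, &v); err != nil {
		return manifest.Versioned{}, err
	}
	return v, nil
}

func main() {
	v, _ := sniffVersion([]byte(`{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json"}`))
	fmt.Println(v.SchemaVersion, v.MediaType)
}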

125
vendor/github.com/docker/distribution/manifests.go generated vendored Normal file
View File

@ -0,0 +1,125 @@
package distribution
import (
"context"
"fmt"
"mime"
"github.com/opencontainers/go-digest"
)
// Manifest represents a registry object specifying a set of
// references and an optional target
type Manifest interface {
// References returns a list of objects which make up this manifest.
// A reference is anything which can be represented by a
// distribution.Descriptor. These can consist of layers, resources or other
// manifests.
//
// While no particular order is required, implementations should return
// them from highest to lowest priority. For example, one might want to
// return the base layer before the top layer.
References() []Descriptor
// Payload provides the serialized format of the manifest, in addition to
// the media type.
Payload() (mediaType string, payload []byte, err error)
}
// ManifestBuilder creates a manifest allowing one to include dependencies.
// Instances can be obtained from a version-specific manifest package. Manifest
// specific data is passed into the function which creates the builder.
type ManifestBuilder interface {
// Build creates the manifest from this builder.
Build(ctx context.Context) (Manifest, error)
// References returns a list of objects which have been added to this
// builder. The dependencies are returned in the order they were added,
// which should be from base to head.
References() []Descriptor
// AppendReference includes the given object in the manifest after any
// existing dependencies. If the add fails, such as when adding an
// unsupported dependency, an error may be returned.
//
// The destination of the reference is dependent on the manifest type and
// the dependency type.
AppendReference(dependency Describable) error
}
// ManifestService describes operations on image manifests.
type ManifestService interface {
// Exists returns true if the manifest exists.
Exists(ctx context.Context, dgst digest.Digest) (bool, error)
// Get retrieves the manifest specified by the given digest
Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
// Put creates or updates the given manifest returning the manifest digest
Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
// Delete removes the manifest specified by the given digest. Deleting
// a manifest that doesn't exist will return ErrManifestNotFound
Delete(ctx context.Context, dgst digest.Digest) error
}
// ManifestEnumerator enables iterating over manifests
type ManifestEnumerator interface {
// Enumerate calls ingester for each manifest.
Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}
// Describable is an interface for descriptors
type Describable interface {
Descriptor() Descriptor
}
// ManifestMediaTypes returns the supported media types for manifests.
func ManifestMediaTypes() (mediaTypes []string) {
for t := range mappings {
if t != "" {
mediaTypes = append(mediaTypes, t)
}
}
return
}
// UnmarshalFunc unmarshals a manifest payload for a given MediaType
type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
var mappings = make(map[string]UnmarshalFunc)
// UnmarshalManifest looks up manifest unmarshal functions based on
// MediaType
func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
// Need to look up by the actual media type, not the raw contents of
// the header. Strip semicolons and anything following them.
var mediaType string
if ctHeader != "" {
var err error
mediaType, _, err = mime.ParseMediaType(ctHeader)
if err != nil {
return nil, Descriptor{}, err
}
}
unmarshalFunc, ok := mappings[mediaType]
if !ok {
unmarshalFunc, ok = mappings[""]
if !ok {
return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
}
}
return unmarshalFunc(p)
}
// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
// This should be called from the init function of a specific manifest package.
func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
if _, ok := mappings[mediaType]; ok {
return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
}
mappings[mediaType] = u
return nil
}
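Callers typically pair UnmarshalManifest with an HTTP fetch, advertising the registered media types in Accept and dispatching on the response's Content-Type; the schema2 package must be imported for its init-time registration. A hedged sketch (URL handling and error paths are simplified):

package main

import (
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/docker/distribution"
	_ "github.com/docker/distribution/manifest/schema2" // registers the schema2 UnmarshalFunc
)

// fetchManifest downloads and decodes a manifest from url.
func fetchManifest(url string) (distribution.Manifest, distribution.Descriptor, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, distribution.Descriptor{}, err
	}
	req.Header.Set("Accept", strings.Join(distribution.ManifestMediaTypes(), ", "))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, distribution.Descriptor{}, err
	}
	defer resp.Body.Close()
	p, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, distribution.Descriptor{}, err
	}
	return distribution.UnmarshalManifest(resp.Header.Get("Content-Type"), p)
}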

98
vendor/github.com/docker/distribution/registry.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
package distribution
import (
"context"
"github.com/docker/distribution/reference"
)
// Scope defines the set of items that match a namespace.
type Scope interface {
// Contains returns true if the name belongs to the namespace.
Contains(name string) bool
}
type fullScope struct{}
func (f fullScope) Contains(string) bool {
return true
}
// GlobalScope represents the full namespace scope which contains
// all other scopes.
var GlobalScope = Scope(fullScope{})
// Namespace represents a collection of repositories, addressable by name.
// Generally, a namespace is backed by a set of one or more services,
// providing facilities such as registry access, trust, and indexing.
type Namespace interface {
// Scope describes the names that can be used with this Namespace. The
// global namespace will have a scope that matches all names. The scope
// effectively provides an identity for the namespace.
Scope() Scope
// Repository should return a reference to the named repository. The
// registry may or may not have the repository but should always return a
// reference.
Repository(ctx context.Context, name reference.Named) (Repository, error)
// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
// up to the size of 'repos' and returns the value 'n' for the number of entries
// which were filled. 'last' contains an offset in the catalog, and 'err' will be
// set to io.EOF if there are no more entries to obtain.
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
// Blobs returns a blob enumerator to access all blobs
Blobs() BlobEnumerator
// BlobStatter returns a BlobStatter to stat blobs by digest.
BlobStatter() BlobStatter
}
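Repositories uses the buffer-and-offset pattern: the caller supplies a slice, gets back the fill count, and resumes from the last name until io.EOF. A sketch:

package main

import (
	"context"
	"io"

	"github.com/docker/distribution"
)

// allRepos pages through the catalog 100 entries at a time.
func allRepos(ctx context.Context, ns distribution.Namespace) ([]string, error) {
	var out []string
	buf := make([]string, 100)
	last := ""
	for {
		n, err := ns.Repositories(ctx, buf, last)
		out = append(out, buf[:n]...)
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return nil, err
		}
		if n > 0 {
			last = buf[n-1]
		}
	}
}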
// RepositoryEnumerator describes an operation to enumerate repositories
type RepositoryEnumerator interface {
Enumerate(ctx context.Context, ingester func(string) error) error
}
// ManifestServiceOption is a function argument for Manifest Service methods
type ManifestServiceOption interface {
Apply(ManifestService) error
}
// WithTag allows a tag to be passed into Put
func WithTag(tag string) ManifestServiceOption {
return WithTagOption{tag}
}
// WithTagOption holds a tag
type WithTagOption struct{ Tag string }
// Apply conforms to the ManifestServiceOption interface
func (o WithTagOption) Apply(m ManifestService) error {
// no implementation
return nil
}
// Repository is a named collection of manifests and layers.
type Repository interface {
// Named returns the name of the repository.
Named() reference.Named
// Manifests returns a reference to this repository's manifest service,
// with the supplied options applied.
Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
// Blobs returns a reference to this repository's blob service.
Blobs(ctx context.Context) BlobStore
// TODO(stevvooe): The above BlobStore return can probably be relaxed to
// be a BlobService for use with clients. This will allow such
// implementations to avoid implementing ServeBlob.
// Tags returns a reference to this repository's tag service
Tags(ctx context.Context) TagService
}
// TODO(stevvooe): Must add close methods to all these. May want to change the
// way instances are created to better reflect internal dependency
// relationships.

27
vendor/github.com/docker/distribution/tags.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
package distribution
import (
"context"
)
// TagService provides access to information about tagged objects.
type TagService interface {
// Get retrieves the descriptor identified by the tag. Some
// implementations may differentiate between "trusted" tags and
// "untrusted" tags. If a tag is "untrusted", the mapping will be returned
// as an ErrTagUntrusted error, with the target descriptor.
Get(ctx context.Context, tag string) (Descriptor, error)
// Tag associates the tag with the provided descriptor, updating the
// current association, if needed.
Tag(ctx context.Context, tag string, desc Descriptor) error
// Untag removes the given tag association
Untag(ctx context.Context, tag string) error
// All returns the set of tags managed by this tag service
All(ctx context.Context) ([]string, error)
// Lookup returns the set of tags referencing the given digest.
Lookup(ctx context.Context, digest Descriptor) ([]string, error)
}
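A typical TagService interaction resolves a tag to its descriptor and re-points another tag at the same content. A sketch (retag is illustrative):

package main

import (
	"context"

	"github.com/docker/distribution"
)

// retag points the tag "to" at whatever descriptor "from" currently resolves to.
func retag(ctx context.Context, ts distribution.TagService, from, to string) error {
	desc, err := ts.Get(ctx, from)
	if err != nil {
		return err
	}
	return ts.Tag(ctx, to, desc)
}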