Merge pull request #71 from tonistiigi/local-session

source: add local file source

commit 6896b5c414

@@ -19,6 +19,7 @@
 	Vertex
 	VertexStatus
 	VertexLog
+	BytesMessage
 */
 package moby_buildkit_v1
 
@@ -121,6 +122,7 @@ type SolveRequest struct {
 	Definition    [][]byte          `protobuf:"bytes,2,rep,name=Definition" json:"Definition,omitempty"`
 	Exporter      string            `protobuf:"bytes,3,opt,name=Exporter,proto3" json:"Exporter,omitempty"`
 	ExporterAttrs map[string]string `protobuf:"bytes,4,rep,name=ExporterAttrs" json:"ExporterAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Session       string            `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"`
 }
 
 func (m *SolveRequest) Reset() { *m = SolveRequest{} }
@@ -156,6 +158,13 @@ func (m *SolveRequest) GetExporterAttrs() map[string]string {
 	return nil
 }
 
+func (m *SolveRequest) GetSession() string {
+	if m != nil {
+		return m.Session
+	}
+	return ""
+}
+
 type SolveResponse struct {
 	Vtx []*Vertex `protobuf:"bytes,1,rep,name=vtx" json:"vtx,omitempty"`
 }
@@ -369,6 +378,22 @@ func (m *VertexLog) GetMsg() []byte {
 	return nil
 }
 
+type BytesMessage struct {
+	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *BytesMessage) Reset()                    { *m = BytesMessage{} }
+func (m *BytesMessage) String() string            { return proto.CompactTextString(m) }
+func (*BytesMessage) ProtoMessage()               {}
+func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} }
+
+func (m *BytesMessage) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
 func init() {
 	proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest")
 	proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse")
@@ -380,6 +405,7 @@ func init() {
 	proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex")
 	proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus")
 	proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog")
+	proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage")
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -396,6 +422,7 @@ type ControlClient interface {
 	DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error)
 	Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error)
 	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error)
+	Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error)
 }
 
 type controlClient struct {
@@ -456,12 +483,44 @@ func (x *controlStatusClient) Recv() (*StatusResponse, error) {
 	return m, nil
 }
 
+func (c *controlClient) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_Control_serviceDesc.Streams[1], c.cc, "/moby.buildkit.v1.Control/Session", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &controlSessionClient{stream}
+	return x, nil
+}
+
+type Control_SessionClient interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ClientStream
+}
+
+type controlSessionClient struct {
+	grpc.ClientStream
+}
+
+func (x *controlSessionClient) Send(m *BytesMessage) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *controlSessionClient) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
 // Server API for Control service
 
 type ControlServer interface {
 	DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error)
 	Solve(context.Context, *SolveRequest) (*SolveResponse, error)
 	Status(*StatusRequest, Control_StatusServer) error
+	Session(Control_SessionServer) error
 }
 
 func RegisterControlServer(s *grpc.Server, srv ControlServer) {
@@ -525,6 +584,32 @@ func (x *controlStatusServer) Send(m *StatusResponse) error {
 	return x.ServerStream.SendMsg(m)
 }
 
+func _Control_Session_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(ControlServer).Session(&controlSessionServer{stream})
+}
+
+type Control_SessionServer interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ServerStream
+}
+
+type controlSessionServer struct {
+	grpc.ServerStream
+}
+
+func (x *controlSessionServer) Send(m *BytesMessage) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *controlSessionServer) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
 var _Control_serviceDesc = grpc.ServiceDesc{
 	ServiceName: "moby.buildkit.v1.Control",
 	HandlerType: (*ControlServer)(nil),
@@ -544,6 +629,12 @@ var _Control_serviceDesc = grpc.ServiceDesc{
 			Handler:       _Control_Status_Handler,
 			ServerStreams: true,
 		},
+		{
+			StreamName:    "Session",
+			Handler:       _Control_Session_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
 	},
 	Metadata: "control.proto",
 }
@@ -697,6 +788,12 @@ func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) {
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if len(m.Session) > 0 {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintControl(dAtA, i, uint64(len(m.Session)))
+		i += copy(dAtA[i:], m.Session)
+	}
 	return i, nil
 }
 
@@ -1006,6 +1103,30 @@ func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
+func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Data) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintControl(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
+	}
+	return i, nil
+}
+
 func encodeFixed64Control(dAtA []byte, offset int, v uint64) int {
 	dAtA[offset] = uint8(v)
 	dAtA[offset+1] = uint8(v >> 8)
@@ -1095,6 +1216,10 @@ func (m *SolveRequest) Size() (n int) {
 			n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
 		}
 	}
+	l = len(m.Session)
+	if l > 0 {
+		n += 1 + l + sovControl(uint64(l))
+	}
 	return n
 }
 
@@ -1232,6 +1357,16 @@ func (m *VertexLog) Size() (n int) {
 	return n
 }
 
+func (m *BytesMessage) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Data)
+	if l > 0 {
+		n += 1 + l + sovControl(uint64(l))
+	}
+	return n
+}
+
 func sovControl(x uint64) (n int) {
 	for {
 		n++
@@ -1746,6 +1881,35 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
 				m.ExporterAttrs[mapkey] = mapvalue
 			}
 			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowControl
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthControl
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Session = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipControl(dAtA[iNdEx:])
@@ -2752,6 +2916,87 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error {
 		}
 	}
 	return nil
 }
+func (m *BytesMessage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowControl
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowControl
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthControl
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipControl(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthControl
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func skipControl(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
@@ -2860,55 +3105,57 @@ var (
 func init() { proto.RegisterFile("control.proto", fileDescriptorControl) }
 
 var fileDescriptorControl = []byte{
-	// 785 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x6f, 0xe3, 0x44,
-	0x14, 0x67, 0xec, 0xd4, 0x49, 0x5e, 0x92, 0x55, 0x19, 0xa1, 0x95, 0x65, 0x44, 0x12, 0xcc, 0x25,
-	0x5a, 0x69, 0x1d, 0x36, 0x80, 0x84, 0x8a, 0x84, 0x20, 0x64, 0x25, 0x5a, 0xb1, 0x97, 0xe9, 0x16,
-	0xce, 0x4e, 0x32, 0x75, 0xad, 0xd8, 0x9e, 0x30, 0x33, 0x8e, 0x1a, 0x3e, 0x05, 0xdf, 0x85, 0xcf,
-	0x80, 0xd4, 0x23, 0x67, 0x0e, 0x05, 0xf5, 0x03, 0x70, 0x85, 0x23, 0xf2, 0xcc, 0x38, 0x75, 0x9b,
-	0xa6, 0x94, 0xf6, 0x94, 0x79, 0x93, 0xdf, 0xfb, 0xbd, 0x37, 0xbf, 0xf7, 0xc7, 0xd0, 0x99, 0xb1,
-	0x4c, 0x72, 0x96, 0x04, 0x4b, 0xce, 0x24, 0xc3, 0xfb, 0x29, 0x9b, 0xae, 0x83, 0x69, 0x1e, 0x27,
-	0xf3, 0x45, 0x2c, 0x83, 0xd5, 0x2b, 0xef, 0x65, 0x14, 0xcb, 0xb3, 0x7c, 0x1a, 0xcc, 0x58, 0x3a,
-	0x8c, 0x58, 0xc4, 0x86, 0x0a, 0x38, 0xcd, 0x4f, 0x95, 0xa5, 0x0c, 0x75, 0xd2, 0x04, 0x5e, 0x2f,
-	0x62, 0x2c, 0x4a, 0xe8, 0x35, 0x4a, 0xc6, 0x29, 0x15, 0x32, 0x4c, 0x97, 0x1a, 0xe0, 0x63, 0xd8,
-	0x9f, 0xc4, 0x62, 0x71, 0x22, 0xc2, 0x88, 0x12, 0xfa, 0x63, 0x4e, 0x85, 0xf4, 0x8f, 0xe0, 0xdd,
-	0xca, 0x9d, 0x58, 0xb2, 0x4c, 0x50, 0xfc, 0x19, 0x38, 0x9c, 0xce, 0x18, 0x9f, 0xbb, 0xa8, 0x6f,
-	0x0f, 0x5a, 0xa3, 0x0f, 0x82, 0xdb, 0xb9, 0x05, 0xc6, 0xa1, 0x00, 0x11, 0x03, 0xf6, 0x43, 0x68,
-	0x55, 0xae, 0xf1, 0x33, 0xb0, 0x0e, 0x27, 0x2e, 0xea, 0xa3, 0x41, 0x93, 0x58, 0x87, 0x13, 0xec,
-	0x42, 0xfd, 0x4d, 0x2e, 0xc3, 0x69, 0x42, 0x5d, 0xab, 0x8f, 0x06, 0x0d, 0x52, 0x9a, 0xf8, 0x3d,
-	0xd8, 0x3b, 0xcc, 0x4e, 0x04, 0x75, 0x6d, 0x75, 0xaf, 0x0d, 0x8c, 0xa1, 0x76, 0x1c, 0xff, 0x44,
-	0xdd, 0x5a, 0x1f, 0x0d, 0x6c, 0xa2, 0xce, 0xfe, 0xdf, 0x08, 0xda, 0xc7, 0x2c, 0x59, 0x95, 0xf9,
-	0xe3, 0x7d, 0xb0, 0x09, 0x3d, 0x35, 0x51, 0x8a, 0x23, 0xee, 0x02, 0x4c, 0xe8, 0x69, 0x9c, 0xc5,
-	0x32, 0x66, 0x99, 0x6b, 0xf5, 0xed, 0x41, 0x9b, 0x54, 0x6e, 0xb0, 0x07, 0x8d, 0xd7, 0xe7, 0x4b,
-	0xc6, 0x25, 0xe5, 0x2a, 0x5e, 0x93, 0x6c, 0x6c, 0xfc, 0x03, 0x74, 0xca, 0xf3, 0xd7, 0x52, 0x72,
-	0xe1, 0xd6, 0xd4, 0xfb, 0x5f, 0x6d, 0xbf, 0xbf, 0x9a, 0x44, 0x70, 0xc3, 0xe7, 0x75, 0x26, 0xf9,
-	0x9a, 0xdc, 0xe4, 0xf1, 0xbe, 0x02, 0xbc, 0x0d, 0x2a, 0x92, 0x5f, 0xd0, 0x75, 0x99, 0xfc, 0x82,
-	0xae, 0x0b, 0x25, 0x56, 0x61, 0x92, 0x6b, 0x85, 0x9a, 0x44, 0x1b, 0x07, 0xd6, 0xe7, 0xc8, 0xff,
-	0x02, 0x3a, 0x26, 0xa6, 0x29, 0xd2, 0x0b, 0xb0, 0x57, 0xf2, 0xdc, 0x54, 0xc8, 0xdd, 0xce, 0xf0,
-	0x7b, 0xca, 0x25, 0x3d, 0x27, 0x05, 0xc8, 0xff, 0x10, 0x3a, 0xc7, 0x32, 0x94, 0xb9, 0xd8, 0x29,
-	0x9b, 0xff, 0x0b, 0x82, 0x67, 0x25, 0xc6, 0x44, 0xf8, 0x14, 0x1a, 0x2b, 0x45, 0x42, 0xc5, 0x7f,
-	0x86, 0xd9, 0x20, 0xf1, 0x01, 0x34, 0x84, 0xe2, 0xa1, 0x42, 0xa9, 0xdf, 0x1a, 0x75, 0x77, 0x79,
-	0x99, 0x78, 0x1b, 0x3c, 0x1e, 0x42, 0x2d, 0x61, 0x91, 0x70, 0x6d, 0xe5, 0xf7, 0xfe, 0x2e, 0xbf,
-	0xef, 0x58, 0x44, 0x14, 0xd0, 0xbf, 0xb4, 0xc0, 0xd1, 0x77, 0xf8, 0x08, 0x9c, 0x79, 0x1c, 0x51,
-	0x21, 0xf5, 0xab, 0xc6, 0xa3, 0x8b, 0xcb, 0xde, 0x3b, 0xbf, 0x5f, 0xf6, 0x5e, 0x54, 0xa6, 0x88,
-	0x2d, 0x69, 0x56, 0x4c, 0x5d, 0x18, 0x67, 0x94, 0x8b, 0x61, 0xc4, 0x5e, 0x6a, 0x97, 0x60, 0xa2,
-	0x7e, 0x88, 0x61, 0x28, 0xb8, 0xe2, 0x6c, 0x99, 0x4b, 0xfd, 0x82, 0x47, 0x72, 0x69, 0x86, 0xa2,
-	0x8d, 0xb3, 0x30, 0xa5, 0xa6, 0xd7, 0xd4, 0x19, 0x3f, 0x07, 0x67, 0x16, 0xce, 0xce, 0xe8, 0x5c,
-	0x35, 0x77, 0x83, 0x18, 0x0b, 0x1f, 0x40, 0x5d, 0xc8, 0x90, 0x4b, 0x3a, 0x77, 0xf7, 0xfa, 0x68,
-	0xd0, 0x1a, 0x79, 0x81, 0x1e, 0xea, 0xa0, 0x1c, 0xea, 0xe0, 0x6d, 0x39, 0xd4, 0xe3, 0xda, 0xcf,
-	0x7f, 0xf4, 0x10, 0x29, 0x1d, 0xf0, 0x97, 0xd0, 0x9c, 0xb1, 0x74, 0x99, 0xd0, 0xc2, 0xdb, 0x79,
-	0xa0, 0xf7, 0xb5, 0x4b, 0xd1, 0x7a, 0x94, 0x73, 0xc6, 0xdd, 0xba, 0x6e, 0x3d, 0x65, 0xf8, 0x7f,
-	0x59, 0xd0, 0xae, 0x16, 0x6b, 0x6b, 0xaa, 0x8f, 0xc0, 0xd1, 0xa5, 0xd7, 0x2d, 0xfb, 0x38, 0xa9,
-	0x34, 0xc3, 0x9d, 0x52, 0xb9, 0x50, 0x9f, 0xe5, 0x9c, 0xd3, 0x4c, 0x9a, 0x45, 0x50, 0x9a, 0x45,
-	0xc2, 0x92, 0xc9, 0x30, 0x51, 0x52, 0xd9, 0x44, 0x1b, 0x78, 0x0c, 0xcd, 0xcd, 0xde, 0x7b, 0x80,
-	0x0c, 0x8d, 0x22, 0x5d, 0x2d, 0xc5, 0xc6, 0xad, 0x5a, 0x86, 0xfa, 0x93, 0xca, 0xd0, 0xf8, 0xdf,
-	0x65, 0xf0, 0x7f, 0x45, 0xd0, 0xdc, 0x74, 0x79, 0x45, 0x5d, 0xf4, 0x64, 0x75, 0x6f, 0x28, 0x63,
-	0x3d, 0x4e, 0x99, 0xe7, 0xe0, 0x08, 0xc9, 0x69, 0x98, 0xaa, 0x1a, 0xd9, 0xc4, 0x58, 0xc5, 0x3e,
-	0x49, 0x45, 0xa4, 0x2a, 0xd4, 0x26, 0xc5, 0x71, 0xf4, 0x0f, 0x82, 0xfa, 0x37, 0xfa, 0x03, 0x87,
-	0xdf, 0x42, 0x73, 0xf3, 0x91, 0xc1, 0xfe, 0xf6, 0x54, 0xdf, 0xfe, 0x2a, 0x79, 0x1f, 0xdd, 0x8b,
-	0x31, 0xeb, 0xe9, 0x5b, 0xd8, 0x53, 0x1b, 0x11, 0x77, 0xef, 0x5f, 0xcf, 0x5e, 0x6f, 0xe7, 0xff,
-	0x86, 0xe9, 0x0d, 0x38, 0xa6, 0xbb, 0xef, 0x82, 0x56, 0x17, 0xa7, 0xd7, 0xdf, 0x0d, 0xd0, 0x64,
-	0x1f, 0xa3, 0x71, 0xfb, 0xe2, 0xaa, 0x8b, 0x7e, 0xbb, 0xea, 0xa2, 0x3f, 0xaf, 0xba, 0x68, 0xea,
-	0x28, 0x6d, 0x3f, 0xf9, 0x37, 0x00, 0x00, 0xff, 0xff, 0x97, 0xc3, 0x70, 0xc2, 0xef, 0x07, 0x00,
-	0x00,
+	// 832 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x55, 0x5f, 0x8f, 0xdb, 0x44,
+	0x10, 0xc7, 0x76, 0xce, 0x49, 0xe6, 0x72, 0xd5, 0xb1, 0x42, 0x95, 0x65, 0x44, 0x12, 0xcc, 0x4b,
+	0x54, 0xa9, 0xbe, 0x36, 0x80, 0x84, 0x0e, 0x09, 0x41, 0x48, 0x25, 0xee, 0xc4, 0xbd, 0xec, 0xb5,
+	0xf0, 0xec, 0x24, 0x73, 0xae, 0x15, 0xc7, 0x1b, 0x76, 0xd7, 0xd1, 0x85, 0x4f, 0x01, 0x9f, 0x85,
+	0xcf, 0x80, 0xe8, 0x23, 0xcf, 0x3c, 0x1c, 0xe8, 0x3e, 0x00, 0x9f, 0x01, 0xed, 0x1f, 0xa7, 0xbe,
+	0xe6, 0xd2, 0x96, 0xeb, 0x93, 0x67, 0xd6, 0xbf, 0x99, 0x9d, 0xf9, 0xcd, 0x9f, 0x85, 0x83, 0x29,
+	0x2b, 0x24, 0x67, 0x79, 0xbc, 0xe4, 0x4c, 0x32, 0x72, 0xb8, 0x60, 0x93, 0x75, 0x3c, 0x29, 0xb3,
+	0x7c, 0x36, 0xcf, 0x64, 0xbc, 0x7a, 0x1c, 0x3e, 0x4c, 0x33, 0xf9, 0xbc, 0x9c, 0xc4, 0x53, 0xb6,
+	0x38, 0x4a, 0x59, 0xca, 0x8e, 0x34, 0x70, 0x52, 0x5e, 0x68, 0x4d, 0x2b, 0x5a, 0x32, 0x0e, 0xc2,
+	0x5e, 0xca, 0x58, 0x9a, 0xe3, 0x4b, 0x94, 0xcc, 0x16, 0x28, 0x64, 0xb2, 0x58, 0x1a, 0x40, 0x44,
+	0xe0, 0x70, 0x9c, 0x89, 0xf9, 0x33, 0x91, 0xa4, 0x48, 0xf1, 0xa7, 0x12, 0x85, 0x8c, 0x4e, 0xe1,
+	0xfd, 0xda, 0x99, 0x58, 0xb2, 0x42, 0x20, 0xf9, 0x1c, 0x7c, 0x8e, 0x53, 0xc6, 0x67, 0x81, 0xd3,
+	0xf7, 0x06, 0xfb, 0xc3, 0x8f, 0xe2, 0x57, 0x63, 0x8b, 0xad, 0x81, 0x02, 0x51, 0x0b, 0x8e, 0x12,
+	0xd8, 0xaf, 0x1d, 0x93, 0x7b, 0xe0, 0x9e, 0x8c, 0x03, 0xa7, 0xef, 0x0c, 0xda, 0xd4, 0x3d, 0x19,
+	0x93, 0x00, 0x9a, 0x67, 0xa5, 0x4c, 0x26, 0x39, 0x06, 0x6e, 0xdf, 0x19, 0xb4, 0x68, 0xa5, 0x92,
+	0x0f, 0x60, 0xef, 0xa4, 0x78, 0x26, 0x30, 0xf0, 0xf4, 0xb9, 0x51, 0x08, 0x81, 0xc6, 0x79, 0xf6,
+	0x33, 0x06, 0x8d, 0xbe, 0x33, 0xf0, 0xa8, 0x96, 0xa3, 0x5f, 0x5d, 0xe8, 0x9c, 0xb3, 0x7c, 0x55,
+	0xc5, 0x4f, 0x0e, 0xc1, 0xa3, 0x78, 0x61, 0x6f, 0x51, 0x22, 0xe9, 0x02, 0x8c, 0xf1, 0x22, 0x2b,
+	0x32, 0x99, 0xb1, 0x22, 0x70, 0xfb, 0xde, 0xa0, 0x43, 0x6b, 0x27, 0x24, 0x84, 0xd6, 0x93, 0xcb,
+	0x25, 0xe3, 0x12, 0xb9, 0xbe, 0xaf, 0x4d, 0x37, 0x3a, 0xf9, 0x11, 0x0e, 0x2a, 0xf9, 0x1b, 0x29,
+	0xb9, 0x08, 0x1a, 0x3a, 0xff, 0xc7, 0xdb, 0xf9, 0xd7, 0x83, 0x88, 0x6f, 0xd8, 0x3c, 0x29, 0x24,
+	0x5f, 0xd3, 0x9b, 0x7e, 0x54, 0xee, 0xe7, 0x28, 0x84, 0x8a, 0x68, 0x4f, 0xdf, 0x59, 0xa9, 0xe1,
+	0xd7, 0x40, 0xb6, 0xcd, 0x55, 0x5a, 0x73, 0x5c, 0x57, 0x69, 0xcd, 0x71, 0xad, 0x38, 0x5a, 0x25,
+	0x79, 0x69, 0xb8, 0x6b, 0x53, 0xa3, 0x1c, 0xbb, 0x5f, 0x38, 0xd1, 0x97, 0x70, 0x60, 0xa3, 0xb1,
+	0xe5, 0x7b, 0x00, 0xde, 0x4a, 0x5e, 0xda, 0xda, 0x05, 0xdb, 0xb1, 0xff, 0x80, 0x5c, 0xe2, 0x25,
+	0x55, 0xa0, 0xe8, 0x63, 0x38, 0x38, 0x97, 0x89, 0x2c, 0xc5, 0x4e, 0x42, 0xa3, 0xdf, 0x1c, 0xb8,
+	0x57, 0x61, 0xec, 0x0d, 0x9f, 0x41, 0x6b, 0xa5, 0x9d, 0xa0, 0x78, 0xe3, 0x35, 0x1b, 0x24, 0x39,
+	0x86, 0x96, 0xd0, 0x7e, 0x50, 0xe8, 0xba, 0xec, 0x0f, 0xbb, 0xbb, 0xac, 0xec, 0x7d, 0x1b, 0x3c,
+	0x39, 0x82, 0x46, 0xce, 0x52, 0x11, 0x78, 0xda, 0xee, 0xc3, 0x5d, 0x76, 0xdf, 0xb3, 0x94, 0x6a,
+	0x60, 0x74, 0xe5, 0x82, 0x6f, 0xce, 0xc8, 0x29, 0xf8, 0xb3, 0x2c, 0x45, 0x21, 0x4d, 0x56, 0xa3,
+	0xe1, 0x8b, 0xab, 0xde, 0x7b, 0x7f, 0x5d, 0xf5, 0x1e, 0xd4, 0xe6, 0x8b, 0x2d, 0xb1, 0x50, 0xf3,
+	0x98, 0x64, 0x05, 0x72, 0x71, 0x94, 0xb2, 0x87, 0xc6, 0x24, 0x1e, 0xeb, 0x0f, 0xb5, 0x1e, 0x94,
+	0xaf, 0xac, 0x58, 0x96, 0xd2, 0x64, 0x70, 0x47, 0x5f, 0xc6, 0x83, 0x6a, 0xf0, 0x22, 0x59, 0xa0,
+	0xed, 0x42, 0x2d, 0x93, 0xfb, 0xe0, 0x4f, 0x93, 0xe9, 0x73, 0x9c, 0xe9, 0xb6, 0x6f, 0x51, 0xab,
+	0x91, 0x63, 0x68, 0x0a, 0x99, 0x70, 0x89, 0x33, 0xdd, 0x40, 0xfb, 0xc3, 0x30, 0x36, 0xe3, 0x1e,
+	0x57, 0xe3, 0x1e, 0x3f, 0xad, 0xc6, 0x7d, 0xd4, 0xf8, 0xe5, 0xef, 0x9e, 0x43, 0x2b, 0x03, 0xf2,
+	0x15, 0xb4, 0xa7, 0x6c, 0xb1, 0xcc, 0x51, 0x59, 0xfb, 0x6f, 0x69, 0xfd, 0xd2, 0x44, 0xb5, 0x1e,
+	0x72, 0xce, 0x78, 0xd0, 0x34, 0xad, 0xa7, 0x95, 0xe8, 0x5f, 0x17, 0x3a, 0xf5, 0x62, 0x6d, 0xcd,
+	0xfb, 0x29, 0xf8, 0xa6, 0xf4, 0xa6, 0x65, 0xef, 0x46, 0x95, 0xf1, 0x70, 0x2b, 0x55, 0x01, 0x34,
+	0xa7, 0x25, 0xe7, 0x58, 0x48, 0xbb, 0x22, 0x2a, 0x55, 0x05, 0x2c, 0x99, 0x4c, 0x72, 0x4d, 0x95,
+	0x47, 0x8d, 0x42, 0x46, 0xd0, 0xde, 0x6c, 0xc4, 0xb7, 0xa0, 0xa1, 0xa5, 0xc2, 0x35, 0x54, 0x6c,
+	0xcc, 0xea, 0x65, 0x68, 0xbe, 0x53, 0x19, 0x5a, 0xff, 0xbb, 0x0c, 0xd1, 0xef, 0x0e, 0xb4, 0x37,
+	0x5d, 0x5e, 0x63, 0xd7, 0x79, 0x67, 0x76, 0x6f, 0x30, 0xe3, 0xde, 0x8d, 0x99, 0xfb, 0xe0, 0x0b,
+	0xc9, 0x31, 0x59, 0xe8, 0x1a, 0x79, 0xd4, 0x6a, 0x6a, 0x9f, 0x2c, 0x44, 0xaa, 0x2b, 0xd4, 0xa1,
+	0x4a, 0x8c, 0x22, 0xe8, 0x8c, 0xd6, 0x12, 0xc5, 0x19, 0x0a, 0xf5, 0x5a, 0xa8, 0xda, 0xce, 0x12,
+	0x99, 0xe8, 0x3c, 0x3a, 0x54, 0xcb, 0xc3, 0x3f, 0x5c, 0x68, 0x7e, 0x6b, 0x9e, 0x47, 0xf2, 0x14,
+	0xda, 0x9b, 0x27, 0x8a, 0x44, 0xdb, 0x93, 0xff, 0xea, 0x9b, 0x16, 0x7e, 0xf2, 0x5a, 0x8c, 0x5d,
+	0xf6, 0x1d, 0xec, 0xe9, 0xad, 0x49, 0xba, 0xaf, 0x5f, 0xee, 0x61, 0x6f, 0xe7, 0x7f, 0xeb, 0x9e,
+	0xce, 0xc0, 0xb7, 0x13, 0x70, 0x1b, 0xb4, 0xbe, 0x5c, 0xc3, 0xfe, 0x6e, 0x80, 0x71, 0xf6, 0xc8,
+	0x21, 0x67, 0x9b, 0xa7, 0xe2, 0xb6, 0xd0, 0xea, 0xcc, 0x85, 0x6f, 0xf8, 0x3f, 0x70, 0x1e, 0x39,
+	0xa3, 0xce, 0x8b, 0xeb, 0xae, 0xf3, 0xe7, 0x75, 0xd7, 0xf9, 0xe7, 0xba, 0xeb, 0x4c, 0x7c, 0x5d,
+	0xce, 0x4f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xc1, 0x1a, 0x81, 0x7c, 0x08, 0x00, 0x00,
 }

@@ -13,6 +13,7 @@ service Control {
 	rpc DiskUsage(DiskUsageRequest) returns (DiskUsageResponse);
 	rpc Solve(SolveRequest) returns (SolveResponse);
 	rpc Status(StatusRequest) returns (stream StatusResponse);
+	rpc Session(stream BytesMessage) returns (stream BytesMessage);
 }
 
 message DiskUsageRequest {
@@ -34,6 +35,7 @@ message SolveRequest {
 	repeated bytes Definition = 2; // TODO: remove repeated
 	string Exporter = 3;
 	map<string, string> ExporterAttrs = 4;
+	string Session = 5;
 }
 
 message SolveResponse {
@@ -78,3 +80,7 @@ message VertexLog {
 	int64 stream = 3;
 	bytes msg = 4;
 }
+
+message BytesMessage {
+	bytes data = 1;
+}

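The Session RPC added above is a plain bidirectional byte stream; the client and daemon later hijack it into a full connection (see grpchijack below). A minimal sketch, not part of this commit, of driving the generated stream API directly; the address, dial options, and payload are illustrative only:

	package main

	import (
		"log"

		controlapi "github.com/moby/buildkit/api/services/control"
		"golang.org/x/net/context"
		"google.golang.org/grpc"
	)

	func main() {
		// Illustrative endpoint; buildd's real listener address depends on setup.
		conn, err := grpc.Dial("127.0.0.1:1234", grpc.WithInsecure())
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		c := controlapi.NewControlClient(conn)
		stream, err := c.Session(context.Background()) // open the new duplex stream
		if err != nil {
			log.Fatal(err)
		}
		// Each BytesMessage frames an opaque chunk of the tunneled connection;
		// in real use grpchijack turns this stream back into a net.Conn, so
		// hand-driving it like this only demonstrates the generated API surface.
		if err := stream.Send(&controlapi.BytesMessage{Data: []byte("ping")}); err != nil {
			log.Fatal(err)
		}
		msg, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("received %d bytes", len(msg.Data))
	}
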
@@ -106,6 +106,6 @@ func testBuildMultiMount(t *testing.T, address string) {
 	err = llb.WriteTo(dt, buf)
 	assert.Nil(t, err)
 
-	err = c.Solve(context.TODO(), buf, nil, "", nil)
+	err = c.Solve(context.TODO(), buf, nil, "", nil, "")
 	assert.Nil(t, err)
 }

@@ -78,6 +78,18 @@ func Scratch() *State {
 	return s
 }
 
+func Local(name string) *State {
+	return Source("local://" + name)
+}
+
+type LocalOption func(*source)
+
+func SessionID(id string) LocalOption {
+	return func(s *source) {
+		s.attrs[pb.AttrLocalSessionID] = id
+	}
+}
+
 type exec struct {
 	meta   Meta
 	mounts []*mount

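llb.Local makes a client-side directory addressable as a source vertex ("local://name"), and SessionID pins it to a concrete transfer session. A small sketch of a definition built from such a source (the image, paths, and the name "app-src" are illustrative; the client must expose a matching directory, e.g. via buildctl's new --local flag, when solving):

	package main

	import (
		"os"

		"github.com/moby/buildkit/client/llb"
	)

	func main() {
		// Mount a client-local checkout into a build step instead of a git source.
		es := llb.Image("docker.io/library/golang:1.8-alpine").
			Dir("/go/src/app").
			Run(llb.Shlex("go build -o /tmp/app ."))
		es.AddMount("/go/src/app", llb.Local("app-src"))

		dt, err := es.Root().Marshal()
		if err != nil {
			panic(err)
		}
		llb.WriteTo(dt, os.Stdout) // pipe into e.g. `buildctl build --local app-src=.`
	}
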
@@ -0,0 +1,5 @@
+package client
+
+func getSharedKey(dir string) (string, error) {
+	return dir, nil // not implemented
+}

@@ -5,15 +5,29 @@ import (
 	"crypto/rand"
 	"encoding/hex"
 	"io"
+	"os"
+	"path/filepath"
+	"strings"
 	"time"
 
+	"github.com/Sirupsen/logrus"
 	controlapi "github.com/moby/buildkit/api/services/control"
 	"github.com/moby/buildkit/client/llb"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/session/filesync"
+	"github.com/moby/buildkit/session/grpchijack"
+	"github.com/moby/buildkit/solver/pb"
 	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
 )
 
-func (c *Client) Solve(ctx context.Context, r io.Reader, statusChan chan *SolveStatus, exporter string, exporterAttrs map[string]string) error {
+func (c *Client) Solve(ctx context.Context, r io.Reader, statusChan chan *SolveStatus, exporter string, exporterAttrs map[string]string, localDir string) error {
+	defer func() {
+		if statusChan != nil {
+			close(statusChan)
+		}
+	}()
+
 	def, err := llb.ReadFrom(r)
 	if err != nil {
 		return errors.Wrap(err, "failed to parse input")
@@ -23,10 +37,34 @@ func (c *Client) Solve(ctx context.Context, r io.Reader, statusChan chan *SolveS
 		return errors.New("invalid empty definition")
 	}
 
+	if err := validateLocals(def, localDir); err != nil {
+		return err
+	}
+
 	ref := generateID()
 	eg, ctx := errgroup.WithContext(ctx)
 
 	statusContext, cancelStatus := context.WithCancel(context.Background())
+	defer cancelStatus()
+
+	sharedKey, err := getSharedKey(localDir)
+	if err != nil {
+		return errors.Wrap(err, "failed to get build shared key")
+	}
+	s, err := session.NewSession(filepath.Base(localDir), sharedKey)
+	if err != nil {
+		return errors.Wrap(err, "failed to create session")
+	}
+
+	if localDir != "" {
+		_, dir, _ := parseLocalDir(localDir)
+		workdirProvider := filesync.NewFSSyncProvider(dir, nil)
+		s.Allow(workdirProvider)
+	}
+
+	eg.Go(func() error {
+		return s.Run(ctx, grpchijack.Dialer(c.controlClient()))
+	})
+
 	eg.Go(func() error {
 		defer func() { // make sure the Status ends cleanly on build errors
@@ -34,12 +72,15 @@ func (c *Client) Solve(ctx context.Context, r io.Reader, statusChan chan *SolveS
 				<-time.After(3 * time.Second)
 				cancelStatus()
 			}()
+			logrus.Debugf("stopping session")
+			s.Close()
 		}()
 		_, err = c.controlClient().Solve(ctx, &controlapi.SolveRequest{
 			Ref:           ref,
 			Definition:    def,
 			Exporter:      exporter,
 			ExporterAttrs: exporterAttrs,
+			Session:       s.UUID(),
 		})
 		if err != nil {
 			return errors.Wrap(err, "failed to solve")
@@ -100,12 +141,6 @@ func (c *Client) Solve(ctx context.Context, r io.Reader, statusChan chan *SolveS
 		}
 	})
 
-	defer func() {
-		if statusChan != nil {
-			close(statusChan)
-		}
-	}()
-
 	return eg.Wait()
 }
 
@@ -116,3 +151,44 @@ func generateID() string {
 	}
 	return hex.EncodeToString(b)
 }
+
+func validateLocals(defs [][]byte, localDir string) error {
+	k, _, err := parseLocalDir(localDir)
+	if err != nil {
+		return err
+	}
+	for _, dt := range defs {
+		var op pb.Op
+		if err := (&op).Unmarshal(dt); err != nil {
+			return errors.Wrap(err, "failed to parse llb proto op")
+		}
+		if src := op.GetSource(); src != nil {
+			if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property
+				name := strings.TrimPrefix(src.Identifier, "local://")
+				if name != k {
+					return errors.Errorf("local directory %s not enabled", name)
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func parseLocalDir(str string) (string, string, error) {
+	if str == "" {
+		return "", "", nil
+	}
+	parts := strings.SplitN(str, "=", 2)
+	if len(parts) != 2 {
+		return "", "", errors.Errorf("invalid local identifier %q, need name=dir", str)
+	}
+	fi, err := os.Stat(parts[1])
+	if err != nil {
+		return "", "", errors.Wrapf(err, "could not find %s", parts[1])
+	}
+	if !fi.IsDir() {
+		return "", "", errors.Errorf("%s not a directory", parts[1])
+	}
+	return parts[0], parts[1], nil
+}

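The name=dir contract enforced by validateLocals/parseLocalDir surfaces at the call site as the new final Solve argument. A sketch of a caller, not part of this commit; the name "src" is an assumption and must match the llb.Local name used inside the definition:

	package main

	import (
		"io"

		"github.com/moby/buildkit/client"
		"golang.org/x/net/context"
	)

	// solveWithLocal runs a solve while exposing one client-side directory
	// ("src" mapped to the current working directory) to the build.
	func solveWithLocal(ctx context.Context, c *client.Client, def io.Reader) error {
		ch := make(chan *client.SolveStatus)
		done := make(chan struct{})
		go func() {
			for range ch { // Solve closes ch when it finishes
			}
			close(done)
		}()
		err := c.Solve(ctx, def, ch, "", nil, "src=.")
		<-done
		return err
	}
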
@@ -33,6 +33,10 @@ var buildCommand = cli.Command{
 			Name:  "no-progress",
 			Usage: "Don't show interactive progress",
 		},
+		cli.StringFlag{
+			Name:  "local",
+			Usage: "Allow build access to the local directory",
+		},
 	},
 }
@@ -61,7 +65,7 @@ func build(clicontext *cli.Context) error {
 	}
 
 	eg.Go(func() error {
-		return c.Solve(ctx, os.Stdin, ch, clicontext.String("exporter"), exporterAttrs)
+		return c.Solve(ctx, os.Stdin, ch, clicontext.String("exporter"), exporterAttrs, clicontext.String("local"))
 	})
 
 	eg.Go(func() error {

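With this flag, an invocation shaped like `buildctl build --local src=. < def.llb` (illustrative) grants the build access to exactly one named client directory, which the solver checks against the local:// names in the definition.
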
@@ -1,11 +1,14 @@
 package control
 
 import (
+	"github.com/Sirupsen/logrus"
 	"github.com/containerd/containerd/snapshot"
 	controlapi "github.com/moby/buildkit/api/services/control"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/exporter"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/session/grpchijack"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/source"
 	"github.com/moby/buildkit/worker"
@@ -22,6 +25,7 @@ type Opt struct {
 	SourceManager    *source.Manager
 	InstructionCache solver.InstructionCache
 	Exporters        map[string]exporter.Exporter
+	SessionManager   *session.Manager
 }
 
 type Controller struct { // TODO: ControlService
@@ -83,6 +87,8 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 		}
 	}
 
+	ctx = session.NewContext(ctx, req.Session)
+
 	if err := c.solver.Solve(ctx, req.Ref, v, expi); err != nil {
 		return nil, err
 	}
@@ -147,3 +153,12 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con
 
 	return eg.Wait()
 }
+
+func (c *Controller) Session(stream controlapi.Control_SessionServer) error {
+	logrus.Debugf("session started")
+	conn, opts := grpchijack.Hijack(stream)
+	defer conn.Close()
+	err := c.opt.SessionManager.HandleConn(stream.Context(), conn, opts)
+	logrus.Debugf("session finished: %v", err)
+	return err
+}

@@ -14,10 +14,12 @@ import (
 	"github.com/moby/buildkit/cache/metadata"
 	"github.com/moby/buildkit/exporter"
 	imageexporter "github.com/moby/buildkit/exporter/containerimage"
+	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/snapshot/blobmapping"
 	"github.com/moby/buildkit/source"
 	"github.com/moby/buildkit/source/containerimage"
 	"github.com/moby/buildkit/source/git"
+	"github.com/moby/buildkit/source/local"
 )
 
 const keyImageExporter = "image"
@@ -85,6 +87,20 @@ func defaultControllerOpts(root string, pd pullDeps) (*Opt, error) {
 
 	sm.Register(gs)
 
+	sessm, err := session.NewManager()
+	if err != nil {
+		return nil, err
+	}
+
+	ss, err := local.NewSource(local.Opt{
+		SessionManager: sessm,
+		CacheAccessor:  cm,
+	})
+	if err != nil {
+		return nil, err
+	}
+	sm.Register(ss)
+
 	exporters := map[string]exporter.Exporter{}
 
 	imageExporter, err := imageexporter.New(imageexporter.Opt{
@@ -105,5 +121,6 @@ func defaultControllerOpts(root string, pd pullDeps) (*Opt, error) {
 		SourceManager:    sm,
 		InstructionCache: ic,
 		Exporters:        exporters,
+		SessionManager:   sessm,
 	}, nil
 }

@@ -0,0 +1,114 @@
+package main
+
+import (
+	"flag"
+	"os"
+
+	"github.com/moby/buildkit/client/llb"
+	"github.com/moby/buildkit/util/system"
+)
+
+type buildOpt struct {
+	target     string
+	containerd string
+	runc       string
+	local      bool
+}
+
+func main() {
+	var opt buildOpt
+	flag.StringVar(&opt.target, "target", "containerd", "target (standalone, containerd)")
+	flag.StringVar(&opt.containerd, "containerd", "master", "containerd version")
+	flag.StringVar(&opt.runc, "runc", "v1.0.0-rc3", "runc version")
+	flag.BoolVar(&opt.local, "local", false, "use local buildkit source")
+	flag.Parse()
+
+	bk := buildkit(opt)
+	out := bk.Run(llb.Shlex("ls -l /bin")) // debug output
+
+	dt, err := out.Marshal()
+	if err != nil {
+		panic(err)
+	}
+	llb.WriteTo(dt, os.Stdout)
+}
+
+func goBuildBase() *llb.State {
+	goAlpine := llb.Image("docker.io/library/golang:1.8-alpine")
+	return goAlpine.
+		AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnv).
+		AddEnv("GOPATH", "/go").
+		Run(llb.Shlex("apk add --no-cache g++ linux-headers make")).Root()
+}
+
+func goRepo(s *llb.State, repo string, src *llb.State) func(ro ...llb.RunOption) *llb.State {
+	dir := "/go/src/" + repo
+	return func(ro ...llb.RunOption) *llb.State {
+		es := s.Dir(dir).Run(ro...)
+		es.AddMount(dir, src)
+		return es.AddMount(dir+"/bin", llb.Scratch())
+	}
+}
+
+func runc(version string) *llb.State {
+	repo := "github.com/opencontainers/runc"
+	return goRepo(goBuildBase(), repo, llb.Git(repo, version))(
+		llb.Shlex("go build -o ./bin/runc ./"),
+	)
+}
+
+func containerd(version string) *llb.State {
+	repo := "github.com/containerd/containerd"
+	return goRepo(
+		goBuildBase().
+			Run(llb.Shlex("apk add --no-cache btrfs-progs-dev")).Root(),
+		repo, llb.Git(repo, version, llb.KeepGitDir()))(
+		llb.Shlex("make bin/containerd"),
+	)
+}
+
+func buildkit(opt buildOpt) *llb.State {
+	repo := "github.com/moby/buildkit"
+	src := llb.Git(repo, "master")
+	if opt.local {
+		src = llb.Local("buildkit-src")
+	}
+	run := goRepo(goBuildBase(), repo, src)
+
+	builddStandalone := run(llb.Shlex("go build -o ./bin/buildd-standalone -tags standalone ./cmd/buildd"))
+
+	builddContainerd := run(llb.Shlex("go build -o ./bin/buildd-containerd -tags containerd ./cmd/buildd"))
+
+	buildctl := run(llb.Shlex("go build -o ./bin/buildctl ./cmd/buildctl"))
+
+	r := llb.Image("docker.io/library/alpine:latest").With(
+		copyAll(buildctl, "/bin"),
+		copyAll(runc(opt.runc), "/bin"),
+	)
+
+	if opt.target == "containerd" {
+		return r.With(
+			copyAll(containerd(opt.containerd), "/bin"),
+			copyAll(builddContainerd, "/bin"))
+	}
+	return r.With(copyAll(builddStandalone, "/bin"))
+}
+
+func copyAll(src *llb.State, destPath string) llb.StateOption {
+	return copyFrom(src, "/.", destPath)
+}
+
+// copyFrom has similar semantics as `COPY --from`
+func copyFrom(src *llb.State, srcPath, destPath string) llb.StateOption {
+	return func(s *llb.State) *llb.State {
+		return copy(src, srcPath, s, destPath)
+	}
+}
+
+// copy copies files between 2 states using cp until there is no copyOp
+func copy(src *llb.State, srcPath string, dest *llb.State, destPath string) *llb.State {
+	cpImage := llb.Image("docker.io/library/alpine:latest")
+	cp := cpImage.Run(llb.Shlexf("cp -a /src%s /dest%s", srcPath, destPath))
+	cp.AddMount("/src", src)
+	return cp.AddMount("/dest", dest)
+}

@@ -0,0 +1,22 @@
+package session
+
+import "context"
+
+type contextKeyT string
+
+var contextKey = contextKeyT("buildkit/session-uuid")
+
+func NewContext(ctx context.Context, uuid string) context.Context {
+	if uuid != "" {
+		return context.WithValue(ctx, contextKey, uuid)
+	}
+	return ctx
+}
+
+func FromContext(ctx context.Context) string {
+	v := ctx.Value(contextKey)
+	if v == nil {
+		return ""
+	}
+	return v.(string)
+}

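NewContext/FromContext carry the session ID from Controller.Solve down to whatever resolves local sources. A fragment sketching the reading side (hypothetical helper assuming the session package import; the real consumer is the local source wired up in defaultControllerOpts above):

	// sessionIDFromBuild recovers the session ID that Controller.Solve
	// attached to the context, reporting whether one was present.
	func sessionIDFromBuild(ctx context.Context) (string, bool) {
		id := session.FromContext(ctx)
		return id, id != ""
	}
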
@@ -0,0 +1,30 @@
+package filesync
+
+import (
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/tonistiigi/fsutil"
+	"google.golang.org/grpc"
+)
+
+func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error {
+	return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{
+		ExcludePatterns: excludes,
+		IncludePaths:    includes, // TODO: rename IncludePatterns
+	}, progress)
+}
+
+func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater) error {
+	st := time.Now()
+	defer func() {
+		logrus.Debugf("diffcopy took: %v", time.Since(st))
+	}()
+	var cf fsutil.ChangeFunc
+	if cu != nil {
+		cu.MarkSupported(true)
+		cf = cu.HandleChange
+	}
+
+	return fsutil.Receive(ds.Context(), ds, dest, cf)
+}

@@ -0,0 +1,186 @@
+package filesync
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/moby/buildkit/session"
+	"github.com/pkg/errors"
+	"github.com/tonistiigi/fsutil"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+const (
+	keyOverrideExcludes = "override-excludes"
+	keyIncludePatterns  = "include-patterns"
+)
+
+type fsSyncProvider struct {
+	root     string
+	excludes []string
+	p        progressCb
+	doneCh   chan error
+}
+
+// NewFSSyncProvider creates a new provider for sending files from client
+func NewFSSyncProvider(root string, excludes []string) session.Attachable {
+	p := &fsSyncProvider{
+		root:     root,
+		excludes: excludes,
+	}
+	return p
+}
+
+func (sp *fsSyncProvider) Register(server *grpc.Server) {
+	RegisterFileSyncServer(server, sp)
+}
+
+func (sp *fsSyncProvider) DiffCopy(stream FileSync_DiffCopyServer) error {
+	return sp.handle("diffcopy", stream)
+}
+func (sp *fsSyncProvider) TarStream(stream FileSync_TarStreamServer) error {
+	return sp.handle("tarstream", stream)
+}
+
+func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error {
+	var pr *protocol
+	for _, p := range supportedProtocols {
+		if method == p.name && isProtoSupported(p.name) {
+			pr = &p
+			break
+		}
+	}
+	if pr == nil {
+		return errors.New("failed to negotiate protocol")
+	}
+
+	opts, _ := metadata.FromContext(stream.Context()) // if no metadata continue with empty object
+
+	var excludes []string
+	if len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true" {
+		excludes = sp.excludes
+	}
+	includes := opts[keyIncludePatterns]
+
+	var progress progressCb
+	if sp.p != nil {
+		progress = sp.p
+		sp.p = nil
+	}
+
+	var doneCh chan error
+	if sp.doneCh != nil {
+		doneCh = sp.doneCh
+		sp.doneCh = nil
+	}
+	err := pr.sendFn(stream, sp.root, includes, excludes, progress)
+	if doneCh != nil {
+		if err != nil {
+			doneCh <- err
+		}
+		close(doneCh)
+	}
+	return err
+}
+
+func (sp *fsSyncProvider) SetNextProgressCallback(f func(int, bool), doneCh chan error) {
+	sp.p = f
+	sp.doneCh = doneCh
+}
+
+type progressCb func(int, bool)
+
+type protocol struct {
+	name   string
+	sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb) error
+	recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater) error
+}
+
+func isProtoSupported(p string) bool {
+	// TODO: this should be removed after testing if stability is confirmed
+	if override := os.Getenv("BUILD_STREAM_PROTOCOL"); override != "" {
+		return strings.EqualFold(p, override)
+	}
+	return true
+}
+
+var supportedProtocols = []protocol{
+	{
+		name:   "diffcopy",
+		sendFn: sendDiffCopy,
+		recvFn: recvDiffCopy,
+	},
+	{
+		name:   "tarstream",
+		sendFn: sendTarStream,
+		recvFn: recvTarStream,
+	},
+}
+
+// FSSendRequestOpt defines options for FSSend request
+type FSSendRequestOpt struct {
+	IncludePatterns  []string
+	OverrideExcludes bool
+	DestDir          string
+	CacheUpdater     CacheUpdater
+}
+
+// CacheUpdater is an object capable of sending notifications for the cache hash changes
+type CacheUpdater interface {
+	MarkSupported(bool)
+	HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error
+}
+
+// FSSync initializes a transfer of files
+func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
+	var pr *protocol
+	for _, p := range supportedProtocols {
+		if isProtoSupported(p.name) && c.Supports(session.MethodURL(_FileSync_serviceDesc.ServiceName, p.name)) {
+			pr = &p
+			break
+		}
+	}
+	if pr == nil {
+		return errors.New("no fssync handlers")
+	}
+
+	opts := make(map[string][]string)
+	if opt.OverrideExcludes {
+		opts[keyOverrideExcludes] = []string{"true"}
+	}
+
+	if opt.IncludePatterns != nil {
+		opts[keyIncludePatterns] = opt.IncludePatterns
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	client := NewFileSyncClient(c.Conn())
+
+	var stream grpc.ClientStream
+
+	ctx = metadata.NewContext(ctx, opts)
+
+	switch pr.name {
+	case "tarstream":
+		cc, err := client.TarStream(ctx)
+		if err != nil {
+			return err
+		}
+		stream = cc
+	case "diffcopy":
+		cc, err := client.DiffCopy(ctx)
+		if err != nil {
+			return err
+		}
+		stream = cc
+	default:
+		panic(fmt.Sprintf("invalid protocol: %q", pr.name))
+	}
+
+	return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater)
+}

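FSSync is the daemon-side entry point: given a session.Caller for a connected client, it negotiates diffcopy or tarstream and pulls the files over. A sketch of a caller, not part of this commit; the destination path is illustrative:

	package main

	import (
		"github.com/moby/buildkit/session"
		"github.com/moby/buildkit/session/filesync"
		"golang.org/x/net/context"
	)

	// pullFiles copies the client's exposed directory into destDir using
	// whichever transfer protocol both ends of the session support.
	func pullFiles(ctx context.Context, c session.Caller, destDir string) error {
		return filesync.FSSync(ctx, c, filesync.FSSendRequestOpt{
			DestDir: destDir,
			// CacheUpdater left nil: no incremental change tracking in this sketch.
		})
	}
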
@@ -0,0 +1,575 @@
+// Code generated by protoc-gen-gogo.
+// source: filesync.proto
+// DO NOT EDIT!
+
+/*
+Package filesync is a generated protocol buffer package.
+
+It is generated from these files:
+	filesync.proto
+
+It has these top-level messages:
+	BytesMessage
+*/
+package filesync
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import bytes "bytes"
+
+import strings "strings"
+import reflect "reflect"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// BytesMessage contains a chunk of byte data
+type BytesMessage struct {
+	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *BytesMessage) Reset()                    { *m = BytesMessage{} }
+func (*BytesMessage) ProtoMessage()               {}
+func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorFilesync, []int{0} }
+
+func (m *BytesMessage) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*BytesMessage)(nil), "moby.filesync.v1.BytesMessage")
+}
+func (this *BytesMessage) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*BytesMessage)
+	if !ok {
+		that2, ok := that.(BytesMessage)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !bytes.Equal(this.Data, that1.Data) {
+		return false
+	}
+	return true
+}
+func (this *BytesMessage) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&filesync.BytesMessage{")
+	s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n")
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func valueToGoStringFilesync(v interface{}, typ string) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for FileSync service
+
+type FileSyncClient interface {
+	DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error)
+	TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error)
+}
+
+type fileSyncClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewFileSyncClient(cc *grpc.ClientConn) FileSyncClient {
+	return &fileSyncClient{cc}
+}
+
+func (c *fileSyncClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSync/DiffCopy", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &fileSyncDiffCopyClient{stream}
+	return x, nil
+}
+
+type FileSync_DiffCopyClient interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ClientStream
+}
+
+type fileSyncDiffCopyClient struct {
+	grpc.ClientStream
+}
+
+func (x *fileSyncDiffCopyClient) Send(m *BytesMessage) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *fileSyncDiffCopyClient) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *fileSyncClient) TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[1], c.cc, "/moby.filesync.v1.FileSync/TarStream", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &fileSyncTarStreamClient{stream}
+	return x, nil
+}
+
+type FileSync_TarStreamClient interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ClientStream
+}
+
+type fileSyncTarStreamClient struct {
+	grpc.ClientStream
+}
+
+func (x *fileSyncTarStreamClient) Send(m *BytesMessage) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *fileSyncTarStreamClient) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Server API for FileSync service
+
+type FileSyncServer interface {
+	DiffCopy(FileSync_DiffCopyServer) error
+	TarStream(FileSync_TarStreamServer) error
+}
+
+func RegisterFileSyncServer(s *grpc.Server, srv FileSyncServer) {
+	s.RegisterService(&_FileSync_serviceDesc, srv)
+}
+
+func _FileSync_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(FileSyncServer).DiffCopy(&fileSyncDiffCopyServer{stream})
+}
+
+type FileSync_DiffCopyServer interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ServerStream
+}
+
+type fileSyncDiffCopyServer struct {
+	grpc.ServerStream
+}
+
+func (x *fileSyncDiffCopyServer) Send(m *BytesMessage) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *fileSyncDiffCopyServer) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _FileSync_TarStream_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(FileSyncServer).TarStream(&fileSyncTarStreamServer{stream})
+}
+
+type FileSync_TarStreamServer interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ServerStream
+}
+
+type fileSyncTarStreamServer struct {
+	grpc.ServerStream
+}
+
+func (x *fileSyncTarStreamServer) Send(m *BytesMessage) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *fileSyncTarStreamServer) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+var _FileSync_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "moby.filesync.v1.FileSync",
|
||||||
|
HandlerType: (*FileSyncServer)(nil),
|
||||||
|
Methods: []grpc.MethodDesc{},
|
||||||
|
Streams: []grpc.StreamDesc{
|
||||||
|
{
|
||||||
|
StreamName: "DiffCopy",
|
||||||
|
Handler: _FileSync_DiffCopy_Handler,
|
||||||
|
ServerStreams: true,
|
||||||
|
ClientStreams: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
StreamName: "TarStream",
|
||||||
|
Handler: _FileSync_TarStream_Handler,
|
||||||
|
ServerStreams: true,
|
||||||
|
ClientStreams: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Metadata: "filesync.proto",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Data) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintFilesync(dAtA, i, uint64(len(m.Data)))
|
||||||
|
i += copy(dAtA[i:], m.Data)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Filesync(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Filesync(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintFilesync(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *BytesMessage) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Data)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovFilesync(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovFilesync(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozFilesync(x uint64) (n int) {
|
||||||
|
return sovFilesync(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *BytesMessage) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&BytesMessage{`,
|
||||||
|
`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringFilesync(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *BytesMessage) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFilesync
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
|
||||||
|
}
|
||||||
|
var byteLen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFilesync
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
byteLen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byteLen < 0 {
|
||||||
|
return ErrInvalidLengthFilesync
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
|
||||||
|
if m.Data == nil {
|
||||||
|
m.Data = []byte{}
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipFilesync(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthFilesync
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipFilesync(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFilesync
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFilesync
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFilesync
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthFilesync
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFilesync
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipFilesync(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthFilesync = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowFilesync = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) }
|
||||||
|
|
||||||
|
var fileDescriptorFilesync = []byte{
|
||||||
|
// 198 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49,
|
||||||
|
0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa,
|
||||||
|
0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6,
|
||||||
|
0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a,
|
||||||
|
0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83,
|
||||||
|
0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85,
|
||||||
|
0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90,
|
||||||
|
0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x74, 0x32,
|
||||||
|
0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9,
|
||||||
|
0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e,
|
||||||
|
0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51,
|
||||||
|
0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0, 0x41, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0c,
|
||||||
|
0x8d, 0xc5, 0x34, 0x01, 0x00, 0x00,
|
||||||
|
}
|
|
@ -0,0 +1,15 @@
syntax = "proto3";

package moby.filesync.v1;

option go_package = "filesync";

service FileSync{
	rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage);
	rpc TarStream(stream BytesMessage) returns (stream BytesMessage);
}

// BytesMessage contains a chunk of byte data
message BytesMessage{
	bytes data = 1;
}
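Note: a minimal sketch (not part of this commit) of how the generated bidirectional DiffCopy stream is driven from the client side, assuming an already established *grpc.ClientConn. The helper name diffCopyOnce is hypothetical; NewFileSyncClient and BytesMessage come from the generated filesync.pb.go above.

package filesync

import (
	"io"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// diffCopyOnce sends one chunk, closes the send side, then drains server
// responses until EOF, mirroring the Send/Recv pairs the generated stream
// interfaces expose.
func diffCopyOnce(cc *grpc.ClientConn, payload []byte) error {
	stream, err := NewFileSyncClient(cc).DiffCopy(context.Background())
	if err != nil {
		return err
	}
	if err := stream.Send(&BytesMessage{Data: payload}); err != nil {
		return err
	}
	if err := stream.CloseSend(); err != nil { // no more chunks from this side
		return err
	}
	for {
		if _, err := stream.Recv(); err != nil {
			if err == io.EOF {
				return nil // server finished its half of the stream
			}
			return err
		}
	}
}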
@ -0,0 +1,71 @@
package filesync

import (
	"context"
	"io/ioutil"
	"path/filepath"
	"testing"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"
)

func TestFileSyncIncludePatterns(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "fsynctest")
	require.NoError(t, err)

	destDir, err := ioutil.TempDir("", "fsynctest")
	require.NoError(t, err)

	err = ioutil.WriteFile(filepath.Join(tmpDir, "foo"), []byte("content1"), 0600)
	require.NoError(t, err)

	err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("content2"), 0600)
	require.NoError(t, err)

	s, err := session.NewSession("foo", "bar")
	require.NoError(t, err)

	m, err := session.NewManager()
	require.NoError(t, err)

	fs := NewFSSyncProvider(tmpDir, nil)
	s.Allow(fs)

	dialer := session.Dialer(testutil.TestStream(testutil.Handler(m.HandleConn)))

	g, ctx := errgroup.WithContext(context.Background())

	g.Go(func() error {
		return s.Run(ctx, dialer)
	})

	g.Go(func() (reterr error) {
		c, err := m.Get(ctx, s.UUID())
		if err != nil {
			return err
		}
		if err := FSSync(ctx, c, FSSendRequestOpt{
			DestDir:         destDir,
			IncludePatterns: []string{"ba*"},
		}); err != nil {
			return err
		}

		_, err = ioutil.ReadFile(filepath.Join(destDir, "foo"))
		assert.Error(t, err)

		dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar"))
		if err != nil {
			return err
		}
		assert.Equal(t, "content2", string(dt))
		return s.Close()
	})

	err = g.Wait()
	require.NoError(t, err)
}
@ -0,0 +1,3 @@
package filesync

//go:generate protoc --gogoslick_out=plugins=grpc:. filesync.proto
@ -0,0 +1,83 @@
package filesync

import (
	"io"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/pkg/errors"
	"google.golang.org/grpc"
)

func sendTarStream(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error {
	a, err := archive.TarWithOptions(dir, &archive.TarOptions{
		ExcludePatterns: excludes,
	})
	if err != nil {
		return err
	}

	size := 0
	buf := make([]byte, 1<<15)
	t := new(BytesMessage)
	for {
		n, err := a.Read(buf)
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		t.Data = buf[:n]

		if err := stream.SendMsg(t); err != nil {
			return err
		}
		size += n
		if progress != nil {
			progress(size, false)
		}
	}
	if progress != nil {
		progress(size, true)
	}
	return nil
}

func recvTarStream(ds grpc.Stream, dest string, cs CacheUpdater) error {
	pr, pw := io.Pipe()

	go func() {
		var (
			err error
			t   = new(BytesMessage)
		)
		for {
			if err = ds.RecvMsg(t); err != nil {
				if err == io.EOF {
					err = nil
				}
				break
			}
			_, err = pw.Write(t.Data)
			if err != nil {
				break
			}
		}
		if err = pw.CloseWithError(err); err != nil {
			logrus.Errorf("failed to close tar transfer pipe")
		}
	}()

	decompressedStream, err := archive.DecompressStream(pr)
	if err != nil {
		return errors.Wrap(err, "failed to decompress stream")
	}

	if err := chrootarchive.Untar(decompressedStream, dest, nil); err != nil {
		return errors.Wrap(err, "failed to untar context")
	}
	return nil
}
@ -0,0 +1,62 @@
package session

import (
	"net"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"golang.org/x/net/http2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/health/grpc_health_v1"
)

func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) {
	go func() {
		<-ctx.Done()
		conn.Close()
	}()
	logrus.Debugf("serving grpc connection")
	(&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer})
}

func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) {
	dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
		return conn, nil
	})

	cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure())
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create grpc client")
	}

	ctx, cancel := context.WithCancel(ctx)
	go monitorHealth(ctx, cc, cancel)

	return ctx, cc, nil
}

func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) {
	defer cancelConn()
	defer cc.Close()

	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	healthClient := grpc_health_v1.NewHealthClient(cc)

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			_, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
			cancel()
			if err != nil {
				return
			}
		}
	}
}
@ -0,0 +1,123 @@
package grpchijack

import (
	"net"
	"strings"
	"sync"
	"time"

	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/session"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 32*1<<10)
	},
}

func Dialer(api controlapi.ControlClient) session.Dialer {
	return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
		meta = lowerHeaders(meta)

		md := metadata.MD(meta)

		ctx = metadata.NewContext(context.Background(), md)

		stream, err := api.Session(ctx)
		if err != nil {
			return nil, err
		}

		return streamToConn(stream), nil
	}
}

func streamToConn(stream grpc.Stream) net.Conn {
	return &conn{stream: stream, buf: make([]byte, 32*1<<10)}
}

type conn struct {
	stream  grpc.Stream
	buf     []byte
	lastBuf []byte
}

func (c *conn) Read(b []byte) (int, error) {
	if c.lastBuf != nil {
		n := copy(b, c.lastBuf)
		c.lastBuf = c.lastBuf[n:]
		if len(c.lastBuf) == 0 {
			c.lastBuf = nil
		}
		return n, nil
	}
	m := new(controlapi.BytesMessage)
	m.Data = c.buf

	if err := c.stream.RecvMsg(m); err != nil {
		return 0, err
	}
	c.buf = m.Data[:cap(m.Data)]

	n := copy(b, m.Data)
	if n < len(m.Data) {
		c.lastBuf = m.Data[n:]
	}

	return n, nil
}

func (c *conn) Write(b []byte) (int, error) {
	m := &controlapi.BytesMessage{Data: b}
	if err := c.stream.SendMsg(m); err != nil {
		return 0, err
	}
	return len(b), nil
}

func (s *conn) Close() error {
	if cs, ok := s.stream.(grpc.ClientStream); ok {
		return cs.CloseSend()
	}
	return nil
}

func (s *conn) LocalAddr() net.Addr {
	return dummyAddr{}
}
func (s *conn) RemoteAddr() net.Addr {
	return dummyAddr{}
}
func (s *conn) SetDeadline(t time.Time) error {
	return nil
}
func (s *conn) SetReadDeadline(t time.Time) error {
	return nil
}
func (s *conn) SetWriteDeadline(t time.Time) error {
	return nil
}

type dummyAddr struct {
}

func (d dummyAddr) Network() string {
	return "tcp"
}

func (d dummyAddr) String() string {
	return "localhost"
}

func lowerHeaders(in map[string][]string) map[string][]string {
	out := map[string][]string{}
	for k := range in {
		out[strings.ToLower(k)] = in[k]
	}
	return out
}
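Note: this dialer adapts the Control API's Session stream into a net.Conn, so a client Session can serve its gRPC server over the same connection. A hypothetical sketch (not part of this commit) of how the two compose on the client side:

package grpchijack

import (
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/session"
	"golang.org/x/net/context"
)

// runSessionOverControl shows the intended composition: the hijacked
// Control.Session stream becomes the transport that session.Run serves over.
// The function itself is illustrative and not defined in this change.
func runSessionOverControl(ctx context.Context, s *session.Session, api controlapi.ControlClient) error {
	return s.Run(ctx, Dialer(api))
}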
@ -0,0 +1,13 @@
package grpchijack

import (
	"net"

	controlapi "github.com/moby/buildkit/api/services/control"
	"google.golang.org/grpc/metadata"
)

func Hijack(stream controlapi.Control_SessionServer) (net.Conn, map[string][]string) {
	md, _ := metadata.FromContext(stream.Context())
	return streamToConn(stream), md
}
@ -0,0 +1,212 @@
package session

import (
	"net"
	"net/http"
	"strings"
	"sync"

	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// Caller can invoke requests on the session
type Caller interface {
	Context() context.Context
	Supports(method string) bool
	Conn() *grpc.ClientConn
	Name() string
	SharedKey() string
}

type client struct {
	Session
	cc        *grpc.ClientConn
	supported map[string]struct{}
}

// Manager is a controller for accessing currently active sessions
type Manager struct {
	sessions        map[string]*client
	mu              sync.Mutex
	updateCondition *sync.Cond
}

// NewManager returns a new Manager
func NewManager() (*Manager, error) {
	sm := &Manager{
		sessions: make(map[string]*client),
	}
	sm.updateCondition = sync.NewCond(&sm.mu)
	return sm, nil
}

// HandleHTTPRequest handles an incoming HTTP request
func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	hijacker, ok := w.(http.Hijacker)
	if !ok {
		return errors.New("handler does not support hijack")
	}

	uuid := r.Header.Get(headerSessionUUID)

	proto := r.Header.Get("Upgrade")

	sm.mu.Lock()
	if _, ok := sm.sessions[uuid]; ok {
		sm.mu.Unlock()
		return errors.Errorf("session %s already exists", uuid)
	}

	if proto == "" {
		sm.mu.Unlock()
		return errors.New("no upgrade proto in request")
	}

	if proto != "h2c" {
		sm.mu.Unlock()
		return errors.Errorf("protocol %s not supported", proto)
	}

	conn, _, err := hijacker.Hijack()
	if err != nil {
		sm.mu.Unlock()
		return errors.Wrap(err, "failed to hijack connection")
	}

	resp := &http.Response{
		StatusCode: http.StatusSwitchingProtocols,
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     http.Header{},
	}
	resp.Header.Set("Connection", "Upgrade")
	resp.Header.Set("Upgrade", proto)

	// set raw mode
	conn.Write([]byte{})
	resp.Write(conn)

	return sm.handleConn(ctx, conn, r.Header)
}

// HandleConn handles an incoming raw connection
func (sm *Manager) HandleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error {
	sm.mu.Lock()
	return sm.handleConn(ctx, conn, opts)
}

// caller needs to take lock, this function will release it
func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	opts = canonicalHeaders(opts)

	h := http.Header(opts)
	uuid := h.Get(headerSessionUUID)
	name := h.Get(headerSessionName)
	sharedKey := h.Get(headerSessionSharedKey)

	ctx, cc, err := grpcClientConn(ctx, conn)
	if err != nil {
		sm.mu.Unlock()
		return err
	}

	c := &client{
		Session: Session{
			uuid:      uuid,
			name:      name,
			sharedKey: sharedKey,
			ctx:       ctx,
			cancelCtx: cancel,
			done:      make(chan struct{}),
		},
		cc:        cc,
		supported: make(map[string]struct{}),
	}

	for _, m := range opts[headerSessionMethod] {
		c.supported[strings.ToLower(m)] = struct{}{}
	}
	sm.sessions[uuid] = c
	sm.updateCondition.Broadcast()
	sm.mu.Unlock()

	defer func() {
		sm.mu.Lock()
		delete(sm.sessions, uuid)
		sm.mu.Unlock()
	}()

	<-c.ctx.Done()
	conn.Close()
	close(c.done)

	return nil
}

// Get returns a session by UUID
func (sm *Manager) Get(ctx context.Context, uuid string) (Caller, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	go func() {
		select {
		case <-ctx.Done():
			sm.updateCondition.Broadcast()
		}
	}()

	var c *client

	sm.mu.Lock()
	for {
		select {
		case <-ctx.Done():
			sm.mu.Unlock()
			return nil, errors.Wrapf(ctx.Err(), "no active session for %s", uuid)
		default:
		}
		var ok bool
		c, ok = sm.sessions[uuid]
		if !ok || c.closed() {
			sm.updateCondition.Wait()
			continue
		}
		sm.mu.Unlock()
		break
	}

	return c, nil
}

func (c *client) Context() context.Context {
	return c.context()
}

func (c *client) Name() string {
	return c.name
}

func (c *client) SharedKey() string {
	return c.sharedKey
}

func (c *client) Supports(url string) bool {
	_, ok := c.supported[strings.ToLower(url)]
	return ok
}
func (c *client) Conn() *grpc.ClientConn {
	return c.cc
}

func canonicalHeaders(in map[string][]string) map[string][]string {
	out := map[string][]string{}
	for k := range in {
		out[http.CanonicalHeaderKey(k)] = in[k]
	}
	return out
}
@ -0,0 +1,118 @@
package session

import (
	"net"

	"github.com/docker/docker/pkg/stringid"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	"google.golang.org/grpc/health/grpc_health_v1"
)

const (
	headerSessionUUID      = "X-Docker-Expose-Session-Uuid"
	headerSessionName      = "X-Docker-Expose-Session-Name"
	headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey"
	headerSessionMethod    = "X-Docker-Expose-Session-Grpc-Method"
)

// Dialer returns a connection that can be used by the session
type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)

// Attachable defines a feature that can be exposed on a session
type Attachable interface {
	Register(*grpc.Server)
}

// Session is a long running connection between client and a daemon
type Session struct {
	uuid       string
	name       string
	sharedKey  string
	ctx        context.Context
	cancelCtx  func()
	done       chan struct{}
	grpcServer *grpc.Server
}

// NewSession returns a new long running session
func NewSession(name, sharedKey string) (*Session, error) {
	uuid := stringid.GenerateRandomID()
	s := &Session{
		uuid:       uuid,
		name:       name,
		sharedKey:  sharedKey,
		grpcServer: grpc.NewServer(),
	}

	grpc_health_v1.RegisterHealthServer(s.grpcServer, health.NewServer())

	return s, nil
}

// Allow enables a given service to be reachable through the grpc session
func (s *Session) Allow(a Attachable) {
	a.Register(s.grpcServer)
}

// UUID returns unique identifier for the session
func (s *Session) UUID() string {
	return s.uuid
}

// Run activates the session
func (s *Session) Run(ctx context.Context, dialer Dialer) error {
	ctx, cancel := context.WithCancel(ctx)
	s.cancelCtx = cancel
	s.done = make(chan struct{})

	defer cancel()
	defer close(s.done)

	meta := make(map[string][]string)
	meta[headerSessionUUID] = []string{s.uuid}
	meta[headerSessionName] = []string{s.name}
	meta[headerSessionSharedKey] = []string{s.sharedKey}

	for name, svc := range s.grpcServer.GetServiceInfo() {
		for _, method := range svc.Methods {
			meta[headerSessionMethod] = append(meta[headerSessionMethod], MethodURL(name, method.Name))
		}
	}
	conn, err := dialer(ctx, "h2c", meta)
	if err != nil {
		return errors.Wrap(err, "failed to dial gRPC")
	}
	serve(ctx, s.grpcServer, conn)
	return nil
}

// Close closes the session
func (s *Session) Close() error {
	if s.cancelCtx != nil && s.done != nil {
		s.grpcServer.Stop()
		s.cancelCtx()
		<-s.done
	}
	return nil
}

func (s *Session) context() context.Context {
	return s.ctx
}

func (s *Session) closed() bool {
	select {
	case <-s.context().Done():
		return true
	default:
		return false
	}
}

// MethodURL returns a gRPC method URL for service and method name
func MethodURL(s, m string) string {
	return "/" + s + "/" + m
}
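Note: a hypothetical end-to-end sketch (not part of this commit, mirroring the test earlier in this diff) of wiring a Session to a Manager over the in-memory testutil transport; in real use the dialer would cross the client/daemon boundary. The helper name runLocalDirSession is illustrative only.

package main

import (
	"context"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
	"github.com/moby/buildkit/session/testutil"
	"golang.org/x/sync/errgroup"
)

func runLocalDirSession(dir string) error {
	s, err := session.NewSession("example", "")
	if err != nil {
		return err
	}
	s.Allow(filesync.NewFSSyncProvider(dir, nil)) // expose dir over FileSync

	m, err := session.NewManager()
	if err != nil {
		return err
	}
	// in-memory pipe standing in for the daemon connection
	dialer := session.Dialer(testutil.TestStream(testutil.Handler(m.HandleConn)))

	g, ctx := errgroup.WithContext(context.Background())
	g.Go(func() error { return s.Run(ctx, dialer) })
	g.Go(func() error {
		defer s.Close() // stops Run once we are done with the caller
		_, err := m.Get(ctx, s.UUID())
		return err
	})
	return g.Wait()
}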
@ -0,0 +1,70 @@
package testutil

import (
	"io"
	"net"
	"time"

	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
)

// Handler is function called to handle incoming connection
type Handler func(ctx context.Context, conn net.Conn, meta map[string][]string) error

// Dialer is a function for dialing an outgoing connection
type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)

// TestStream creates an in memory session dialer for a handler function
func TestStream(handler Handler) Dialer {
	s1, s2 := sockPair()
	return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
		go func() {
			err := handler(context.TODO(), s1, meta)
			if err != nil {
				logrus.Error(err)
			}
			s1.Close()
		}()
		return s2, nil
	}
}

func sockPair() (*sock, *sock) {
	pr1, pw1 := io.Pipe()
	pr2, pw2 := io.Pipe()
	return &sock{pw1, pr2, pw1}, &sock{pw2, pr1, pw2}
}

type sock struct {
	io.Writer
	io.Reader
	io.Closer
}

func (s *sock) LocalAddr() net.Addr {
	return dummyAddr{}
}
func (s *sock) RemoteAddr() net.Addr {
	return dummyAddr{}
}
func (s *sock) SetDeadline(t time.Time) error {
	return nil
}
func (s *sock) SetReadDeadline(t time.Time) error {
	return nil
}
func (s *sock) SetWriteDeadline(t time.Time) error {
	return nil
}

type dummyAddr struct {
}

func (d dummyAddr) Network() string {
	return "tcp"
}

func (d dummyAddr) String() string {
	return "localhost"
}
@ -1,3 +1,4 @@
package pb

const AttrKeepGitDir = "git.keepgitdir"
const AttrLocalSessionID = "local.session"
@ -42,6 +42,14 @@ func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error)
		}
	}
	}
	if id, ok := id.(*source.LocalIdentifier); ok {
		for k, v := range s.op.Source.Attrs {
			switch k {
			case pb.AttrLocalSessionID:
				id.SessionID = v
			}
		}
	}
	src, err := s.sm.Resolve(ctx, id)
	if err != nil {
		return nil, err
@ -83,6 +83,7 @@ func (v *vertex) notifyStarted(ctx context.Context) {
	defer pw.Close()
	now := time.Now()
	v.clientVertex.Started = &now
	v.clientVertex.Completed = nil
	pw.Write(v.Digest().String(), v.clientVertex)
}
@ -15,6 +15,7 @@ var (
const (
	DockerImageScheme = "docker-image"
	GitScheme         = "git"
	LocalScheme       = "local"
)

type Identifier interface {
@ -33,6 +34,8 @@ func FromString(s string) (Identifier, error) {
		return NewImageIdentifier(parts[1])
	case GitScheme:
		return NewGitIdentifier(parts[1])
	case LocalScheme:
		return NewLocalIdentifier(parts[1])
	default:
		return nil, errors.Wrapf(errNotFound, "unknown schema %s", parts[0])
	}
@ -54,6 +57,19 @@ func NewImageIdentifier(str string) (*ImageIdentifier, error) {
	return &ImageIdentifier{Reference: ref}, nil
}

func (_ *ImageIdentifier) ID() string {
	return DockerImageScheme
}

type LocalIdentifier struct {
	Name      string
	SessionID string
}

func NewLocalIdentifier(str string) (*LocalIdentifier, error) {
	return &LocalIdentifier{Name: str}, nil
}

func (_ *LocalIdentifier) ID() string {
	return LocalScheme
}
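Note: a hypothetical illustration (not part of this commit) of how a local context identifier round-trips: a "local://" string parses to a *LocalIdentifier, and the solver later fills in SessionID from the pb.AttrLocalSessionID attribute as shown in the source op hunk above. The name "mycontext" and the session id are placeholders.

package source

func exampleLocalIdentifier() (*LocalIdentifier, error) {
	id, err := FromString("local://mycontext")
	if err != nil {
		return nil, err
	}
	lid := id.(*LocalIdentifier) // ID() == LocalScheme, Name == "mycontext"
	lid.SessionID = "some-session-uuid" // normally copied from the source op attrs
	return lid, nil
}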
@ -0,0 +1,138 @@
package local

import (
	"time"

	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/source"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

type Opt struct {
	SessionManager *session.Manager
	CacheAccessor  cache.Accessor
}

func NewSource(opt Opt) (source.Source, error) {
	ls := &localSource{
		sm: opt.SessionManager,
		cm: opt.CacheAccessor,
	}
	return ls, nil
}

type localSource struct {
	sm *session.Manager
	cm cache.Accessor
}

func (ls *localSource) ID() string {
	return source.LocalScheme
}

func (ls *localSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) {
	localIdentifier, ok := id.(*source.LocalIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid local identifier %v", id)
	}

	return &localSourceHandler{
		src:         *localIdentifier,
		localSource: ls,
	}, nil
}

type localSourceHandler struct {
	src source.LocalIdentifier
	*localSource
}

func (ls *localSourceHandler) CacheKey(ctx context.Context) (string, error) {
	sessionID := ls.src.SessionID

	if sessionID == "" {
		uuid := session.FromContext(ctx)
		if uuid == "" {
			return "", errors.New("could not access local files without session")
		}
		sessionID = uuid
	}

	return "session:" + ls.src.Name + ":" + sessionID, nil
}

func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRef, retErr error) {
	uuid := session.FromContext(ctx)
	if uuid == "" {
		return nil, errors.New("could not access local files without session")
	}

	timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	caller, err := ls.sm.Get(timeoutCtx, uuid)
	if err != nil {
		return nil, err
	}

	mutable, err := ls.cm.New(ctx, nil)
	if err != nil {
		return nil, err
	}

	defer func() {
		if retErr != nil && mutable != nil {
			s, err := mutable.Freeze()
			if err == nil {
				go s.Release(context.TODO())
			}
		}
	}()

	mount, err := mutable.Mount(ctx)
	if err != nil {
		return nil, err
	}

	lm := snapshot.LocalMounter(mount)

	dest, err := lm.Mount()
	if err != nil {
		return nil, err
	}

	defer func() {
		if retErr != nil && lm != nil {
			lm.Unmount()
		}
	}()

	opt := filesync.FSSendRequestOpt{
		IncludePatterns:  nil,
		OverrideExcludes: false,
		DestDir:          dest,
		CacheUpdater:     nil,
	}

	if err := filesync.FSSync(ctx, caller, opt); err != nil {
		return nil, err
	}

	if err := lm.Unmount(); err != nil {
		return nil, err
	}
	lm = nil

	snap, err := mutable.ReleaseAndCommit(ctx)
	if err != nil {
		return nil, err
	}
	mutable = nil

	return snap, err
}
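Note: a hypothetical wiring sketch (not part of this commit) of how the daemon side could register this source so that "local://" identifiers resolve through it, assuming the new package lives under source/local. The helper name newLocalSource is illustrative only.

package main

import (
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/source/local"
)

// newLocalSource registers the shared session manager and cache accessor
// with the local source, matching the Opt struct defined in this change.
func newLocalSource(sm *session.Manager, cm cache.Accessor) (source.Source, error) {
	return local.NewSource(local.Opt{
		SessionManager: sm,
		CacheAccessor:  cm,
	})
}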
@ -35,3 +35,6 @@ golang.org/x/time 8be79e1e0910c292df4e79c241bb7e8f7e725959
github.com/BurntSushi/locker 392720b78f44e9d0249fcac6c43b111b47a370b8
github.com/docker/docker 05c7c311390911daebcf5d9519dee813fc02a887
github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f

github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb
github.com/stevvooe/continuity 86cec1535a968310e7532819f699ff2830ed7463
@ -0,0 +1 @@
This code provides helper functions for dealing with archive files.
File diff suppressed because it is too large
@ -0,0 +1,92 @@
package archive

import (
	"archive/tar"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/docker/docker/pkg/system"
)

func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	if format == OverlayWhiteoutFormat {
		return overlayWhiteoutConverter{}
	}
	return nil
}

type overlayWhiteoutConverter struct{}

func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
	// convert whiteouts to AUFS format
	if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
		// we just rename the file and make it normal
		dir, filename := filepath.Split(hdr.Name)
		hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
		hdr.Mode = 0600
		hdr.Typeflag = tar.TypeReg
		hdr.Size = 0
	}

	if fi.Mode()&os.ModeDir != 0 {
		// convert opaque dirs to AUFS format by writing an empty file with the prefix
		opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
		if err != nil {
			return nil, err
		}
		if len(opaque) == 1 && opaque[0] == 'y' {
			if hdr.Xattrs != nil {
				delete(hdr.Xattrs, "trusted.overlay.opaque")
			}

			// create a header for the whiteout file
			// it should inherit some properties from the parent, but be a regular file
			wo = &tar.Header{
				Typeflag:   tar.TypeReg,
				Mode:       hdr.Mode & int64(os.ModePerm),
				Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir),
				Size:       0,
				Uid:        hdr.Uid,
				Uname:      hdr.Uname,
				Gid:        hdr.Gid,
				Gname:      hdr.Gname,
				AccessTime: hdr.AccessTime,
				ChangeTime: hdr.ChangeTime,
			}
		}
	}

	return
}

func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
	base := filepath.Base(path)
	dir := filepath.Dir(path)

	// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
	if base == WhiteoutOpaqueDir {
		err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
		// don't write the file itself
		return false, err
	}

	// if a file was deleted and we are using overlay, we need to create a character device
	if strings.HasPrefix(base, WhiteoutPrefix) {
		originalBase := base[len(WhiteoutPrefix):]
		originalPath := filepath.Join(dir, originalBase)

		if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
			return false, err
		}
		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
			return false, err
		}

		// don't write the file itself
		return false, nil
	}

	return true, nil
}
@ -0,0 +1,7 @@
// +build !linux

package archive

func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	return nil
}
@ -0,0 +1,121 @@
// +build !windows

package archive

import (
	"archive/tar"
	"errors"
	"os"
	"path/filepath"
	"syscall"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/system"
	rsystem "github.com/opencontainers/runc/libcontainer/system"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return srcPath
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
	return srcPath + string(filepath.Separator) + include
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	return p, nil // already unix-style
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	return perm // noop for unix as golang APIs provide perm bits correctly
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
	s, ok := stat.(*syscall.Stat_t)

	if ok {
		// Currently go does not fill in the major/minors
		if s.Mode&syscall.S_IFBLK != 0 ||
			s.Mode&syscall.S_IFCHR != 0 {
			hdr.Devmajor = int64(major(uint64(s.Rdev)))
			hdr.Devminor = int64(minor(uint64(s.Rdev)))
		}
	}

	return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
	s, ok := stat.(*syscall.Stat_t)

	if ok {
		inode = uint64(s.Ino)
	}

	return
}

func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
	s, ok := stat.(*syscall.Stat_t)

	if !ok {
		return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
	}
	return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
}

func major(device uint64) uint64 {
	return (device >> 8) & 0xfff
}

func minor(device uint64) uint64 {
	return (device & 0xff) | ((device >> 12) & 0xfff00)
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	if rsystem.RunningInUserNS() {
		// cannot create a device if running in user namespace
		return nil
	}

	mode := uint32(hdr.Mode & 07777)
	switch hdr.Typeflag {
	case tar.TypeBlock:
		mode |= syscall.S_IFBLK
	case tar.TypeChar:
		mode |= syscall.S_IFCHR
	case tar.TypeFifo:
		mode |= syscall.S_IFIFO
	}

	return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}
	return nil
}
@ -0,0 +1,79 @@
// +build windows

package archive

import (
	"archive/tar"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/longpath"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return longpath.AddPrefix(srcPath)
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
	return filepath.Join(srcPath, include)
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	// windows: convert windows style relative path with backslashes
	// into forward slashes. Since windows does not allow '/' or '\'
	// in file names, it is mostly safe to replace however we must
	// check just in case
	if strings.Contains(p, "/") {
		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
	permPart := perm & os.ModePerm
	noPermPart := perm &^ os.ModePerm
	// Add the x bit: make everything +x from windows
	permPart |= 0111
	permPart &= 0755

	return noPermPart | permPart
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
	// do nothing. no notion of Rdev, Nlink in stat on Windows
	return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
	// do nothing. no notion of Inode in stat on Windows
	return
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	return nil
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	return nil
}

func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
	// no notion of file ownership mapping yet on Windows
	return idtools.IDPair{0, 0}, nil
}
@ -0,0 +1,441 @@
package archive

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
)

// ChangeType represents the change type.
type ChangeType int

const (
	// ChangeModify represents the modify operation.
	ChangeModify = iota
	// ChangeAdd represents the add operation.
	ChangeAdd
	// ChangeDelete represents the delete operation.
	ChangeDelete
)

func (c ChangeType) String() string {
	switch c {
	case ChangeModify:
		return "C"
	case ChangeAdd:
		return "A"
	case ChangeDelete:
		return "D"
	}
	return ""
}

// Change represents a change; it wraps the change type and path.
// It describes changes of the files in the path with respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
	Path string
	Kind ChangeType
}

func (change *Change) String() string {
	return fmt.Sprintf("%s %s", change.Kind, change.Path)
}

// for sort.Sort
type changesByPath []Change

func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int           { return len(c) }
func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }

// Gnu tar and the go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files; we handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds.
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func sameFsTimeSpec(a, b syscall.Timespec) bool {
	return a.Sec == b.Sec &&
		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}

// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
	return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}

func aufsMetadataSkip(path string) (skip bool, err error) {
	skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
	if err != nil {
		skip = true
	}
	return
}

func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
	f := filepath.Base(path)

	// If there is a whiteout, then the file was removed
	if strings.HasPrefix(f, WhiteoutPrefix) {
		originalFile := f[len(WhiteoutPrefix):]
		return filepath.Join(filepath.Dir(path), originalFile), nil
	}

	return "", nil
}

type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)

func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
	var (
		changes     []Change
		changedDirs = make(map[string]struct{})
	)

	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		path, err = filepath.Rel(rw, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		path = filepath.Join(string(os.PathSeparator), path)

		// Skip root
		if path == string(os.PathSeparator) {
			return nil
		}

		if sc != nil {
			if skip, err := sc(path); skip {
				return err
			}
		}

		change := Change{
			Path: path,
		}

		deletedFile, err := dc(rw, path, f)
		if err != nil {
			return err
		}

		// Find out what kind of modification happened
		if deletedFile != "" {
			change.Path = deletedFile
			change.Kind = ChangeDelete
		} else {
			// Otherwise, the file was added
			change.Kind = ChangeAdd

			// ...Unless it already existed in a top layer, in which case, it's a modification
			for _, layer := range layers {
				stat, err := os.Stat(filepath.Join(layer, path))
				if err != nil && !os.IsNotExist(err) {
					return err
				}
				if err == nil {
					// The file existed in the top layer, so that's a modification

					// However, if it's a directory, maybe it wasn't actually modified.
					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
					if stat.IsDir() && f.IsDir() {
						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
							// Both directories are the same, don't record the change
							return nil
						}
					}
					change.Kind = ChangeModify
					break
				}
			}
		}

		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
		// This block is here to ensure the change is recorded even if the
		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
		// Check https://github.com/docker/docker/pull/13590 for details.
		if f.IsDir() {
			changedDirs[path] = struct{}{}
		}
		if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
			parent := filepath.Dir(path)
			if _, ok := changedDirs[parent]; !ok && parent != "/" {
				changes = append(changes, Change{Path: parent, Kind: ChangeModify})
				changedDirs[parent] = struct{}{}
			}
		}

		// Record change
		changes = append(changes, change)
		return nil
	})
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return changes, nil
}

// FileInfo describes the information of a file.
type FileInfo struct {
	parent     *FileInfo
	name       string
	stat       *system.StatT
	children   map[string]*FileInfo
	capability []byte
	added      bool
}

// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
	// As this runs on the daemon side, file paths are OS specific.
	parent := info
	if path == string(os.PathSeparator) {
		return info
	}

	pathElements := strings.Split(path, string(os.PathSeparator))
	for _, elem := range pathElements {
		if elem != "" {
			child := parent.children[elem]
			if child == nil {
				return nil
			}
			parent = child
		}
	}
	return parent
}

func (info *FileInfo) path() string {
	if info.parent == nil {
		// As this runs on the daemon side, file paths are OS specific.
		return string(os.PathSeparator)
	}
	return filepath.Join(info.parent.path(), info.name)
}

func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {

	sizeAtEntry := len(*changes)

	if oldInfo == nil {
		// add
		change := Change{
			Path: info.path(),
			Kind: ChangeAdd,
		}
		*changes = append(*changes, change)
		info.added = true
	}

	// We make a copy so we can modify it to detect additions
	// also, we only recurse on the old dir if the new info is a directory
	// otherwise any previous delete/change is considered recursive
	oldChildren := make(map[string]*FileInfo)
	if oldInfo != nil && info.isDir() {
		for k, v := range oldInfo.children {
			oldChildren[k] = v
		}
	}

	for name, newChild := range info.children {
		oldChild := oldChildren[name]
		if oldChild != nil {
			// change?
			oldStat := oldChild.stat
			newStat := newChild.stat
			// Note: We can't compare inode or ctime or blocksize here, because these change
			// when copying a file into a container. However, that is not generally a problem
			// because any content change will change mtime, and any status change should
			// be visible when actually comparing the stat fields. The only time this
			// breaks down is if some code intentionally hides a change by setting
			// back mtime
			if statDifferent(oldStat, newStat) ||
				!bytes.Equal(oldChild.capability, newChild.capability) {
				change := Change{
					Path: newChild.path(),
					Kind: ChangeModify,
				}
				*changes = append(*changes, change)
				newChild.added = true
			}

			// Remove from copy so we can detect deletions
			delete(oldChildren, name)
		}

		newChild.addChanges(oldChild, changes)
	}
	for _, oldChild := range oldChildren {
		// delete
		change := Change{
			Path: oldChild.path(),
			Kind: ChangeDelete,
		}
		*changes = append(*changes, change)
	}

	// If there were changes inside this directory, we need to add it, even if the directory
	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
	// As this runs on the daemon side, file paths are OS specific.
	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
		change := Change{
			Path: info.path(),
			Kind: ChangeModify,
		}
		// Let's insert the directory entry before the recently added entries located inside this dir
		*changes = append(*changes, change) // just to resize the slice, will be overwritten
		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
		(*changes)[sizeAtEntry] = change
	}
}

// Changes adds changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
	var changes []Change

	info.addChanges(oldInfo, &changes)

	return changes
}

func newRootFileInfo() *FileInfo {
	// As this runs on the daemon side, file paths are OS specific.
	root := &FileInfo{
		name:     string(os.PathSeparator),
		children: make(map[string]*FileInfo),
	}
	return root
}

// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
	var (
		oldRoot, newRoot *FileInfo
	)
	if oldDir == "" {
		emptyDir, err := ioutil.TempDir("", "empty")
		if err != nil {
			return nil, err
		}
		defer os.Remove(emptyDir)
		oldDir = emptyDir
	}
	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
	if err != nil {
		return nil, err
	}

	return newRoot.Changes(oldRoot), nil
}

// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
	var (
		size int64
		sf   = make(map[uint64]struct{})
	)
	for _, change := range changes {
		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
			file := filepath.Join(newDir, change.Path)
			fileInfo, err := os.Lstat(file)
			if err != nil {
				logrus.Errorf("Can not stat %q: %s", file, err)
				continue
			}

			if fileInfo != nil && !fileInfo.IsDir() {
				if hasHardlinks(fileInfo) {
					inode := getIno(fileInfo)
					if _, ok := sf[inode]; !ok {
						size += fileInfo.Size()
						sf[inode] = struct{}{}
					}
				} else {
					size += fileInfo.Size()
				}
			}
		}
	}
	return size
}

// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
	reader, writer := io.Pipe()
	go func() {
		ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer)

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		sort.Sort(changesByPath(changes))

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
					logrus.Debugf("Can't write whiteout header: %s", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close layer: %s", err)
		}
		if err := writer.Close(); err != nil {
			logrus.Debugf("failed close Changes writer: %s", err)
		}
	}()
	return reader, nil
}
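As a usage sketch (not part of this diff), the exported entry points above might be driven like this. The /tmp paths are hypothetical, and nil ID maps are passed on the assumption that NewIDMappingsFromMaps treats them as "no remapping":

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Compare two local directory trees. With oldDir == "" every entry
	// in newDir would instead be reported as an Add.
	changes, err := archive.ChangesDirs("/tmp/layer-new", "/tmp/layer-old")
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range changes {
		fmt.Println(c.String()) // e.g. "C /etc", "A /etc/motd", "D /var/log/old"
	}
	fmt.Println("size:", archive.ChangesSize("/tmp/layer-new", changes))

	// Serialize the delta as a layer tar; deletions become whiteout entries.
	rc, err := archive.ExportChanges("/tmp/layer-new", changes, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	n, _ := io.Copy(ioutil.Discard, rc)
	fmt.Println("archived bytes:", n)
}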
@ -0,0 +1,312 @@
package archive

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"syscall"
	"unsafe"

	"github.com/docker/docker/pkg/system"
)

// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
	dir1  string
	dir2  string
	root1 *FileInfo
	root2 *FileInfo
}

// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generate a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
	w := &walker{
		dir1:  dir1,
		dir2:  dir2,
		root1: newRootFileInfo(),
		root2: newRootFileInfo(),
	}

	i1, err := os.Lstat(w.dir1)
	if err != nil {
		return nil, nil, err
	}
	i2, err := os.Lstat(w.dir2)
	if err != nil {
		return nil, nil, err
	}

	if err := w.walk("/", i1, i2); err != nil {
		return nil, nil, err
	}

	return w.root1, w.root2, nil
}

// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
	if fi == nil {
		return nil
	}
	parent := root.LookUp(filepath.Dir(path))
	if parent == nil {
		return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
	}
	info := &FileInfo{
		name:     filepath.Base(path),
		children: make(map[string]*FileInfo),
		parent:   parent,
	}
	cpath := filepath.Join(dir, path)
	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
	if err != nil {
		return err
	}
	info.stat = stat
	info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
	parent.children[info.name] = info
	return nil
}

// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
	// Register these nodes with the return trees, unless we're still at the
	// (already-created) roots:
	if path != "/" {
		if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
			return err
		}
		if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
			return err
		}
	}

	is1Dir := i1 != nil && i1.IsDir()
	is2Dir := i2 != nil && i2.IsDir()

	sameDevice := false
	if i1 != nil && i2 != nil {
		si1 := i1.Sys().(*syscall.Stat_t)
		si2 := i2.Sys().(*syscall.Stat_t)
		if si1.Dev == si2.Dev {
			sameDevice = true
		}
	}

	// If these files are both non-existent, or leaves (non-dirs), we are done.
	if !is1Dir && !is2Dir {
		return nil
	}

	// Fetch the names of all the files contained in both directories being walked:
	var names1, names2 []nameIno
	if is1Dir {
		names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}
	if is2Dir {
		names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}

	// We have lists of the files contained in both parallel directories, sorted
	// in the same order. Walk them in parallel, generating a unique merged list
	// of all items present in either or both directories.
	var names []string
	ix1 := 0
	ix2 := 0

	for {
		if ix1 >= len(names1) {
			break
		}
		if ix2 >= len(names2) {
			break
		}

		ni1 := names1[ix1]
		ni2 := names2[ix2]

		switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
		case -1: // ni1 < ni2 -- advance ni1
			// we will not encounter ni1 in names2
			names = append(names, ni1.name)
			ix1++
		case 0: // ni1 == ni2
			if ni1.ino != ni2.ino || !sameDevice {
				names = append(names, ni1.name)
			}
			ix1++
			ix2++
		case 1: // ni1 > ni2 -- advance ni2
			// we will not encounter ni2 in names1
			names = append(names, ni2.name)
			ix2++
		}
	}
	for ix1 < len(names1) {
		names = append(names, names1[ix1].name)
		ix1++
	}
	for ix2 < len(names2) {
		names = append(names, names2[ix2].name)
		ix2++
	}

	// For each of the names present in either or both of the directories being
	// iterated, stat the name under each root, and recurse the pair of them:
	for _, name := range names {
		fname := filepath.Join(path, name)
		var cInfo1, cInfo2 os.FileInfo
		if is1Dir {
			cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if is2Dir {
			cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if err = w.walk(fname, cInfo1, cInfo2); err != nil {
			return err
		}
	}
	return nil
}

// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
	name string
	ino  uint64
}

type nameInoSlice []nameIno

func (s nameInoSlice) Len() int           { return len(s) }
func (s nameInoSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }

// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
	var (
		size = 100
		buf  = make([]byte, 4096)
		nbuf int
		bufp int
		nb   int
	)

	f, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	names = make([]nameIno, 0, size) // Empty with room to grow.
	for {
		// Refill the buffer if necessary
		if bufp >= nbuf {
			bufp = 0
			nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
			if nbuf < 0 {
				nbuf = 0
			}
			if err != nil {
				return nil, os.NewSyscallError("readdirent", err)
			}
			if nbuf <= 0 {
				break // EOF
			}
		}

		// Drain the buffer
		nb, names = parseDirent(buf[bufp:nbuf], names)
		bufp += nb
	}

	sl := nameInoSlice(names)
	sort.Sort(sl)
	return sl, nil
}

// parseDirent is a minor modification of syscall.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
	origlen := len(buf)
	for len(buf) > 0 {
		dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
		buf = buf[dirent.Reclen:]
		if dirent.Ino == 0 { // File absent in directory.
			continue
		}
		bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
		var name = string(bytes[0:clen(bytes[:])])
		if name == "." || name == ".." { // Useless names
			continue
		}
		names = append(names, nameIno{name, dirent.Ino})
	}
	return origlen - len(buf), names
}

func clen(n []byte) int {
	for i := 0; i < len(n); i++ {
		if n[i] == 0 {
			return i
		}
	}
	return len(n)
}

// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
	return changes(layers, rw, overlayDeletedFile, nil)
}

func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
	if fi.Mode()&os.ModeCharDevice != 0 {
		s := fi.Sys().(*syscall.Stat_t)
		if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
			return path, nil
		}
	}
	if fi.Mode()&os.ModeDir != 0 {
		opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
		if err != nil {
			return "", err
		}
		if len(opaque) == 1 && opaque[0] == 'y' {
			return path, nil
		}
	}

	return "", nil
}
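The heart of (*walker).walk above is a two-pointer merge over two sorted directory listings, pruning names whose inodes match on the same device. A standalone sketch of just that merge, with hypothetical entries:

package main

import "fmt"

type entry struct {
	name string
	ino  uint64
}

// mergeNames mirrors the merge loop in (*walker).walk: it walks two sorted
// listings in parallel and keeps every name except those present in both
// with identical inodes on the same device (unchanged subtrees).
func mergeNames(a, b []entry, sameDevice bool) []string {
	var out []string
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i].name < b[j].name: // only in a
			out = append(out, a[i].name)
			i++
		case a[i].name > b[j].name: // only in b
			out = append(out, b[j].name)
			j++
		default: // in both: keep only if the inode differs
			if a[i].ino != b[j].ino || !sameDevice {
				out = append(out, a[i].name)
			}
			i++
			j++
		}
	}
	for ; i < len(a); i++ {
		out = append(out, a[i].name)
	}
	for ; j < len(b); j++ {
		out = append(out, b[j].name)
	}
	return out
}

func main() {
	old := []entry{{"bin", 10}, {"etc", 11}, {"usr", 12}}
	cur := []entry{{"bin", 10}, {"etc", 99}, {"var", 13}}
	fmt.Println(mergeNames(old, cur, true)) // [etc usr var]; bin is pruned
}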
@ -0,0 +1,97 @@
// +build !linux

package archive

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/pkg/system"
)

func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
	var (
		oldRoot, newRoot *FileInfo
		err1, err2       error
		errs             = make(chan error, 2)
	)
	go func() {
		oldRoot, err1 = collectFileInfo(oldDir)
		errs <- err1
	}()
	go func() {
		newRoot, err2 = collectFileInfo(newDir)
		errs <- err2
	}()

	// block until both routines have returned
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return nil, nil, err
		}
	}

	return oldRoot, newRoot, nil
}

func collectFileInfo(sourceDir string) (*FileInfo, error) {
	root := newRootFileInfo()

	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		relPath, err := filepath.Rel(sourceDir, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		relPath = filepath.Join(string(os.PathSeparator), relPath)

		// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
		// Temporary workaround. If the returned path starts with two backslashes,
		// trim it down to a single backslash. Only relevant on Windows.
		if runtime.GOOS == "windows" {
			if strings.HasPrefix(relPath, `\\`) {
				relPath = relPath[1:]
			}
		}

		if relPath == string(os.PathSeparator) {
			return nil
		}

		parent := root.LookUp(filepath.Dir(relPath))
		if parent == nil {
			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
		}

		info := &FileInfo{
			name:     filepath.Base(relPath),
			children: make(map[string]*FileInfo),
			parent:   parent,
		}

		s, err := system.Lstat(path)
		if err != nil {
			return err
		}
		info.stat = s

		info.capability, _ = system.Lgetxattr(path, "security.capability")

		parent.children[info.name] = info

		return nil
	})
	if err != nil {
		return nil, err
	}
	return root, nil
}
@ -0,0 +1,36 @@
// +build !windows

package archive

import (
	"os"
	"syscall"

	"github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	if oldStat.Mode() != newStat.Mode() ||
		oldStat.UID() != newStat.UID() ||
		oldStat.GID() != newStat.GID() ||
		oldStat.Rdev() != newStat.Rdev() ||
		// Don't look at size or mtime for dirs; they're not a good measure of change
		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}

func getIno(fi os.FileInfo) uint64 {
	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
}

func hasHardlinks(fi os.FileInfo) bool {
	return fi.Sys().(*syscall.Stat_t).Nlink > 1
}
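getIno and hasHardlinks above boil down to the same *syscall.Stat_t type assertion. A small Unix-only sketch showing the raw fields they read (the path is hypothetical):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	fi, err := os.Lstat("/etc/hosts")
	if err != nil {
		panic(err)
	}
	// The same type assertion the helpers rely on; only valid on Unix,
	// where fi.Sys() is backed by a *syscall.Stat_t.
	st := fi.Sys().(*syscall.Stat_t)
	fmt.Println("inode:", st.Ino, "links:", st.Nlink, "hardlinked:", st.Nlink > 1)
}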
@ -0,0 +1,30 @@
package archive

import (
	"os"

	"github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	// Don't look at size for dirs; it's not a good measure of change
	if oldStat.Mtim() != newStat.Mtim() ||
		oldStat.Mode() != newStat.Mode() ||
		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode().IsDir()
}

func getIno(fi os.FileInfo) (inode uint64) {
	return
}

func hasHardlinks(fi os.FileInfo) bool {
	return false
}
@ -0,0 +1,458 @@
package archive

import (
	"archive/tar"
	"errors"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/system"
)

// Errors used or returned by this file.
var (
	ErrNotDirectory      = errors.New("not a directory")
	ErrDirNotExists      = errors.New("no such directory")
	ErrCannotCopyDir     = errors.New("cannot copy directory")
	ErrInvalidCopySource = errors.New("invalid copy source content")
)

// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in a path separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
	// Ensure paths are in platform semantics
	cleanedPath = normalizePath(cleanedPath)
	originalPath = normalizePath(originalPath)

	if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
		if !hasTrailingPathSeparator(cleanedPath) {
			// Add a separator if it doesn't already end with one (a cleaned
			// path would only end in a separator if it is the root).
			cleanedPath += string(filepath.Separator)
		}
		cleanedPath += "."
	}

	if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
		cleanedPath += string(filepath.Separator)
	}

	return cleanedPath
}

// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string) bool {
	return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
}

// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string) bool {
	return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
}

// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
	return filepath.Base(path) == "."
}

// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
	cleanedPath := filepath.Clean(normalizePath(path))

	if specifiesCurrentDir(path) {
		cleanedPath += string(filepath.Separator) + "."
	}

	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}

// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}

// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
	sourcePath = normalizePath(sourcePath)
	if _, err = os.Lstat(sourcePath); err != nil {
		// Catches the case where the source does not exist or is not a
		// directory if asserted to be a directory, as this also causes an
		// error.
		return
	}

	// Separate the source path between its directory and
	// the entry in that directory which we are archiving.
	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)

	filter := []string{sourceBase}

	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)

	return TarWithOptions(sourceDir, &TarOptions{
		Compression:      Uncompressed,
		IncludeFiles:     filter,
		IncludeSourceDir: true,
		RebaseNames: map[string]string{
			sourceBase: rebaseName,
		},
	})
}

// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
	Path       string
	Exists     bool
	IsDir      bool
	RebaseName string
}

// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
	// normalize the file path and then evaluate the symlink;
	// we will use the target file instead of the symlink if
	// followLink is set
	path = normalizePath(path)

	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
	if err != nil {
		return CopyInfo{}, err
	}

	stat, err := os.Lstat(resolvedPath)
	if err != nil {
		return CopyInfo{}, err
	}

	return CopyInfo{
		Path:       resolvedPath,
		Exists:     true,
		IsDir:      stat.IsDir(),
		RebaseName: rebaseName,
	}, nil
}

// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
	path = normalizePath(path)
	originalPath := path

	stat, err := os.Lstat(path)

	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
		// The path exists and is not a symlink.
		return CopyInfo{
			Path:   path,
			Exists: true,
			IsDir:  stat.IsDir(),
		}, nil
	}

	// While the path is a symlink.
	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			// Don't follow symlinks more than this arbitrary number of times.
			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
		}

		// The path is a symbolic link. We need to evaluate it so that the
		// destination of the copy operation is the link target and not the
		// link itself. This is notably different than CopyInfoSourcePath which
		// only evaluates symlinks before the last appearing path separator.
		// Also note that it is okay if the last path element is a broken
		// symlink as the copy operation should create the target.
		var linkTarget string

		linkTarget, err = os.Readlink(path)
		if err != nil {
			return CopyInfo{}, err
		}

		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := SplitPathDirEntry(path)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		path = linkTarget
		stat, err = os.Lstat(path)
	}

	if err != nil {
		// It's okay if the destination path doesn't exist. We can still
		// continue the copy operation if the parent directory exists.
		if !os.IsNotExist(err) {
			return CopyInfo{}, err
		}

		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(path)

		parentDirStat, err := os.Lstat(dstParent)
		if err != nil {
			return CopyInfo{}, err
		}
		if !parentDirStat.IsDir() {
			return CopyInfo{}, ErrNotDirectory
		}

		return CopyInfo{Path: path}, nil
	}

	// The path exists after resolving symlinks.
	return CopyInfo{
		Path:   path,
		Exists: true,
		IsDir:  stat.IsDir(),
	}, nil
}

// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
	// Ensure in platform semantics
	srcInfo.Path = normalizePath(srcInfo.Path)
	dstInfo.Path = normalizePath(dstInfo.Path)

	// Separate the destination path between its directory and base
	// components in case the source archive contents need to be rebased.
	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
	_, srcBase := SplitPathDirEntry(srcInfo.Path)

	switch {
	case dstInfo.Exists && dstInfo.IsDir:
		// The destination exists as a directory. No alteration
		// to srcContent is needed as its contents can be
		// simply extracted to the destination directory.
		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
	case dstInfo.Exists && srcInfo.IsDir:
		// The destination exists as some type of file and the source
		// content is a directory. This is an error condition since
		// you cannot copy a directory to an existing file location.
		return "", nil, ErrCannotCopyDir
	case dstInfo.Exists:
		// The destination exists as some type of file and the source content
		// is also a file. The source content entry will have to be renamed to
		// have a basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case srcInfo.IsDir:
		// The destination does not exist and the source content is an archive
		// of a directory. The archive should be extracted to the parent of
		// the destination path instead, and when it is, the directory that is
		// created as a result should take the name of the destination path.
		// The source content entries will have to be renamed to have a
		// basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case assertsDirectory(dstInfo.Path):
		// The destination does not exist and is asserted to be created as a
		// directory, but the source content is not a directory. This is an
		// error condition since you cannot create a directory from a file
		// source.
		return "", nil, ErrDirNotExists
	default:
		// The last remaining case is when the destination does not exist, is
		// not asserted to be a directory, and the source content is not an
		// archive of a directory. In this case, the destination file will need
		// to be created when the archive is extracted and the source content
		// entry will have to be renamed to have a basename which matches the
		// destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	}
}

// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
	if oldBase == string(os.PathSeparator) {
		// If oldBase specifies the root directory, use an empty string as
		// oldBase instead so that newBase doesn't replace the path separator
		// that all paths will start with.
		oldBase = ""
	}

	rebased, w := io.Pipe()

	go func() {
		srcTar := tar.NewReader(srcContent)
		rebasedTar := tar.NewWriter(w)

		for {
			hdr, err := srcTar.Next()
			if err == io.EOF {
				// Signals end of archive.
				rebasedTar.Close()
				w.Close()
				return
			}
			if err != nil {
				w.CloseWithError(err)
				return
			}

			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)

			if err = rebasedTar.WriteHeader(hdr); err != nil {
				w.CloseWithError(err)
				return
			}

			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
				w.CloseWithError(err)
				return
			}
		}
	}()

	return rebased
}

// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
	var (
		srcInfo CopyInfo
		err     error
	)

	// Ensure in platform semantics
	srcPath = normalizePath(srcPath)
	dstPath = normalizePath(dstPath)

	// Clean the source and destination paths.
	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)

	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
		return err
	}

	content, err := TarResource(srcInfo)
	if err != nil {
		return err
	}
	defer content.Close()

	return CopyTo(content, srcInfo, dstPath)
}

// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
	// The destination path need not exist, but CopyInfoDestinationPath will
	// ensure that at least the parent directory exists.
	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
	if err != nil {
		return err
	}

	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
	if err != nil {
		return err
	}
	defer copyArchive.Close()

	options := &TarOptions{
		NoLchown:             true,
		NoOverwriteDirNonDir: true,
	}

	return Untar(copyArchive, dstDir, options)
}

// ResolveHostSourcePath decides which real path needs to be copied, given
// parameters such as whether to follow a symlink or not. If followLink is
// true, resolvedPath will be the link target of any symlink file; otherwise
// only symlinks in the directory portion of the path are resolved, and a
// symlink file itself is returned without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
	if followLink {
		resolvedPath, err = filepath.EvalSymlinks(path)
		if err != nil {
			return
		}

		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
	} else {
		dirPath, basePath := filepath.Split(path)

		// if not following the symlink, then resolve symlinks in the parent dir
		var resolvedDirPath string
		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
		if err != nil {
			return
		}
		// resolvedDirPath will have been cleaned (no trailing path separators) so
		// we can manually join it with the base path element.
		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
			rebaseName = filepath.Base(path)
		}
	}
	return resolvedPath, rebaseName, nil
}

// GetRebaseName normalizes and compares path and resolvedPath, and
// returns the completed resolved path and the rebased file name.
func GetRebaseName(path, resolvedPath string) (string, string) {
	// linkTarget will have been cleaned (no trailing path separators and dot) so
	// we can manually join it with them
	var rebaseName string
	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
		resolvedPath += string(filepath.Separator) + "."
	}

	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
		resolvedPath += string(filepath.Separator)
	}

	if filepath.Base(path) != filepath.Base(resolvedPath) {
		// In the case where the path had a trailing separator and a symlink
		// evaluation has changed the last path component, we will need to
		// rebase the name in the archive that is being copied to match the
		// originally requested name.
		rebaseName = filepath.Base(path)
	}
	return resolvedPath, rebaseName
}
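A usage sketch for the top-level entry point above: CopyResource chains TarResource, CopyInfoDestinationPath, PrepareArchiveCopy, and Untar internally. The paths here are hypothetical:

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Copy /tmp/src/data into the directory /tmp/dst/, following symlinks
	// in the source. The trailing "/" asserts the destination is a directory.
	if err := archive.CopyResource("/tmp/src/data", "/tmp/dst/", true); err != nil {
		log.Fatal(err)
	}
}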
@ -0,0 +1,11 @@
// +build !windows

package archive

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.ToSlash(path)
}
@ -0,0 +1,9 @@
package archive

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.FromSlash(path)
}
@ -0,0 +1,256 @@
package archive

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
)

// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
	tr := tar.NewReader(layer)
	trBuf := pools.BufioReader32KPool.Get(tr)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	unpackedPaths := make(map[string]struct{})

	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)

	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return 0, err
		}

		size += hdr.Size

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		// Windows does not support filenames with colons in them. Ignore
		// these files. This is not a problem though (although it might
		// appear that it is). Let's suppose a client is running docker pull.
		// The daemon it points to is Windows. Would it make sense for the
		// client to be doing a docker pull Ubuntu for example (which has files
		// with colons in the name under /usr/share/man/man3)? No, absolutely
		// not as it would really only make sense that they were pulling a
		// Windows image. However, for development, it is necessary to be able
		// to pull Linux images which are in the repository.
		//
		// TODO Windows. Once the registry is aware of what images are Windows-
		// specific or Linux-specific, this warning should be changed to an error
		// to cater for the situation where someone does manage to upload a Linux
		// image but have it tagged as Windows inadvertently.
		if runtime.GOOS == "windows" {
			if strings.Contains(hdr.Name, ":") {
				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
				continue
			}
		}

		// Note as these operations are platform specific, so must the slash be.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)

			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0600, "")
				if err != nil {
					return 0, err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
			// We don't want this directory, but we need the files in it so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return 0, err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
					return 0, err
				}
			}

			if hdr.Name != WhiteoutOpaqueDir {
				continue
			}
		}
		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return 0, err
		}

		// Note as these operations are platform specific, so must the slash be.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}
		base := filepath.Base(path)

		if strings.HasPrefix(base, WhiteoutPrefix) {
			dir := filepath.Dir(path)
			if base == WhiteoutOpaqueDir {
				_, err := os.Lstat(dir)
				if err != nil {
					return 0, err
				}
				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							err = nil // parent was deleted
						}
						return err
					}
					if path == dir {
						return nil
					}
					if _, exists := unpackedPaths[path]; !exists {
						err := os.RemoveAll(path)
						return err
					}
					return nil
				})
				if err != nil {
					return 0, err
				}
			} else {
				originalBase := base[len(WhiteoutPrefix):]
				originalPath := filepath.Join(dir, originalBase)
				if err := os.RemoveAll(originalPath); err != nil {
					return 0, err
				}
			}
		} else {
			// If path exists we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return 0, err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return 0, fmt.Errorf("Invalid aufs hardlink")
|
||||||
|
}
|
||||||
|
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer tmpFile.Close()
|
||||||
|
srcData = tmpFile
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := remapIDs(idMappings, srcHdr); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Directory mtimes must be handled at the end to avoid further
|
||||||
|
// file creation in them to modify the directory mtime
|
||||||
|
if hdr.Typeflag == tar.TypeDir {
|
||||||
|
dirs = append(dirs, hdr)
|
||||||
|
}
|
||||||
|
unpackedPaths[path] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hdr := range dirs {
|
||||||
|
path := filepath.Join(dest, hdr.Name)
|
||||||
|
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyLayer parses a diff in the standard layer format from `layer`,
|
||||||
|
// and applies it to the directory `dest`. The stream `layer` can be
|
||||||
|
// compressed or uncompressed.
|
||||||
|
// Returns the size in bytes of the contents of the layer.
|
||||||
|
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
|
||||||
|
return applyLayerHandler(dest, layer, &TarOptions{}, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyUncompressedLayer parses a diff in the standard layer format from
|
||||||
|
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
||||||
|
// can only be uncompressed.
|
||||||
|
// Returns the size in bytes of the contents of the layer.
|
||||||
|
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
|
||||||
|
return applyLayerHandler(dest, layer, options, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
|
||||||
|
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
|
||||||
|
dest = filepath.Clean(dest)
|
||||||
|
|
||||||
|
// We need to be able to set any perms
|
||||||
|
oldmask, err := system.Umask(0)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
|
||||||
|
|
||||||
|
if decompress {
|
||||||
|
layer, err = DecompressStream(layer)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return UnpackLayer(dest, layer, options)
|
||||||
|
}
|
|
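Editor's note: for orientation, here is a minimal sketch of how a caller might drive ApplyLayer above. The file name "layer.tar.gz" and destination "rootfs" are hypothetical, not taken from this diff; ApplyLayer decompresses automatically because it passes decompress=true to applyLayerHandler.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Hypothetical layer tarball (may be gzip-compressed).
	f, err := os.Open("layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Unpack the layer diff into ./rootfs, honoring whiteout files.
	size, err := archive.ApplyLayer("rootfs", f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("applied %d bytes\n", size)
}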
@@ -0,0 +1,97 @@
// +build ignore

// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main

import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/archive"
)

var (
	flDebug  = flag.Bool("D", false, "debugging output")
	flNewDir = flag.String("newdir", "", "")
	flOldDir = flag.String("olddir", "", "")
	log      = logrus.New()
)

func main() {
	flag.Usage = func() {
		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()
	log.Out = os.Stderr
	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
		logrus.SetLevel(logrus.DebugLevel)
	}
	var newDir, oldDir string

	if len(*flNewDir) == 0 {
		var err error
		newDir, err = ioutil.TempDir("", "docker-test-newDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(newDir)
		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
			log.Fatal(err)
		}
	} else {
		newDir = *flNewDir
	}

	if len(*flOldDir) == 0 {
		// Assign to the outer oldDir; the vendored original shadowed it
		// with := here, leaving the outer variable empty.
		var err error
		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(oldDir)
	} else {
		oldDir = *flOldDir
	}

	changes, err := archive.ChangesDirs(newDir, oldDir)
	if err != nil {
		log.Fatal(err)
	}

	a, err := archive.ExportChanges(newDir, changes)
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	i, err := io.Copy(os.Stdout, a)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}

func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
	fileData := []byte("fooo")
	for n := 0; n < numberOfFiles; n++ {
		fileName := fmt.Sprintf("file-%d", n)
		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
			return 0, err
		}
		if makeLinks {
			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
				return 0, err
			}
		}
	}
	totalSize := numberOfFiles * len(fileData)
	return totalSize, nil
}
@@ -0,0 +1,16 @@
package archive

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	if time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = ((1 << 30) - 2)
		return
	}
	return syscall.NsecToTimespec(time.UnixNano())
}
@@ -0,0 +1,16 @@
// +build !linux

package archive

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	nsec := int64(0)
	if !time.IsZero() {
		nsec = time.UnixNano()
	}
	return syscall.NsecToTimespec(nsec)
}
@@ -0,0 +1,23 @@
package archive

// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.

// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."

// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"

// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
@@ -0,0 +1,59 @@
package archive

import (
	"archive/tar"
	"bytes"
	"io"
)

// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
//  * ./foo.txt with content "hello world"
//  * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
	files := parseStringPairs(input...)
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, file := range files {
		name, content := file[0], file[1]
		hdr := &tar.Header{
			Name: name,
			Size: int64(len(content)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for i := 0; i < len(input); i += 2 {
		var pair [2]string
		pair[0] = input[i]
		if i+1 < len(input) {
			pair[1] = input[i+1]
		}
		output = append(output, pair)
	}
	return
}
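Editor's note: a minimal sketch of calling Generate, following the example in its own doc comment. The odd trailing argument "emptyfile" becomes an empty file; everything here uses only the API shown above.

package main

import (
	"io/ioutil"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Two entries: "foo.txt" with content, and "emptyfile" with none.
	rdr, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	tarBytes, err := ioutil.ReadAll(rdr)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("generated in-memory tar of %d bytes", len(tarBytes))
}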
@@ -0,0 +1,70 @@
package chrootarchive

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/idtools"
)

// NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
	if idMappings == nil {
		idMappings = &idtools.IDMappings{}
	}
	return &archive.Archiver{Untar: Untar, IDMappings: idMappings}
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
	return untarHandler(tarArchive, dest, options, true)
}

// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
	return untarHandler(tarArchive, dest, options, false)
}

// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
	}
	if options == nil {
		options = &archive.TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
	rootIDs := idMappings.RootPair()

	dest = filepath.Clean(dest)
	if _, err := os.Stat(dest); os.IsNotExist(err) {
		if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
			return err
		}
	}

	r := ioutil.NopCloser(tarArchive)
	if decompress {
		decompressedArchive, err := archive.DecompressStream(tarArchive)
		if err != nil {
			return err
		}
		defer decompressedArchive.Close()
		r = decompressedArchive
	}

	return invokeUnpack(r, dest, options)
}
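Editor's note: a minimal usage sketch for chrootarchive.Untar. The input file and destination path are hypothetical. On non-Windows platforms the unpack happens in a re-exec'd child (see archive_unix.go below), so the embedding binary's main must call reexec.Init() first; reexec.Init, Register, and Command are the real docker/pkg/reexec API.

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/reexec"
)

func main() {
	// When this binary is re-exec'd as "docker-untar", this branch
	// dispatches to the registered handler and never returns.
	if reexec.Init() {
		return
	}

	f, err := os.Open("archive.tar.gz") // hypothetical input archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Unpack inside a chroot of the destination so a malicious archive
	// cannot escape it via symlinks.
	if err := chrootarchive.Untar(f, "/var/lib/demo/rootfs", &archive.TarOptions{}); err != nil {
		log.Fatal(err)
	}
}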
@@ -0,0 +1,86 @@
// +build !windows

package chrootarchive

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"runtime"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/reexec"
)

// untar is the entry-point for docker-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
// chroot and re-exec.
func untar() {
	runtime.LockOSThread()
	flag.Parse()

	var options *archive.TarOptions

	// read the options from the pipe "ExtraFiles"
	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
		fatal(err)
	}

	if err := chroot(flag.Arg(0)); err != nil {
		fatal(err)
	}

	if err := archive.Unpack(os.Stdin, "/", options); err != nil {
		fatal(err)
	}
	// fully consume stdin in case it is zero padded
	if _, err := flush(os.Stdin); err != nil {
		fatal(err)
	}

	os.Exit(0)
}

func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {

	// We can't pass a potentially large exclude list directly via cmd line
	// because we easily overrun the kernel's max argument/environment size
	// when the full image list is passed (e.g. when this is used by
	// `docker load`). We will marshal the options via a pipe to the
	// child
	r, w, err := os.Pipe()
	if err != nil {
		return fmt.Errorf("Untar pipe failure: %v", err)
	}

	cmd := reexec.Command("docker-untar", dest)
	cmd.Stdin = decompressedArchive

	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
	output := bytes.NewBuffer(nil)
	cmd.Stdout = output
	cmd.Stderr = output

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
	}
	// write the options to the pipe for the untar exec to read
	if err := json.NewEncoder(w).Encode(options); err != nil {
		return fmt.Errorf("Untar json encode to pipe failed: %v", err)
	}
	w.Close()

	if err := cmd.Wait(); err != nil {
		// when `xz -d -c -q | docker-untar ...` failed on docker-untar side,
		// we need to exhaust `xz`'s output, otherwise the `xz` side will be
		// pending on write pipe forever
		io.Copy(ioutil.Discard, decompressedArchive)

		return fmt.Errorf("Error processing tar file(%v): %s", err, output)
	}
	return nil
}
22 vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go generated vendored Normal file
@@ -0,0 +1,22 @@
package chrootarchive

import (
	"io"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/longpath"
)

// chroot is not supported by Windows
func chroot(path string) error {
	return nil
}

func invokeUnpack(decompressedArchive io.ReadCloser,
	dest string,
	options *archive.TarOptions) error {
	// Windows is different to Linux here because Windows does not support
	// chroot. Hence there is no point sandboxing a chrooted process to
	// do the unpack. We call inline instead within the daemon process.
	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}
108 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go generated vendored Normal file
@@ -0,0 +1,108 @@
package chrootarchive

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"syscall"

	"github.com/docker/docker/pkg/mount"
	rsystem "github.com/opencontainers/runc/libcontainer/system"
)

// chroot on linux uses pivot_root instead of chroot
// pivot_root takes a new root and an old root.
// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root.
// New root is where the new rootfs is set to.
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs
func chroot(path string) (err error) {
	// if the engine is running in a user namespace we need to use actual chroot
	if rsystem.RunningInUserNS() {
		return realChroot(path)
	}
	if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
		return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
	}

	// make everything in new ns private
	if err := mount.MakeRPrivate("/"); err != nil {
		return err
	}

	if mounted, _ := mount.Mounted(path); !mounted {
		if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
			return realChroot(path)
		}
	}

	// setup oldRoot for pivot_root
	pivotDir, err := ioutil.TempDir(path, ".pivot_root")
	if err != nil {
		return fmt.Errorf("Error setting up pivot dir: %v", err)
	}

	var mounted bool
	defer func() {
		if mounted {
			// make sure pivotDir is not mounted before we try to remove it
			if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil {
				if err == nil {
					err = errCleanup
				}
				return
			}
		}

		errCleanup := os.Remove(pivotDir)
		// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
		// because we already cleaned it up on failed pivot_root
		if errCleanup != nil && !os.IsNotExist(errCleanup) {
			errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
			if err == nil {
				err = errCleanup
			}
		}
	}()

	if err := syscall.PivotRoot(path, pivotDir); err != nil {
		// If pivot fails, fall back to the normal chroot after cleaning up temp dir
		if err := os.Remove(pivotDir); err != nil {
			return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
		}
		return realChroot(path)
	}
	mounted = true

	// This is the new path for where the old root (prior to the pivot) has been moved to
	// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
	pivotDir = filepath.Join("/", filepath.Base(pivotDir))

	if err := syscall.Chdir("/"); err != nil {
		return fmt.Errorf("Error changing to new root: %v", err)
	}

	// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
	if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil {
		return fmt.Errorf("Error making old root private after pivot: %v", err)
	}

	// Now unmount the old root so it's no longer visible from the new root
	if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
		return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
	}
	mounted = false

	return nil
}

func realChroot(path string) error {
	if err := syscall.Chroot(path); err != nil {
		return fmt.Errorf("Error after fallback to chroot: %v", err)
	}
	if err := syscall.Chdir("/"); err != nil {
		return fmt.Errorf("Error changing to new root after chroot: %v", err)
	}
	return nil
}
@@ -0,0 +1,12 @@
// +build !windows,!linux

package chrootarchive

import "syscall"

func chroot(path string) error {
	if err := syscall.Chroot(path); err != nil {
		return err
	}
	return syscall.Chdir("/")
}
@@ -0,0 +1,23 @@
package chrootarchive

import (
	"io"

	"github.com/docker/docker/pkg/archive"
)

// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed (it is passed through DecompressStream).
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
	return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
}

// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
	return applyLayerHandler(dest, layer, options, false)
}
@@ -0,0 +1,130 @@
//+build !windows

package chrootarchive

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/reexec"
	"github.com/docker/docker/pkg/system"
	rsystem "github.com/opencontainers/runc/libcontainer/system"
)

type applyLayerResponse struct {
	LayerSize int64 `json:"layerSize"`
}

// applyLayer is the entry-point for docker-applylayer on re-exec. This is not
// used on Windows as it does not support chroot, hence no point sandboxing
// through chroot and re-exec.
func applyLayer() {

	var (
		tmpDir  string
		err     error
		options *archive.TarOptions
	)
	runtime.LockOSThread()
	flag.Parse()

	inUserns := rsystem.RunningInUserNS()
	if err := chroot(flag.Arg(0)); err != nil {
		fatal(err)
	}

	// We need to be able to set any perms
	oldmask, err := system.Umask(0)
	defer system.Umask(oldmask)
	if err != nil {
		fatal(err)
	}

	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
		fatal(err)
	}

	if inUserns {
		options.InUserNS = true
	}

	if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
		fatal(err)
	}

	os.Setenv("TMPDIR", tmpDir)
	size, err := archive.UnpackLayer("/", os.Stdin, options)
	os.RemoveAll(tmpDir)
	if err != nil {
		fatal(err)
	}

	encoder := json.NewEncoder(os.Stdout)
	if err := encoder.Encode(applyLayerResponse{size}); err != nil {
		fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
	}

	if _, err := flush(os.Stdin); err != nil {
		fatal(err)
	}

	os.Exit(0)
}

// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
	dest = filepath.Clean(dest)
	if decompress {
		decompressed, err := archive.DecompressStream(layer)
		if err != nil {
			return 0, err
		}
		defer decompressed.Close()

		layer = decompressed
	}
	if options == nil {
		options = &archive.TarOptions{}
		if rsystem.RunningInUserNS() {
			options.InUserNS = true
		}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	data, err := json.Marshal(options)
	if err != nil {
		return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
	}

	cmd := reexec.Command("docker-applyLayer", dest)
	cmd.Stdin = layer
	cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))

	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
	cmd.Stdout, cmd.Stderr = outBuf, errBuf

	if err = cmd.Run(); err != nil {
		return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
	}

	// Stdout should be a valid JSON struct representing an applyLayerResponse.
	response := applyLayerResponse{}
	decoder := json.NewDecoder(outBuf)
	if err = decoder.Decode(&response); err != nil {
		return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
	}

	return response.LayerSize, nil
}
@@ -0,0 +1,45 @@
package chrootarchive

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/longpath"
)

// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
	dest = filepath.Clean(dest)

	// Ensure it is a Windows-style volume path
	dest = longpath.AddPrefix(dest)

	if decompress {
		decompressed, err := archive.DecompressStream(layer)
		if err != nil {
			return 0, err
		}
		defer decompressed.Close()

		layer = decompressed
	}

	tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
	if err != nil {
		return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
	}

	s, err := archive.UnpackLayer(dest, layer, nil)
	os.RemoveAll(tmpDir)
	if err != nil {
		return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
	}

	return s, nil
}
@@ -0,0 +1,28 @@
// +build !windows

package chrootarchive

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	reexec.Register("docker-applyLayer", applyLayer)
	reexec.Register("docker-untar", untar)
}

func fatal(err error) {
	fmt.Fprint(os.Stderr, err)
	os.Exit(1)
}

// flush consumes all the bytes from the reader discarding
// any errors
func flush(r io.Reader) (bytes int64, err error) {
	return io.Copy(ioutil.Discard, r)
}
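Editor's note: the registrations above only fire if the embedding binary dispatches on re-exec. A minimal sketch of the full reexec pattern, under the assumption of a hypothetical handler name "my-worker" (Register, Init, and Command are the real docker/pkg/reexec API):

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Runs in the child process when the binary is re-executed
	// under the registered name (matched against os.Args[0]).
	reexec.Register("my-worker", func() {
		fmt.Println("hello from the sandboxed child")
		os.Exit(0)
	})
}

func main() {
	// In the child, reexec.Init finds "my-worker" in os.Args[0], runs the
	// handler, and returns true so main exits immediately.
	if reexec.Init() {
		return
	}

	// Parent: spawn ourselves again under the registered name.
	cmd := reexec.Command("my-worker")
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}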
@@ -0,0 +1,4 @@
package chrootarchive

func init() {
}
@@ -0,0 +1,298 @@
package fileutils

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"text/scanner"

	"github.com/Sirupsen/logrus"
)

// PatternMatcher allows checking paths against a list of patterns
type PatternMatcher struct {
	patterns   []*Pattern
	exclusions bool
}

// NewPatternMatcher creates a new matcher object for specific patterns that can
// be used later to match paths against the patterns
func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
	pm := &PatternMatcher{
		patterns: make([]*Pattern, 0, len(patterns)),
	}
	for _, p := range patterns {
		// Eliminate leading and trailing whitespace.
		p = strings.TrimSpace(p)
		if p == "" {
			continue
		}
		p = filepath.Clean(p)
		newp := &Pattern{}
		if p[0] == '!' {
			if len(p) == 1 {
				return nil, errors.New("illegal exclusion pattern: \"!\"")
			}
			newp.exclusion = true
			p = p[1:]
			pm.exclusions = true
		}
		// Do some syntax checking on the pattern.
		// filepath's Match() has some really weird rules that are inconsistent
		// so instead of trying to dup their logic, just call Match() for its
		// error state and if there is an error in the pattern return it.
		// If this becomes an issue we can remove this since it's really only
		// needed in the error (syntax) case - which isn't really critical.
		if _, err := filepath.Match(p, "."); err != nil {
			return nil, err
		}
		newp.cleanedPattern = p
		newp.dirs = strings.Split(p, string(os.PathSeparator))
		pm.patterns = append(pm.patterns, newp)
	}
	return pm, nil
}

// Matches matches path against all the patterns. Matches is not safe to be
// called concurrently
func (pm *PatternMatcher) Matches(file string) (bool, error) {
	matched := false
	file = filepath.FromSlash(file)
	parentPath := filepath.Dir(file)
	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))

	for _, pattern := range pm.patterns {
		negative := false

		if pattern.exclusion {
			negative = true
		}

		match, err := pattern.match(file)
		if err != nil {
			return false, err
		}

		if !match && parentPath != "." {
			// Check to see if the pattern matches one of our parent dirs.
			if len(pattern.dirs) <= len(parentPathDirs) {
				match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
			}
		}

		if match {
			matched = !negative
		}
	}

	if matched {
		logrus.Debugf("Skipping excluded path: %s", file)
	}

	return matched, nil
}

// Exclusions returns true if any of the patterns define exclusions
func (pm *PatternMatcher) Exclusions() bool {
	return pm.exclusions
}

// Patterns returns array of active patterns
func (pm *PatternMatcher) Patterns() []*Pattern {
	return pm.patterns
}

// Pattern defines a single regexp used to filter file paths.
type Pattern struct {
	cleanedPattern string
	dirs           []string
	regexp         *regexp.Regexp
	exclusion      bool
}

func (p *Pattern) String() string {
	return p.cleanedPattern
}

// Exclusion returns true if this pattern defines exclusion
func (p *Pattern) Exclusion() bool {
	return p.exclusion
}

func (p *Pattern) match(path string) (bool, error) {

	if p.regexp == nil {
		if err := p.compile(); err != nil {
			return false, filepath.ErrBadPattern
		}
	}

	b := p.regexp.MatchString(path)

	return b, nil
}

func (p *Pattern) compile() error {
	regStr := "^"
	pattern := p.cleanedPattern
	// Go through the pattern and convert it to a regexp.
	// We use a scanner so we can support utf-8 chars.
	var scan scanner.Scanner
	scan.Init(strings.NewReader(pattern))

	sl := string(os.PathSeparator)
	escSL := sl
	if sl == `\` {
		escSL += `\`
	}

	for scan.Peek() != scanner.EOF {
		ch := scan.Next()

		if ch == '*' {
			if scan.Peek() == '*' {
				// is some flavor of "**"
				scan.Next()

				// Treat **/ as ** so eat the "/"
				if string(scan.Peek()) == sl {
					scan.Next()
				}

				if scan.Peek() == scanner.EOF {
					// is "**EOF" - to align with .gitignore just accept all
					regStr += ".*"
				} else {
					// is "**"
					// Note that this allows for any # of /'s (even 0) because
					// the .* will eat everything, even /'s
					regStr += "(.*" + escSL + ")?"
				}
			} else {
				// is "*" so map it to anything but "/"
				regStr += "[^" + escSL + "]*"
			}
		} else if ch == '?' {
			// "?" is any char except "/"
			regStr += "[^" + escSL + "]"
		} else if ch == '.' || ch == '$' {
			// Escape some regexp special chars that have no meaning
			// in golang's filepath.Match
			regStr += `\` + string(ch)
		} else if ch == '\\' {
			// escape next char. Note that a trailing \ in the pattern
			// will be left alone (but need to escape it)
			if sl == `\` {
				// On windows map "\" to "\\", meaning an escaped backslash,
				// and then just continue because filepath.Match on
				// Windows doesn't allow escaping at all
				regStr += escSL
				continue
			}
			if scan.Peek() != scanner.EOF {
				regStr += `\` + string(scan.Next())
			} else {
				regStr += `\`
			}
		} else {
			regStr += string(ch)
		}
	}

	regStr += "$"

	re, err := regexp.Compile(regStr)
	if err != nil {
		return err
	}

	p.regexp = re
	return nil
}

// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
	pm, err := NewPatternMatcher(patterns)
	if err != nil {
		return false, err
	}
	file = filepath.Clean(file)

	if file == "." {
		// Don't let them exclude everything, kind of silly.
		return false, nil
	}

	return pm.Matches(file)
}

// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
	cleanSrc := filepath.Clean(src)
	cleanDst := filepath.Clean(dst)
	if cleanSrc == cleanDst {
		return 0, nil
	}
	sf, err := os.Open(cleanSrc)
	if err != nil {
		return 0, err
	}
	defer sf.Close()
	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
		return 0, err
	}
	df, err := os.Create(cleanDst)
	if err != nil {
		return 0, err
	}
	defer df.Close()
	return io.Copy(df, sf)
}

// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link may not be a file.
func ReadSymlinkedDirectory(path string) (string, error) {
	var realPath string
	var err error
	if realPath, err = filepath.Abs(path); err != nil {
		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
	}
	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
	}
	realPathInfo, err := os.Stat(realPath)
	if err != nil {
		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
	}
	if !realPathInfo.Mode().IsDir() {
		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
	}
	return realPath, nil
}

// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			if isDir {
				return os.MkdirAll(path, 0755)
			}
			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
				return err
			}
			f, err := os.OpenFile(path, os.O_CREATE, 0755)
			if err != nil {
				return err
			}
			f.Close()
		}
	}
	return nil
}
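Editor's note: a minimal sketch of the .dockerignore-style semantics implemented above. The file names and patterns are illustrative only; "**" maps to the regex `(.*/)?` (any depth, including zero), and a later "!" pattern re-includes a previously matched path.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// Exclude all markdown files at any depth, then re-include
	// README.md via the "!" exclusion prefix.
	patterns := []string{"**/*.md", "!README.md"}

	for _, f := range []string{"README.md", "docs/guide.md", "main.go"} {
		matched, err := fileutils.Matches(f, patterns)
		if err != nil {
			log.Fatal(err)
		}
		// Expected: README.md false, docs/guide.md true, main.go false.
		fmt.Printf("%s excluded: %v\n", f, matched)
	}
}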
@@ -0,0 +1,27 @@
package fileutils

import (
	"os"
	"os/exec"
	"strconv"
	"strings"
)

// GetTotalUsedFds returns the number of used File Descriptors by
// executing `lsof -p PID`
func GetTotalUsedFds() int {
	pid := os.Getpid()

	cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))

	output, err := cmd.CombinedOutput()
	if err != nil {
		return -1
	}

	outputStr := strings.TrimSpace(string(output))

	fds := strings.Split(outputStr, "\n")

	return len(fds) - 1
}
@@ -0,0 +1,7 @@
package fileutils

// GetTotalUsedFds returns the number of used File Descriptors.
// On Solaris these limits are per process and not systemwide
func GetTotalUsedFds() int {
	return -1
}
@@ -0,0 +1,22 @@
// +build linux freebsd

package fileutils

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/Sirupsen/logrus"
)

// GetTotalUsedFds returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
	} else {
		return len(fds)
	}
	return -1
}
@@ -0,0 +1,7 @@
package fileutils

// GetTotalUsedFds returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
	return -1
}
@ -0,0 +1,279 @@
|
||||||
|
package idtools
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IDMap contains a single entry for user namespace range remapping. An array
|
||||||
|
// of IDMap entries represents the structure that will be provided to the Linux
|
||||||
|
// kernel for creating a user namespace.
|
||||||
|
type IDMap struct {
|
||||||
|
ContainerID int `json:"container_id"`
|
||||||
|
HostID int `json:"host_id"`
|
||||||
|
Size int `json:"size"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type subIDRange struct {
|
||||||
|
Start int
|
||||||
|
Length int
|
||||||
|
}
|
||||||
|
|
||||||
|
type ranges []subIDRange
|
||||||
|
|
||||||
|
func (e ranges) Len() int { return len(e) }
|
||||||
|
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
|
||||||
|
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
|
||||||
|
|
||||||
|
const (
|
||||||
|
subuidFileName string = "/etc/subuid"
|
||||||
|
subgidFileName string = "/etc/subgid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MkdirAllAs creates a directory (include any along the path) and then modifies
|
||||||
|
// ownership to the requested uid/gid. If the directory already exists, this
|
||||||
|
// function will still change ownership to the requested uid/gid pair.
|
||||||
|
// Deprecated: Use MkdirAllAndChown
|
||||||
|
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
||||||
|
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
|
||||||
|
// If the directory already exists, this function still changes ownership
|
||||||
|
// Deprecated: Use MkdirAndChown with a IDPair
|
||||||
|
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
||||||
|
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MkdirAllAndChown creates a directory (include any along the path) and then modifies
|
||||||
|
// ownership to the requested uid/gid. If the directory already exists, this
|
||||||
|
// function will still change ownership to the requested uid/gid pair.
|
||||||
|
func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error {
|
||||||
|
return mkdirAs(path, mode, ids.UID, ids.GID, true, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
|
||||||
|
// If the directory already exists, this function still changes ownership
|
||||||
|
func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error {
|
||||||
|
return mkdirAs(path, mode, ids.UID, ids.GID, false, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies
|
||||||
|
// ownership ONLY of newly created directories to the requested uid/gid. If the
|
||||||
|
// directories along the path exist, no change of ownership will be performed
|
||||||
|
func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error {
|
||||||
|
return mkdirAs(path, mode, ids.UID, ids.GID, true, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
|
||||||
|
// If the maps are empty, then the root uid/gid will default to "real" 0/0
|
||||||
|
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
|
||||||
|
uid, err := toHost(0, uidMap)
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, err
|
||||||
|
}
|
||||||
|
gid, err := toHost(0, gidMap)
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, err
|
||||||
|
}
|
||||||
|
return uid, gid, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// toContainer takes an id mapping, and uses it to translate a
|
||||||
|
// host ID to the remapped ID. If no map is provided, then the translation
|
||||||
|
// assumes a 1-to-1 mapping and returns the passed in id
|
||||||
|
func toContainer(hostID int, idMap []IDMap) (int, error) {
|
||||||
|
if idMap == nil {
|
||||||
|
return hostID, nil
|
||||||
|
}
|
||||||
|
for _, m := range idMap {
|
||||||
|
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
|
||||||
|
contID := m.ContainerID + (hostID - m.HostID)
|
||||||
|
return contID, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// toHost takes an id mapping and a remapped ID, and translates the
|
||||||
|
// ID to the mapped host ID. If no map is provided, then the translation
|
||||||
|
// assumes a 1-to-1 mapping and returns the passed in id #
|
||||||
|
func toHost(contID int, idMap []IDMap) (int, error) {
|
||||||
|
if idMap == nil {
|
||||||
|
return contID, nil
|
||||||
|
}
|
||||||
|
for _, m := range idMap {
|
||||||
|
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
|
||||||
|
hostID := m.HostID + (contID - m.ContainerID)
|
||||||
|
return hostID, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDPair is a UID and GID pair
|
||||||
|
type IDPair struct {
|
||||||
|
UID int
|
||||||
|
GID int
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDMappings contains a mappings of UIDs and GIDs
|
||||||
|
type IDMappings struct {
|
||||||
|
uids []IDMap
|
||||||
|
gids []IDMap
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIDMappings takes a requested user and group name and
|
||||||
|
// using the data from /etc/sub{uid,gid} ranges, creates the
|
||||||
|
// proper uid and gid remapping ranges for that user/group pair
|
||||||
|
func NewIDMappings(username, groupname string) (*IDMappings, error) {
|
||||||
|
subuidRanges, err := parseSubuid(username)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
subgidRanges, err := parseSubgid(groupname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(subuidRanges) == 0 {
|
||||||
|
return nil, fmt.Errorf("No subuid ranges found for user %q", username)
|
||||||
|
}
|
||||||
|
if len(subgidRanges) == 0 {
|
||||||
|
return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &IDMappings{
|
||||||
|
uids: createIDMap(subuidRanges),
|
||||||
|
gids: createIDMap(subgidRanges),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIDMappingsFromMaps creates a new mapping from two slices
|
||||||
|
// Deprecated: this is a temporary shim while transitioning to IDMapping
|
||||||
|
func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
|
||||||
|
return &IDMappings{uids: uids, gids: gids}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RootPair returns a uid and gid pair for the root user. The error is ignored
|
||||||
|
// because a root user always exists, and the defaults are correct when the uid
|
||||||
|
// and gid maps are empty.
|
||||||
|
func (i *IDMappings) RootPair() IDPair {
|
||||||
|
uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
|
||||||
|
return IDPair{UID: uid, GID: gid}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToHost returns the host UID and GID for the container uid, gid.
|
||||||
|
// Remapping is only performed if the ids aren't already the remapped root ids
func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
    var err error
    target := i.RootPair()

    if pair.UID != target.UID {
        target.UID, err = toHost(pair.UID, i.uids)
        if err != nil {
            return target, err
        }
    }

    if pair.GID != target.GID {
        target.GID, err = toHost(pair.GID, i.gids)
    }
    return target, err
}

// ToContainer returns the container UID and GID for the host uid and gid
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
    uid, err := toContainer(pair.UID, i.uids)
    if err != nil {
        return -1, -1, err
    }
    gid, err := toContainer(pair.GID, i.gids)
    return uid, gid, err
}

// Empty returns true if there are no id mappings
func (i *IDMappings) Empty() bool {
    return len(i.uids) == 0 && len(i.gids) == 0
}

// UIDs returns the UID mapping
// TODO: remove this once everything has been refactored to use pairs
func (i *IDMappings) UIDs() []IDMap {
    return i.uids
}

// GIDs returns the GID mapping
// TODO: remove this once everything has been refactored to use pairs
func (i *IDMappings) GIDs() []IDMap {
    return i.gids
}

func createIDMap(subidRanges ranges) []IDMap {
    idMap := []IDMap{}

    // sort the ranges by lowest ID first
    sort.Sort(subidRanges)
    containerID := 0
    for _, idrange := range subidRanges {
        idMap = append(idMap, IDMap{
            ContainerID: containerID,
            HostID:      idrange.Start,
            Size:        idrange.Length,
        })
        containerID = containerID + idrange.Length
    }
    return idMap
}

func parseSubuid(username string) (ranges, error) {
    return parseSubidFile(subuidFileName, username)
}

func parseSubgid(username string) (ranges, error) {
    return parseSubidFile(subgidFileName, username)
}

// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
    var rangeList ranges

    subidFile, err := os.Open(path)
    if err != nil {
        return rangeList, err
    }
    defer subidFile.Close()

    s := bufio.NewScanner(subidFile)
    for s.Scan() {
        if err := s.Err(); err != nil {
            return rangeList, err
        }

        text := strings.TrimSpace(s.Text())
        if text == "" || strings.HasPrefix(text, "#") {
            continue
        }
        parts := strings.Split(text, ":")
        if len(parts) != 3 {
            return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
        }
        if parts[0] == username || username == "ALL" {
            startid, err := strconv.Atoi(parts[1])
            if err != nil {
                return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
            }
            length, err := strconv.Atoi(parts[2])
            if err != nil {
                return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
            }
            rangeList = append(rangeList, subIDRange{startid, length})
        }
    }
    return rangeList, nil
}
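For context on what parseSubidFile consumes: entries in /etc/subuid and /etc/subgid use the `name:start:length` format. A minimal, self-contained sketch of the same split-and-convert logic (the username "builder" and the standalone main package are illustrative, not part of the vendored code):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

func main() {
    // A typical /etc/subuid entry: subordinate UIDs 100000..165535 for "builder".
    line := "builder:100000:65536"
    parts := strings.Split(line, ":")
    start, _ := strconv.Atoi(parts[1])
    length, _ := strconv.Atoi(parts[2])
    // createIDMap above would map container IDs 0..65535 onto this host range.
    fmt.Printf("user=%s host range [%d, %d)\n", parts[0], start, start+length)
}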
@ -0,0 +1,204 @@
// +build !windows

package idtools

import (
    "bytes"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "strings"
    "sync"

    "github.com/docker/docker/pkg/system"
    "github.com/opencontainers/runc/libcontainer/user"
)

var (
    entOnce   sync.Once
    getentCmd string
)

func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
    // make an array containing the original path asked for, plus (for mkAll == true)
    // all path components leading up to the complete path that don't exist before we MkdirAll
    // so that we can chown all of them properly at the end. If chownExisting is false, we won't
    // chown the full directory path if it exists
    var paths []string
    if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
        paths = []string{path}
    } else if err == nil && chownExisting {
        // short-circuit--we were called with an existing directory and chown was requested
        return os.Chown(path, ownerUID, ownerGID)
    } else if err == nil {
        // nothing to do; directory path fully exists already and chown was NOT requested
        return nil
    }

    if mkAll {
        // walk back to "/" looking for directories which do not exist
        // and add them to the paths array for chown after creation
        dirPath := path
        for {
            dirPath = filepath.Dir(dirPath)
            if dirPath == "/" {
                break
            }
            if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
                paths = append(paths, dirPath)
            }
        }
        if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
            return err
        }
    } else {
        if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
            return err
        }
    }
    // even if it existed, we will chown the requested path + any subpaths that
    // didn't exist when we called MkdirAll
    for _, pathComponent := range paths {
        if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
            return err
        }
    }
    return nil
}

// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
func CanAccess(path string, pair IDPair) bool {
    statInfo, err := system.Stat(path)
    if err != nil {
        return false
    }
    fileMode := os.FileMode(statInfo.Mode())
    permBits := fileMode.Perm()
    return accessible(statInfo.UID() == uint32(pair.UID),
        statInfo.GID() == uint32(pair.GID), permBits)
}

func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
    if isOwner && (perms&0100 == 0100) {
        return true
    }
    if isGroup && (perms&0010 == 0010) {
        return true
    }
    if perms&0001 == 0001 {
        return true
    }
    return false
}

// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUser(username string) (user.User, error) {
    // first try a local system files lookup using existing capabilities
    usr, err := user.LookupUser(username)
    if err == nil {
        return usr, nil
    }
    // local files lookup failed; attempt to call `getent` to query configured passwd dbs
    usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
    if err != nil {
        return user.User{}, err
    }
    return usr, nil
}

// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUID(uid int) (user.User, error) {
    // first try a local system files lookup using existing capabilities
    usr, err := user.LookupUid(uid)
    if err == nil {
        return usr, nil
    }
    // local files lookup failed; attempt to call `getent` to query configured passwd dbs
    return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
}

func getentUser(args string) (user.User, error) {
    reader, err := callGetent(args)
    if err != nil {
        return user.User{}, err
    }
    users, err := user.ParsePasswd(reader)
    if err != nil {
        return user.User{}, err
    }
    if len(users) == 0 {
        return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
    }
    return users[0], nil
}

// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGroup(groupname string) (user.Group, error) {
    // first try a local system files lookup using existing capabilities
    group, err := user.LookupGroup(groupname)
    if err == nil {
        return group, nil
    }
    // local files lookup failed; attempt to call `getent` to query configured group dbs
    return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
}

// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGID(gid int) (user.Group, error) {
    // first try a local system files lookup using existing capabilities
    group, err := user.LookupGid(gid)
    if err == nil {
        return group, nil
    }
    // local files lookup failed; attempt to call `getent` to query configured group dbs
    return getentGroup(fmt.Sprintf("%s %d", "group", gid))
}

func getentGroup(args string) (user.Group, error) {
    reader, err := callGetent(args)
    if err != nil {
        return user.Group{}, err
    }
    groups, err := user.ParseGroup(reader)
    if err != nil {
        return user.Group{}, err
    }
    if len(groups) == 0 {
        return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
    }
    return groups[0], nil
}

func callGetent(args string) (io.Reader, error) {
    entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
    // if no `getent` command on host, can't do anything else
    if getentCmd == "" {
        return nil, fmt.Errorf("unable to find getent command")
    }
    out, err := execCmd(getentCmd, args)
    if err != nil {
        exitCode, errC := system.GetExitCode(err)
        if errC != nil {
            return nil, err
        }
        switch exitCode {
        case 1:
            return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
        case 2:
            terms := strings.Split(args, " ")
            return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
        case 3:
            return nil, fmt.Errorf("getent database doesn't support enumeration")
        default:
            return nil, err
        }
    }
    return bytes.NewReader(out), nil
}
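Assuming the vendored package is imported under its upstream path, usage is a plain lookup with the getent fallback hidden behind it; a minimal sketch:

package main

import (
    "fmt"

    "github.com/docker/docker/pkg/idtools"
)

func main() {
    // Falls back to `getent passwd root` if the files-based lookup misses.
    usr, err := idtools.LookupUser("root")
    if err != nil {
        fmt.Println("lookup failed:", err)
        return
    }
    fmt.Printf("uid=%d gid=%d home=%s\n", usr.Uid, usr.Gid, usr.Home)
}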
@ -0,0 +1,25 @@
// +build windows

package idtools

import (
    "os"

    "github.com/docker/docker/pkg/system"
)

// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
    if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
        return err
    }
    return nil
}

// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory.
// Windows does not require/support this function, so always return true.
func CanAccess(path string, pair IDPair) bool {
    return true
}
164 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go generated vendored Normal file
@ -0,0 +1,164 @@
package idtools

import (
    "fmt"
    "regexp"
    "sort"
    "strconv"
    "strings"
    "sync"
)

// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
// useradd -r -s /bin/false <username>

var (
    once        sync.Once
    userCommand string

    cmdTemplates = map[string]string{
        "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
        "useradd": "-r -s /bin/false %s",
        "usermod": "-%s %d-%d %s",
    }

    idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
    // default length for a UID/GID subordinate range
    defaultRangeLen   = 65536
    defaultRangeStart = 100000
    userMod           = "usermod"
)

// AddNamespaceRangesUser takes a username and uses the standard system
// utility to create a system user/group pair used to hold the
// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
    if err := addUser(name); err != nil {
        return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
    }

    // Query the system for the created uid and gid pair
    out, err := execCmd("id", name)
    if err != nil {
        return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
    }
    matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
    if len(matches) != 3 {
        return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
    }
    uid, err := strconv.Atoi(matches[1])
    if err != nil {
        return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
    }
    gid, err := strconv.Atoi(matches[2])
    if err != nil {
        return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
    }

    // Now we need to create the subuid/subgid ranges for our new user/group (system users
    // do not get auto-created ranges in subuid/subgid)

    if err := createSubordinateRanges(name); err != nil {
        return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
    }
    return uid, gid, nil
}

func addUser(userName string) error {
    once.Do(func() {
        // set up which commands are used for adding users/groups dependent on distro
        if _, err := resolveBinary("adduser"); err == nil {
            userCommand = "adduser"
        } else if _, err := resolveBinary("useradd"); err == nil {
            userCommand = "useradd"
        }
    })
    if userCommand == "" {
        return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
    }
    args := fmt.Sprintf(cmdTemplates[userCommand], userName)
    out, err := execCmd(userCommand, args)
    if err != nil {
        return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
    }
    return nil
}

func createSubordinateRanges(name string) error {

    // first, we should verify that ranges weren't automatically created
    // by the distro tooling
    ranges, err := parseSubuid(name)
    if err != nil {
        return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
    }
    if len(ranges) == 0 {
        // no UID ranges; let's create one
        startID, err := findNextUIDRange()
        if err != nil {
            return fmt.Errorf("Can't find available subuid range: %v", err)
        }
        out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
        if err != nil {
            return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
        }
    }

    ranges, err = parseSubgid(name)
    if err != nil {
        return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
    }
    if len(ranges) == 0 {
        // no GID ranges; let's create one
        startID, err := findNextGIDRange()
        if err != nil {
            return fmt.Errorf("Can't find available subgid range: %v", err)
        }
        out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
        if err != nil {
            return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
        }
    }
    return nil
}

func findNextUIDRange() (int, error) {
    ranges, err := parseSubuid("ALL")
    if err != nil {
        return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
    }
    sort.Sort(ranges)
    return findNextRangeStart(ranges)
}

func findNextGIDRange() (int, error) {
    ranges, err := parseSubgid("ALL")
    if err != nil {
        return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
    }
    sort.Sort(ranges)
    return findNextRangeStart(ranges)
}

func findNextRangeStart(rangeList ranges) (int, error) {
    startID := defaultRangeStart
    for _, arange := range rangeList {
        if wouldOverlap(arange, startID) {
            startID = arange.Start + arange.Length
        }
    }
    return startID, nil
}

func wouldOverlap(arange subIDRange, ID int) bool {
    low := ID
    high := ID + defaultRangeLen
    if (low >= arange.Start && low <= arange.Start+arange.Length) ||
        (high <= arange.Start+arange.Length && high >= arange.Start) {
        return true
    }
    return false
}
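To make the range scan concrete: with the default start of 100000 and one existing range of length 65536 already at 100000, the next free start lands just past it. A self-contained sketch of the same walk (the subIDRange type is inlined here purely for illustration):

package main

import "fmt"

type subIDRange struct{ Start, Length int }

const defaultRangeLen = 65536

func main() {
    existing := []subIDRange{{Start: 100000, Length: 65536}}
    startID := 100000 // defaultRangeStart
    for _, r := range existing {
        // same overlap test as wouldOverlap above
        low, high := startID, startID+defaultRangeLen
        if (low >= r.Start && low <= r.Start+r.Length) ||
            (high <= r.Start+r.Length && high >= r.Start) {
            startID = r.Start + r.Length
        }
    }
    fmt.Println(startID) // 165536
}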
12 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go generated vendored Normal file
@ -0,0 +1,12 @@
// +build !linux

package idtools

import "fmt"

// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
    return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}
@ -0,0 +1,32 @@
// +build !windows

package idtools

import (
    "fmt"
    "os/exec"
    "path/filepath"
    "strings"
)

func resolveBinary(binname string) (string, error) {
    binaryPath, err := exec.LookPath(binname)
    if err != nil {
        return "", err
    }
    resolvedPath, err := filepath.EvalSymlinks(binaryPath)
    if err != nil {
        return "", err
    }
    // only return no error if the final resolved binary basename
    // matches what was searched for
    if filepath.Base(resolvedPath) == binname {
        return resolvedPath, nil
    }
    return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}

func execCmd(cmd, args string) ([]byte, error) {
    execCmd := exec.Command(cmd, strings.Split(args, " ")...)
    return execCmd.CombinedOutput()
}
@ -0,0 +1,51 @@
package ioutils

import (
    "errors"
    "io"
)

var errBufferFull = errors.New("buffer is full")

type fixedBuffer struct {
    buf      []byte
    pos      int
    lastRead int
}

func (b *fixedBuffer) Write(p []byte) (int, error) {
    n := copy(b.buf[b.pos:cap(b.buf)], p)
    b.pos += n

    if n < len(p) {
        if b.pos == cap(b.buf) {
            return n, errBufferFull
        }
        return n, io.ErrShortWrite
    }
    return n, nil
}

func (b *fixedBuffer) Read(p []byte) (int, error) {
    n := copy(p, b.buf[b.lastRead:b.pos])
    b.lastRead += n
    return n, nil
}

func (b *fixedBuffer) Len() int {
    return b.pos - b.lastRead
}

func (b *fixedBuffer) Cap() int {
    return cap(b.buf)
}

func (b *fixedBuffer) Reset() {
    b.pos = 0
    b.lastRead = 0
    b.buf = b.buf[:0]
}

func (b *fixedBuffer) String() string {
    return string(b.buf[b.lastRead:b.pos])
}
@ -0,0 +1,186 @@
package ioutils

import (
    "errors"
    "io"
    "sync"
)

// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6

// minCap is the lowest capacity to use in byte slices that buffer data
const minCap = 64

// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6

var (
    // ErrClosed is returned when Write is called on a closed BytesPipe.
    ErrClosed = errors.New("write to closed BytesPipe")

    bufPools     = make(map[int]*sync.Pool)
    bufPoolsLock sync.Mutex
)

// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
type BytesPipe struct {
    mu       sync.Mutex
    wait     *sync.Cond
    buf      []*fixedBuffer
    bufLen   int
    closeErr error // error to return from next Read. set to nil if not closed.
}

// NewBytesPipe creates a new BytesPipe, initialized with a single pooled
// slice of length 0 and capacity 64 (minCap).
func NewBytesPipe() *BytesPipe {
    bp := &BytesPipe{}
    bp.buf = append(bp.buf, getBuffer(minCap))
    bp.wait = sync.NewCond(&bp.mu)
    return bp
}

// Write writes p to BytesPipe.
// It can allocate new []byte slices in a process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
    bp.mu.Lock()

    written := 0
loop0:
    for {
        if bp.closeErr != nil {
            bp.mu.Unlock()
            return written, ErrClosed
        }

        if len(bp.buf) == 0 {
            bp.buf = append(bp.buf, getBuffer(64))
        }
        // get the last buffer
        b := bp.buf[len(bp.buf)-1]

        n, err := b.Write(p)
        written += n
        bp.bufLen += n

        // errBufferFull is an error we expect to get if the buffer is full
        if err != nil && err != errBufferFull {
            bp.wait.Broadcast()
            bp.mu.Unlock()
            return written, err
        }

        // if there was enough room to write all then break
        if len(p) == n {
            break
        }

        // more data: write to the next slice
        p = p[n:]

        // make sure the buffer doesn't grow too big from this write
        for bp.bufLen >= blockThreshold {
            bp.wait.Wait()
            if bp.closeErr != nil {
                continue loop0
            }
        }

        // add new byte slice to the buffers slice and continue writing
        nextCap := b.Cap() * 2
        if nextCap > maxCap {
            nextCap = maxCap
        }
        bp.buf = append(bp.buf, getBuffer(nextCap))
    }
    bp.wait.Broadcast()
    bp.mu.Unlock()
    return written, nil
}

// CloseWithError causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) CloseWithError(err error) error {
    bp.mu.Lock()
    if err != nil {
        bp.closeErr = err
    } else {
        bp.closeErr = io.EOF
    }
    bp.wait.Broadcast()
    bp.mu.Unlock()
    return nil
}

// Close causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) Close() error {
    return bp.CloseWithError(nil)
}

// Read reads bytes from BytesPipe.
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
    bp.mu.Lock()
    if bp.bufLen == 0 {
        if bp.closeErr != nil {
            bp.mu.Unlock()
            return 0, bp.closeErr
        }
        bp.wait.Wait()
        if bp.bufLen == 0 && bp.closeErr != nil {
            err := bp.closeErr
            bp.mu.Unlock()
            return 0, err
        }
    }

    for bp.bufLen > 0 {
        b := bp.buf[0]
        read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
        n += read
        bp.bufLen -= read

        if b.Len() == 0 {
            // it's empty so return it to the pool and move to the next one
            returnBuffer(b)
            bp.buf[0] = nil
            bp.buf = bp.buf[1:]
        }

        if len(p) == read {
            break
        }

        p = p[read:]
    }

    bp.wait.Broadcast()
    bp.mu.Unlock()
    return
}

func returnBuffer(b *fixedBuffer) {
    b.Reset()
    bufPoolsLock.Lock()
    pool := bufPools[b.Cap()]
    bufPoolsLock.Unlock()
    if pool != nil {
        pool.Put(b)
    }
}

func getBuffer(size int) *fixedBuffer {
    bufPoolsLock.Lock()
    pool, ok := bufPools[size]
    if !ok {
        pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
        bufPools[size] = pool
    }
    bufPoolsLock.Unlock()
    return pool.Get().(*fixedBuffer)
}
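A quick usage sketch, assuming the package is imported as github.com/docker/docker/pkg/ioutils: writes land in pooled slices, and each byte is readable exactly once.

package main

import (
    "fmt"

    "github.com/docker/docker/pkg/ioutils"
)

func main() {
    bp := ioutils.NewBytesPipe()
    go func() {
        bp.Write([]byte("hello, "))
        bp.Write([]byte("pipe"))
        bp.Close() // readers see io.EOF once the pipe is drained
    }()

    buf := make([]byte, 32)
    for {
        n, err := bp.Read(buf) // blocks until data is written or the pipe closes
        fmt.Print(string(buf[:n]))
        if err != nil {
            break
        }
    }
    // Output: hello, pipe
}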
@ -0,0 +1,162 @@
package ioutils

import (
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
)

// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
    f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
    if err != nil {
        return nil, err
    }

    abspath, err := filepath.Abs(filename)
    if err != nil {
        return nil, err
    }
    return &atomicFileWriter{
        f:    f,
        fn:   abspath,
        perm: perm,
    }, nil
}

// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
    f, err := NewAtomicFileWriter(filename, perm)
    if err != nil {
        return err
    }
    n, err := f.Write(data)
    if err == nil && n < len(data) {
        err = io.ErrShortWrite
        f.(*atomicFileWriter).writeErr = err
    }
    if err1 := f.Close(); err == nil {
        err = err1
    }
    return err
}

type atomicFileWriter struct {
    f        *os.File
    fn       string
    writeErr error
    perm     os.FileMode
}

func (w *atomicFileWriter) Write(dt []byte) (int, error) {
    n, err := w.f.Write(dt)
    if err != nil {
        w.writeErr = err
    }
    return n, err
}

func (w *atomicFileWriter) Close() (retErr error) {
    defer func() {
        if retErr != nil || w.writeErr != nil {
            os.Remove(w.f.Name())
        }
    }()
    if err := w.f.Sync(); err != nil {
        w.f.Close()
        return err
    }
    if err := w.f.Close(); err != nil {
        return err
    }
    if err := os.Chmod(w.f.Name(), w.perm); err != nil {
        return err
    }
    if w.writeErr == nil {
        return os.Rename(w.f.Name(), w.fn)
    }
    return nil
}

// AtomicWriteSet is used to atomically write a set
// of files and ensure they are visible at the same time.
// Must be committed to a new directory.
type AtomicWriteSet struct {
    root string
}

// NewAtomicWriteSet creates a new atomic write set to
// atomically create a set of files. The given directory
// is used as the base directory for storing files before
// commit. If no temporary directory is given the system
// default is used.
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
    td, err := ioutil.TempDir(tmpDir, "write-set-")
    if err != nil {
        return nil, err
    }

    return &AtomicWriteSet{
        root: td,
    }, nil
}

// WriteFile writes a file to the set, guaranteeing the file
// has been synced.
func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
    f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
    if err != nil {
        return err
    }
    n, err := f.Write(data)
    if err == nil && n < len(data) {
        err = io.ErrShortWrite
    }
    if err1 := f.Close(); err == nil {
        err = err1
    }
    return err
}

type syncFileCloser struct {
    *os.File
}

func (w syncFileCloser) Close() error {
    err := w.File.Sync()
    if err1 := w.File.Close(); err == nil {
        err = err1
    }
    return err
}

// FileWriter opens a file writer inside the set. The file
// should be synced and closed before calling commit.
func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
    f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
    if err != nil {
        return nil, err
    }
    return syncFileCloser{f}, nil
}

// Cancel cancels the set and removes all temporary data
// created in the set.
func (ws *AtomicWriteSet) Cancel() error {
    return os.RemoveAll(ws.root)
}

// Commit moves all created files to the target directory. The
// target directory must not exist and the parent of the target
// directory must exist.
func (ws *AtomicWriteSet) Commit(target string) error {
    return os.Rename(ws.root, target)
}

// String returns the location the set is writing to.
func (ws *AtomicWriteSet) String() string {
    return ws.root
}
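Usage mirrors ioutil.WriteFile but with crash safety: the rename only happens after a successful sync and close, so either the old contents or the new contents survive, never a torn file. A minimal sketch (the path is illustrative):

package main

import (
    "log"

    "github.com/docker/docker/pkg/ioutils"
)

func main() {
    // Data is written to a .tmp-* sibling, fsynced, then renamed into place.
    if err := ioutils.AtomicWriteFile("/tmp/example.conf", []byte("key=value\n"), 0644); err != nil {
        log.Fatal(err)
    }
}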
@ -0,0 +1,154 @@
package ioutils

import (
    "crypto/sha256"
    "encoding/hex"
    "io"

    "golang.org/x/net/context"
)

type readCloserWrapper struct {
    io.Reader
    closer func() error
}

func (r *readCloserWrapper) Close() error {
    return r.closer()
}

// NewReadCloserWrapper returns a new io.ReadCloser.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
    return &readCloserWrapper{
        Reader: r,
        closer: closer,
    }
}

type readerErrWrapper struct {
    reader io.Reader
    closer func()
}

func (r *readerErrWrapper) Read(p []byte) (int, error) {
    n, err := r.reader.Read(p)
    if err != nil {
        r.closer()
    }
    return n, err
}

// NewReaderErrWrapper returns a new io.Reader.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
    return &readerErrWrapper{
        reader: r,
        closer: closer,
    }
}

// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
    h := sha256.New()
    if _, err := io.Copy(h, src); err != nil {
        return "", err
    }
    return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

// OnEOFReader wraps an io.ReadCloser and a function;
// the function runs once, either at end of file or when the reader is closed.
type OnEOFReader struct {
    Rc io.ReadCloser
    Fn func()
}

func (r *OnEOFReader) Read(p []byte) (n int, err error) {
    n, err = r.Rc.Read(p)
    if err == io.EOF {
        r.runFunc()
    }
    return
}

// Close closes the reader and runs the function.
func (r *OnEOFReader) Close() error {
    err := r.Rc.Close()
    r.runFunc()
    return err
}

func (r *OnEOFReader) runFunc() {
    if fn := r.Fn; fn != nil {
        fn()
        r.Fn = nil
    }
}

// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
// operations.
type cancelReadCloser struct {
    cancel func()
    pR     *io.PipeReader // Stream to read from
    pW     *io.PipeWriter
}

// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
// context is cancelled. The returned io.ReadCloser must be closed when it is
// no longer needed.
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
    pR, pW := io.Pipe()

    // Create a context used to signal when the pipe is closed
    doneCtx, cancel := context.WithCancel(context.Background())

    p := &cancelReadCloser{
        cancel: cancel,
        pR:     pR,
        pW:     pW,
    }

    go func() {
        _, err := io.Copy(pW, in)
        select {
        case <-ctx.Done():
            // If the context was closed, p.closeWithError
            // was already called. Calling it again would
            // change the error that Read returns.
        default:
            p.closeWithError(err)
        }
        in.Close()
    }()
    go func() {
        for {
            select {
            case <-ctx.Done():
                p.closeWithError(ctx.Err())
            case <-doneCtx.Done():
                return
            }
        }
    }()

    return p
}

// Read wraps the Read method of the pipe that provides data from the wrapped
// ReadCloser.
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
    return p.pR.Read(buf)
}

// closeWithError closes the wrapper and its underlying reader. It will
// cause future calls to Read to return err.
func (p *cancelReadCloser) closeWithError(err error) {
    p.pW.CloseWithError(err)
    p.cancel()
}

// Close closes the wrapper and its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
    p.closeWithError(io.EOF)
    return nil
}
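A sketch of wiring the cancellation wrapper around a reader; note this vendored code still uses golang.org/x/net/context, so the same import is assumed here. The reader and timeout are illustrative.

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"
    "time"

    "golang.org/x/net/context"

    "github.com/docker/docker/pkg/ioutils"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()

    src := ioutil.NopCloser(strings.NewReader("payload"))
    rc := ioutils.NewCancelReadCloser(ctx, src)
    defer rc.Close()

    // If the deadline fires before the copy finishes, err is ctx.Err();
    // otherwise the copy completes normally.
    if _, err := io.Copy(ioutil.Discard, rc); err != nil {
        fmt.Println("read ended early:", err)
    }
}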
@ -0,0 +1,10 @@
// +build !windows

package ioutils

import "io/ioutil"

// TempDir on Unix systems is equivalent to ioutil.TempDir.
func TempDir(dir, prefix string) (string, error) {
    return ioutil.TempDir(dir, prefix)
}
@ -0,0 +1,18 @@
// +build windows

package ioutils

import (
    "io/ioutil"

    "github.com/docker/docker/pkg/longpath"
)

// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
    tempDir, err := ioutil.TempDir(dir, prefix)
    if err != nil {
        return "", err
    }
    return longpath.AddPrefix(tempDir), nil
}
@ -0,0 +1,92 @@
package ioutils

import (
    "io"
    "sync"
)

// WriteFlusher wraps the Write and Flush operation ensuring that every write
// is a flush. In addition, the Close method can be called to intercept
// Read/Write calls if the target's lifecycle has already ended.
type WriteFlusher struct {
    w           io.Writer
    flusher     flusher
    flushed     chan struct{}
    flushedOnce sync.Once
    closed      chan struct{}
    closeLock   sync.Mutex
}

type flusher interface {
    Flush()
}

var errWriteFlusherClosed = io.EOF

func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
    select {
    case <-wf.closed:
        return 0, errWriteFlusherClosed
    default:
    }

    n, err = wf.w.Write(b)
    wf.Flush() // every write is a flush.
    return n, err
}

// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
    select {
    case <-wf.closed:
        return
    default:
    }

    wf.flushedOnce.Do(func() {
        close(wf.flushed)
    })
    wf.flusher.Flush()
}

// Flushed reports whether the stream has been flushed at least once.
func (wf *WriteFlusher) Flushed() bool {
    // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
    // be used to detect whether or not a response code has been issued.
    // Another hook should be used instead.
    var flushed bool
    select {
    case <-wf.flushed:
        flushed = true
    default:
    }
    return flushed
}

// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
    wf.closeLock.Lock()
    defer wf.closeLock.Unlock()

    select {
    case <-wf.closed:
        return errWriteFlusherClosed
    default:
        close(wf.closed)
    }
    return nil
}

// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
    var fl flusher
    if f, ok := w.(flusher); ok {
        fl = f
    } else {
        fl = &NopFlusher{}
    }
    return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}
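The typical consumer is an http.ResponseWriter, whose concrete type satisfies the unexported flusher interface via http.Flusher; a sketch of streaming progress lines (handler path and port are illustrative):

package main

import (
    "fmt"
    "net/http"
    "time"

    "github.com/docker/docker/pkg/ioutils"
)

func handler(w http.ResponseWriter, r *http.Request) {
    wf := ioutils.NewWriteFlusher(w) // picks up the writer's Flush method automatically
    defer wf.Close()
    for i := 0; i < 3; i++ {
        fmt.Fprintf(wf, "step %d\n", i) // each write reaches the client immediately
        time.Sleep(100 * time.Millisecond)
    }
}

func main() {
    http.HandleFunc("/progress", handler)
    http.ListenAndServe(":8080", nil)
}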
@ -0,0 +1,66 @@
package ioutils

import "io"

// NopWriter represents a type whose Write operation is a no-op.
type NopWriter struct{}

func (*NopWriter) Write(buf []byte) (int, error) {
    return len(buf), nil
}

type nopWriteCloser struct {
    io.Writer
}

func (w *nopWriteCloser) Close() error { return nil }

// NopWriteCloser returns a nopWriteCloser.
func NopWriteCloser(w io.Writer) io.WriteCloser {
    return &nopWriteCloser{w}
}

// NopFlusher represents a type whose Flush operation is a no-op.
type NopFlusher struct{}

// Flush is a nop operation.
func (f *NopFlusher) Flush() {}

type writeCloserWrapper struct {
    io.Writer
    closer func() error
}

func (r *writeCloserWrapper) Close() error {
    return r.closer()
}

// NewWriteCloserWrapper returns a new io.WriteCloser.
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
    return &writeCloserWrapper{
        Writer: r,
        closer: closer,
    }
}

// WriteCounter wraps a concrete io.Writer and holds a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when the write return is masked
// (e.g., json.Encoder.Encode())
type WriteCounter struct {
    Count  int64
    Writer io.Writer
}

// NewWriteCounter returns a new WriteCounter.
func NewWriteCounter(w io.Writer) *WriteCounter {
    return &WriteCounter{
        Writer: w,
    }
}

func (wc *WriteCounter) Write(p []byte) (count int, err error) {
    count, err = wc.Writer.Write(p)
    wc.Count += int64(count)
    return
}
@ -0,0 +1,137 @@
// Package pools provides a collection of pools which provide various
// data types with buffers. These can be used to lower the number of
// memory allocations and reuse buffers.
//
// New pools should be added to this package to allow them to be
// shared across packages.
//
// Utility functions which operate on pools should be added to this
// package to allow them to be reused.
package pools

import (
    "bufio"
    "io"
    "sync"

    "github.com/docker/docker/pkg/ioutils"
)

const buffer32K = 32 * 1024

var (
    // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
    BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
    // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
    BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
    buffer32KPool      = newBufferPoolWithSize(buffer32K)
)

// BufioReaderPool is a bufio reader that uses sync.Pool.
type BufioReaderPool struct {
    pool sync.Pool
}

// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
    return &BufioReaderPool{
        pool: sync.Pool{
            New: func() interface{} { return bufio.NewReaderSize(nil, size) },
        },
    }
}

// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
    buf := bufPool.pool.Get().(*bufio.Reader)
    buf.Reset(r)
    return buf
}

// Put puts the bufio.Reader back into the pool.
func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
    b.Reset(nil)
    bufPool.pool.Put(b)
}

type bufferPool struct {
    pool sync.Pool
}

func newBufferPoolWithSize(size int) *bufferPool {
    return &bufferPool{
        pool: sync.Pool{
            New: func() interface{} { return make([]byte, size) },
        },
    }
}

func (bp *bufferPool) Get() []byte {
    return bp.pool.Get().([]byte)
}

func (bp *bufferPool) Put(b []byte) {
    bp.pool.Put(b)
}

// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
    buf := buffer32KPool.Get()
    written, err = io.CopyBuffer(dst, src, buf)
    buffer32KPool.Put(buf)
    return
}

// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
// into the pool and closes the reader if it's an io.ReadCloser.
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
    return ioutils.NewReadCloserWrapper(r, func() error {
        if readCloser, ok := r.(io.ReadCloser); ok {
            readCloser.Close()
        }
        bufPool.Put(buf)
        return nil
    })
}

// BufioWriterPool is a bufio writer that uses sync.Pool.
type BufioWriterPool struct {
    pool sync.Pool
}

// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
    return &BufioWriterPool{
        pool: sync.Pool{
            New: func() interface{} { return bufio.NewWriterSize(nil, size) },
        },
    }
}

// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
    buf := bufPool.pool.Get().(*bufio.Writer)
    buf.Reset(w)
    return buf
}

// Put puts the bufio.Writer back into the pool.
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
    b.Reset(nil)
    bufPool.pool.Put(b)
}

// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
// into the pool and closes the writer if it's an io.WriteCloser.
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
    return ioutils.NewWriteCloserWrapper(w, func() error {
        buf.Flush()
        if writeCloser, ok := w.(io.WriteCloser); ok {
            writeCloser.Close()
        }
        bufPool.Put(buf)
        return nil
    })
}
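Usage sketch: the 32K pools are package-level singletons, so hot copy paths across goroutines reuse buffers instead of allocating fresh ones each time (inputs below are illustrative):

package main

import (
    "fmt"
    "strings"

    "github.com/docker/docker/pkg/pools"
)

func main() {
    var sb strings.Builder
    // pools.Copy is a drop-in for io.Copy backed by the shared 32K buffer pool.
    n, err := pools.Copy(&sb, strings.NewReader("some payload"))
    fmt.Println(n, err, sb.String())

    // Pooled bufio.Reader: Get wires it to a source, Put recycles it.
    br := pools.BufioReader32KPool.Get(strings.NewReader("line one\n"))
    line, _ := br.ReadString('\n')
    pools.BufioReader32KPool.Put(br)
    fmt.Print(line)
}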
@ -0,0 +1,11 @@
package promise

// Go is a basic promise implementation: it wraps a call to a function in a
// goroutine, and returns a channel which will later return the function's
// return value.
func Go(f func() error) chan error {
    ch := make(chan error, 1)
    go func() {
        ch <- f()
    }()
    return ch
}
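Usage sketch: because the channel is buffered with capacity 1, the goroutine can always deliver its result and never leaks, even if the caller is slow to receive:

package main

import (
    "errors"
    "fmt"

    "github.com/docker/docker/pkg/promise"
)

func main() {
    errC := promise.Go(func() error {
        // ... work running concurrently ...
        return errors.New("done with an error")
    })
    // Do other things, then collect the result.
    fmt.Println(<-errC)
}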
@ -0,0 +1,5 @@
# reexec

The `reexec` package facilitates the busybox-style reexec of the docker binary that we require because
of the forking limitations of using Go. Handlers can be registered with a name, and the argv[0] of
the exec of the binary will be used to find and execute custom init paths.
@ -0,0 +1,28 @@
// +build linux

package reexec

import (
    "os/exec"
    "syscall"
)

// Self returns the path to the current process's binary.
// Returns "/proc/self/exe".
func Self() string {
    return "/proc/self/exe"
}

// Command returns *exec.Cmd which has Path as current binary. It also sets
// SysProcAttr.Pdeathsig to SIGTERM.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
    return &exec.Cmd{
        Path: Self(),
        Args: args,
        SysProcAttr: &syscall.SysProcAttr{
            Pdeathsig: syscall.SIGTERM,
        },
    }
}
@ -0,0 +1,23 @@
// +build freebsd solaris darwin

package reexec

import (
    "os/exec"
)

// Self returns the path to the current process's binary.
// Uses os.Args[0].
func Self() string {
    return naiveSelf()
}

// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd {
    return &exec.Cmd{
        Path: Self(),
        Args: args,
    }
}
@ -0,0 +1,12 @@
// +build !linux,!windows,!freebsd,!solaris,!darwin

package reexec

import (
    "os/exec"
)

// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
func Command(args ...string) *exec.Cmd {
    return nil
}
@ -0,0 +1,23 @@
// +build windows

package reexec

import (
    "os/exec"
)

// Self returns the path to the current process's binary.
// Uses os.Args[0].
func Self() string {
    return naiveSelf()
}

// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func Command(args ...string) *exec.Cmd {
    return &exec.Cmd{
        Path: Self(),
        Args: args,
    }
}
@ -0,0 +1,47 @@
package reexec

import (
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
)

var registeredInitializers = make(map[string]func())

// Register adds an initialization func under the specified name
func Register(name string, initializer func()) {
    if _, exists := registeredInitializers[name]; exists {
        panic(fmt.Sprintf("reexec func already registered under name %q", name))
    }

    registeredInitializers[name] = initializer
}

// Init is called as the first part of the exec process and returns true if an
// initialization function was called.
func Init() bool {
    initializer, exists := registeredInitializers[os.Args[0]]
    if exists {
        initializer()

        return true
    }
    return false
}

func naiveSelf() string {
    name := os.Args[0]
    if filepath.Base(name) == name {
        if lp, err := exec.LookPath(name); err == nil {
            return lp
        }
    }
    // handle conversion of relative paths to absolute
    if absName, err := filepath.Abs(name); err == nil {
        return absName
    }
    // if we couldn't get absolute name, return original
    // (NOTE: Go only errors on Abs() if os.Getwd fails)
    return name
}
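Putting Register, Init, and Command together: initializers are keyed by argv[0], so re-invoking the same binary under a registered name dispatches into the handler before main proper. A minimal sketch (the handler name "my-init" is illustrative):

package main

import (
    "fmt"
    "os"

    "github.com/docker/docker/pkg/reexec"
)

func init() {
    reexec.Register("my-init", func() {
        fmt.Println("running as child, pid", os.Getpid())
        os.Exit(0)
    })
}

func main() {
    if reexec.Init() {
        return // argv[0] matched a registered initializer
    }
    cmd := reexec.Command("my-init") // argv[0] becomes "my-init" in the child
    cmd.Stdout = os.Stdout
    if err := cmd.Run(); err != nil {
        fmt.Println("reexec failed:", err)
    }
}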
@ -0,0 +1 @@
This package provides helper functions for dealing with string identifiers
@ -0,0 +1,99 @@
// Package stringid provides helper functions for dealing with string identifiers
package stringid

import (
    cryptorand "crypto/rand"
    "encoding/hex"
    "fmt"
    "io"
    "math"
    "math/big"
    "math/rand"
    "regexp"
    "strconv"
    "strings"
    "time"
)

const shortLen = 12

var (
    validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
    validHex     = regexp.MustCompile(`^[a-f0-9]{64}$`)
)

// IsShortID determines if an arbitrary string *looks like* a short ID.
func IsShortID(id string) bool {
    return validShortID.MatchString(id)
}

// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
    if i := strings.IndexRune(id, ':'); i >= 0 {
        id = id[i+1:]
    }
    if len(id) > shortLen {
        id = id[:shortLen]
    }
    return id
}

func generateID(r io.Reader) string {
    b := make([]byte, 32)
    for {
        if _, err := io.ReadFull(r, b); err != nil {
            panic(err) // This shouldn't happen
        }
        id := hex.EncodeToString(b)
        // if we try to parse the truncated form as an int and we don't have
        // an error then the value is all numeric and causes issues when
        // used as a hostname. ref #3869
        if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
            continue
        }
        return id
    }
}

// GenerateRandomID returns a unique id.
func GenerateRandomID() string {
    return generateID(cryptorand.Reader)
}

// GenerateNonCryptoID generates a unique id without using cryptographically
// secure sources of random.
// It helps you to save entropy.
func GenerateNonCryptoID() string {
    return generateID(readerFunc(rand.Read))
}

// ValidateID checks whether an ID string is a valid image ID.
func ValidateID(id string) error {
    if ok := validHex.MatchString(id); !ok {
        return fmt.Errorf("image ID %q is invalid", id)
    }
    return nil
}

func init() {
    // safely set the seed globally so we generate random ids. Tries to use a
    // crypto seed before falling back to time.
    var seed int64
    if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
        // This should not happen, but worst-case fallback to time-based seed.
        seed = time.Now().UnixNano()
    } else {
        seed = cryptoseed.Int64()
    }

    rand.Seed(seed)
}

type readerFunc func(p []byte) (int, error)

func (fn readerFunc) Read(p []byte) (int, error) {
    return fn(p)
}
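Usage sketch: full IDs are 64 hex characters, and TruncateID yields the familiar 12-character short form shown by `docker ps`:

package main

import (
    "fmt"

    "github.com/docker/docker/pkg/stringid"
)

func main() {
    id := stringid.GenerateRandomID() // 64 hex chars, never all-numeric when truncated
    short := stringid.TruncateID(id)  // first 12 chars
    fmt.Println(id, short, stringid.IsShortID(short)) // IsShortID(short) == true
}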
@ -0,0 +1,21 @@
package tarsum

// BuilderContext is an interface extending TarSum by adding the Remove method.
// In general there was concern about adding this method to TarSum itself
// so instead it is being added just to "BuilderContext" which will then
// only be used during the .dockerignore file processing
// - see builder/evaluator.go
type BuilderContext interface {
    TarSum
    Remove(string)
}

func (bc *tarSum) Remove(filename string) {
    for i, fis := range bc.sums {
        if fis.Name() == filename {
            bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
            // Note, we don't just return because there could be
            // more than one with this name
        }
    }
}
@@ -0,0 +1,126 @@
package tarsum

import "sort"

// FileInfoSumInterface provides an interface for accessing file checksum
// information within a tar file. This info is accessed through an interface
// so the actual name and sum cannot be meddled with.
type FileInfoSumInterface interface {
	// File name
	Name() string
	// Checksum of this particular file and its headers
	Sum() string
	// Position of file in the tar
	Pos() int64
}

type fileInfoSum struct {
	name string
	sum  string
	pos  int64
}

func (fis fileInfoSum) Name() string {
	return fis.name
}
func (fis fileInfoSum) Sum() string {
	return fis.sum
}
func (fis fileInfoSum) Pos() int64 {
	return fis.pos
}

// FileInfoSums provides a list of FileInfoSumInterfaces.
type FileInfoSums []FileInfoSumInterface

// GetFile returns the first FileInfoSumInterface with a matching name.
func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
	for i := range fis {
		if fis[i].Name() == name {
			return fis[i]
		}
	}
	return nil
}

// GetAllFile returns a FileInfoSums with all matching names.
func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
	f := FileInfoSums{}
	for i := range fis {
		if fis[i].Name() == name {
			f = append(f, fis[i])
		}
	}
	return f
}

// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
	seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map.
	for i := range fis {
		f := fis[i]
		if _, ok := seen[f.Name()]; ok {
			dups = append(dups, f)
		} else {
			seen[f.Name()] = 0
		}
	}
	return dups
}

// Len returns the size of the FileInfoSums.
func (fis FileInfoSums) Len() int { return len(fis) }

// Swap swaps two FileInfoSum values in a FileInfoSums list.
func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }

// SortByPos sorts FileInfoSums content by position.
func (fis FileInfoSums) SortByPos() {
	sort.Sort(byPos{fis})
}

// SortByNames sorts FileInfoSums content by name.
func (fis FileInfoSums) SortByNames() {
	sort.Sort(byName{fis})
}

// SortBySums sorts FileInfoSums content by sums.
func (fis FileInfoSums) SortBySums() {
	dups := fis.GetDuplicatePaths()
	if len(dups) > 0 {
		sort.Sort(bySum{fis, dups})
	} else {
		sort.Sort(bySum{fis, nil})
	}
}

// byName is a sort.Sort helper for sorting by file names.
// If names are the same, order them by their appearance in the tar archive.
type byName struct{ FileInfoSums }

func (bn byName) Less(i, j int) bool {
	if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
		return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
	}
	return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
}

// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive.
type bySum struct {
	FileInfoSums
	dups FileInfoSums
}

func (bs bySum) Less(i, j int) bool {
	if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
		return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
	}
	return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
}

// byPos is a sort.Sort helper for sorting the fileinfos by their original order in the tar archive.
type byPos struct{ FileInfoSums }

func (bp byPos) Less(i, j int) bool {
	return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
}
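A small sketch of how the sort helpers behave. Since the package's own fileInfoSum type is unexported, the example defines a throwaway type satisfying FileInfoSumInterface; the names, sums, and import path below are made up for illustration.

```
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/tarsum" // assumed import path
)

// fis is a throwaway implementation of tarsum.FileInfoSumInterface.
type fis struct {
	name, sum string
	pos       int64
}

func (f fis) Name() string { return f.name }
func (f fis) Sum() string  { return f.sum }
func (f fis) Pos() int64   { return f.pos }

func main() {
	sums := tarsum.FileInfoSums{
		fis{"etc/hosts", "bb", 1},
		fis{"bin/sh", "aa", 0},
		fis{"etc/hosts", "cc", 2}, // duplicate path, later in the archive
	}

	// Duplicated paths keep their archive order even when sorting by sum.
	sums.SortBySums()
	for _, s := range sums {
		fmt.Println(s.Name(), s.Sum()) // bin/sh aa, etc/hosts bb, etc/hosts cc
	}
}
```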
@@ -0,0 +1,295 @@
// Package tarsum provides algorithms to perform checksum calculation on
// filesystem layers.
//
// The transportation of filesystems, regarding Docker, is done with tar(1)
// archives. There are a variety of tar serialization formats [2], and a key
// concern here is ensuring a repeatable checksum given a set of inputs from a
// generic tar archive. Types of transportation include distribution to and from a
// registry endpoint, saving and loading through commands or Docker daemon APIs,
// transferring the build context from client to Docker daemon, and committing the
// filesystem of a container to become an image.
//
// As tar archives are used for transit, but not preserved in many situations, the
// focus of the algorithm is to ensure the integrity of the preserved filesystem,
// while maintaining a deterministic accountability. This means neither
// constraining the ordering or manipulation of the files during the creation or
// unpacking of the archive, nor including additional metadata state about the
// file system attributes.
package tarsum

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"crypto"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"path"
	"strings"
)

const (
	buf8K  = 8 * 1024
	buf16K = 16 * 1024
	buf32K = 32 * 1024
)

// NewTarSum creates a new interface for calculating a fixed time checksum of a
// tar archive.
//
// This is used for calculating checksums of layers of an image, in some cases
// including the byte payload of the image's json metadata as well, and for
// calculating the checksums for buildcache.
func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
	return NewTarSumHash(r, dc, v, DefaultTHash)
}

// NewTarSumHash creates a new TarSum, providing a THash to use rather than
// the DefaultTHash.
func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
	headerSelector, err := getTarHeaderSelector(v)
	if err != nil {
		return nil, err
	}
	ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
	err = ts.initTarSum()
	return ts, err
}

// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label.
func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) {
	parts := strings.SplitN(label, "+", 2)
	if len(parts) != 2 {
		return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}")
	}

	versionName, hashName := parts[0], parts[1]

	version, ok := tarSumVersionsByName[versionName]
	if !ok {
		return nil, fmt.Errorf("unknown TarSum version name: %q", versionName)
	}

	hashConfig, ok := standardHashConfigs[hashName]
	if !ok {
		return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName)
	}

	tHash := NewTHash(hashConfig.name, hashConfig.hash.New)

	return NewTarSumHash(r, disableCompression, version, tHash)
}

// TarSum is the generic interface for calculating fixed time
// checksums of a tar archive.
type TarSum interface {
	io.Reader
	GetSums() FileInfoSums
	Sum([]byte) string
	Version() Version
	Hash() THash
}

// tarSum struct is the structure for a Version0 checksum calculation.
type tarSum struct {
	io.Reader
	tarR               *tar.Reader
	tarW               *tar.Writer
	writer             writeCloseFlusher
	bufTar             *bytes.Buffer
	bufWriter          *bytes.Buffer
	bufData            []byte
	h                  hash.Hash
	tHash              THash
	sums               FileInfoSums
	fileCounter        int64
	currentFile        string
	finished           bool
	first              bool
	DisableCompression bool              // false by default. When false, the output is gzip compressed.
	tarSumVersion      Version           // this field is not exported so it can not be mutated during use
	headerSelector     tarHeaderSelector // handles selecting and ordering headers for files in the archive
}

func (ts tarSum) Hash() THash {
	return ts.tHash
}

func (ts tarSum) Version() Version {
	return ts.tarSumVersion
}

// THash provides a hash.Hash type generator and its name.
type THash interface {
	Hash() hash.Hash
	Name() string
}

// NewTHash is a convenience method for creating a THash.
func NewTHash(name string, h func() hash.Hash) THash {
	return simpleTHash{n: name, h: h}
}

type tHashConfig struct {
	name string
	hash crypto.Hash
}

var (
	// NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
	standardHashConfigs = map[string]tHashConfig{
		"sha256": {name: "sha256", hash: crypto.SHA256},
		"sha512": {name: "sha512", hash: crypto.SHA512},
	}
)

// DefaultTHash is the default TarSum hashing algorithm - "sha256".
var DefaultTHash = NewTHash("sha256", sha256.New)

type simpleTHash struct {
	n string
	h func() hash.Hash
}

func (sth simpleTHash) Name() string    { return sth.n }
func (sth simpleTHash) Hash() hash.Hash { return sth.h() }

func (ts *tarSum) encodeHeader(h *tar.Header) error {
	for _, elem := range ts.headerSelector.selectHeaders(h) {
		if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil {
			return err
		}
	}
	return nil
}

func (ts *tarSum) initTarSum() error {
	ts.bufTar = bytes.NewBuffer([]byte{})
	ts.bufWriter = bytes.NewBuffer([]byte{})
	ts.tarR = tar.NewReader(ts.Reader)
	ts.tarW = tar.NewWriter(ts.bufTar)
	if !ts.DisableCompression {
		ts.writer = gzip.NewWriter(ts.bufWriter)
	} else {
		ts.writer = &nopCloseFlusher{Writer: ts.bufWriter}
	}
	if ts.tHash == nil {
		ts.tHash = DefaultTHash
	}
	ts.h = ts.tHash.Hash()
	ts.h.Reset()
	ts.first = true
	ts.sums = FileInfoSums{}
	return nil
}

func (ts *tarSum) Read(buf []byte) (int, error) {
	if ts.finished {
		return ts.bufWriter.Read(buf)
	}
	if len(ts.bufData) < len(buf) {
		switch {
		case len(buf) <= buf8K:
			ts.bufData = make([]byte, buf8K)
		case len(buf) <= buf16K:
			ts.bufData = make([]byte, buf16K)
		case len(buf) <= buf32K:
			ts.bufData = make([]byte, buf32K)
		default:
			ts.bufData = make([]byte, len(buf))
		}
	}
	buf2 := ts.bufData[:len(buf)]

	n, err := ts.tarR.Read(buf2)
	if err != nil {
		if err == io.EOF {
			if _, err := ts.h.Write(buf2[:n]); err != nil {
				return 0, err
			}
			if !ts.first {
				ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter})
				ts.fileCounter++
				ts.h.Reset()
			} else {
				ts.first = false
			}

			currentHeader, err := ts.tarR.Next()
			if err != nil {
				if err == io.EOF {
					if err := ts.tarW.Close(); err != nil {
						return 0, err
					}
					if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
						return 0, err
					}
					if err := ts.writer.Close(); err != nil {
						return 0, err
					}
					ts.finished = true
					return n, nil
				}
				return n, err
			}
			ts.currentFile = path.Clean(currentHeader.Name)
			if err := ts.encodeHeader(currentHeader); err != nil {
				return 0, err
			}
			if err := ts.tarW.WriteHeader(currentHeader); err != nil {
				return 0, err
			}
			if _, err := ts.tarW.Write(buf2[:n]); err != nil {
				return 0, err
			}
			ts.tarW.Flush()
			if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
				return 0, err
			}
			ts.writer.Flush()

			return ts.bufWriter.Read(buf)
		}
		return n, err
	}

	// Filling the hash buffer
	if _, err = ts.h.Write(buf2[:n]); err != nil {
		return 0, err
	}

	// Filling the tar writer
	if _, err = ts.tarW.Write(buf2[:n]); err != nil {
		return 0, err
	}
	ts.tarW.Flush()

	// Filling the output writer
	if _, err = io.Copy(ts.writer, ts.bufTar); err != nil {
		return 0, err
	}
	ts.writer.Flush()

	return ts.bufWriter.Read(buf)
}

func (ts *tarSum) Sum(extra []byte) string {
	ts.sums.SortBySums()
	h := ts.tHash.Hash()
	if extra != nil {
		h.Write(extra)
	}
	for _, fis := range ts.sums {
		h.Write([]byte(fis.Sum()))
	}
	checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil))
	return checksum
}

func (ts *tarSum) GetSums() FileInfoSums {
	return ts.sums
}
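A minimal end-to-end sketch of driving the interface above: the checksum only becomes available once the TarSum reader has been fully consumed. The one-file tar payload is made up, and the import path is an assumption based on the package's usual vendored location.

```
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/tarsum" // assumed import path
)

func main() {
	// Build a tiny in-memory tar archive with one file.
	// Error handling on the writes is elided for brevity.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: 5})
	tw.Write([]byte("hello"))
	tw.Close()

	// Disable compression so the pass-through stream stays plain tar.
	ts, err := tarsum.NewTarSum(&buf, true, tarsum.Version1)
	if err != nil {
		panic(err)
	}

	// Reading the stream to the end is what drives the hashing.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		panic(err)
	}

	fmt.Println(ts.Sum(nil)) // e.g. "tarsum.v1+sha256:<hex digest>"
	for _, f := range ts.GetSums() {
		fmt.Println(f.Name(), f.Sum())
	}
}
```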
@@ -0,0 +1,158 @@
package tarsum

import (
	"archive/tar"
	"errors"
	"io"
	"sort"
	"strconv"
	"strings"
)

// Version is used for versioning of the TarSum algorithm,
// based on the prefix of the checksum string,
// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"
type Version int

// Prefix of "tarsum"
const (
	Version0 Version = iota
	Version1
	// VersionDev is either the latest or an unsettled next version of the TarSum calculation.
	VersionDev
)

// WriteV1Header writes a tar header to a writer in V1 tarsum format.
func WriteV1Header(h *tar.Header, w io.Writer) {
	for _, elem := range v1TarHeaderSelect(h) {
		w.Write([]byte(elem[0] + elem[1]))
	}
}

// VersionLabelForChecksum returns the label for the given tarsum
// checksum, i.e., everything before the first `+` character in
// the string, or an empty string if no label separator is found.
func VersionLabelForChecksum(checksum string) string {
	// Checksums are in the form: {versionLabel}+{hashID}:{hex}
	sepIndex := strings.Index(checksum, "+")
	if sepIndex < 0 {
		return ""
	}
	return checksum[:sepIndex]
}

// GetVersions gets a list of all known tarsum versions.
func GetVersions() []Version {
	v := []Version{}
	for k := range tarSumVersions {
		v = append(v, k)
	}
	return v
}

var (
	tarSumVersions = map[Version]string{
		Version0:   "tarsum",
		Version1:   "tarsum.v1",
		VersionDev: "tarsum.dev",
	}
	tarSumVersionsByName = map[string]Version{
		"tarsum":     Version0,
		"tarsum.v1":  Version1,
		"tarsum.dev": VersionDev,
	}
)

func (tsv Version) String() string {
	return tarSumVersions[tsv]
}

// GetVersionFromTarsum returns the Version from the provided string.
func GetVersionFromTarsum(tarsum string) (Version, error) {
	tsv := tarsum
	if strings.Contains(tarsum, "+") {
		tsv = strings.SplitN(tarsum, "+", 2)[0]
	}
	for v, s := range tarSumVersions {
		if s == tsv {
			return v, nil
		}
	}
	return -1, ErrNotVersion
}

// Errors that may be returned by functions in this package
var (
	ErrNotVersion            = errors.New("string does not include a TarSum Version")
	ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented")
)

// tarHeaderSelector is the interface which different versions
// of tarsum should use for selecting and ordering tar headers
// for each item in the archive.
type tarHeaderSelector interface {
	selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
}

type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)

func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
	return f(h)
}

func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
	return [][2]string{
		{"name", h.Name},
		{"mode", strconv.FormatInt(h.Mode, 10)},
		{"uid", strconv.Itoa(h.Uid)},
		{"gid", strconv.Itoa(h.Gid)},
		{"size", strconv.FormatInt(h.Size, 10)},
		{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
		{"typeflag", string([]byte{h.Typeflag})},
		{"linkname", h.Linkname},
		{"uname", h.Uname},
		{"gname", h.Gname},
		{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
		{"devminor", strconv.FormatInt(h.Devminor, 10)},
	}
}

func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
	// Get extended attributes. Note the zero length with capacity: appending
	// into a slice created with a non-zero length would leave empty keys at
	// the front.
	xAttrKeys := make([]string, 0, len(h.Xattrs))
	for k := range h.Xattrs {
		xAttrKeys = append(xAttrKeys, k)
	}
	sort.Strings(xAttrKeys)

	// Make the slice with enough capacity to hold the 11 basic headers
	// we want from the v0 selector plus however many xattrs we have.
	orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))

	// Copy all headers from v0 excluding the 'mtime' header (index 5).
	v0headers := v0TarHeaderSelect(h)
	orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
	orderedHeaders = append(orderedHeaders, v0headers[6:]...)

	// Finally, append the sorted xattrs.
	for _, k := range xAttrKeys {
		orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
	}

	return
}

var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
	Version0:   v0TarHeaderSelect,
	Version1:   v1TarHeaderSelect,
	VersionDev: v1TarHeaderSelect,
}

func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
	headerSelector, ok := registeredHeaderSelectors[v]
	if !ok {
		return nil, ErrVersionNotImplemented
	}

	return headerSelector, nil
}
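A small sketch of the version-label helpers in action, reusing the digest from the doc comment above; the import path is assumed.

```
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/tarsum" // assumed import path
)

func main() {
	checksum := "tarsum.v1+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"

	fmt.Println(tarsum.VersionLabelForChecksum(checksum)) // "tarsum.v1"

	v, err := tarsum.GetVersionFromTarsum(checksum)
	fmt.Println(v, err) // tarsum.v1 <nil>

	// A string without a known version label yields ErrNotVersion.
	_, err = tarsum.GetVersionFromTarsum("sha256:abcdef")
	fmt.Println(err == tarsum.ErrNotVersion) // true
}
```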
@@ -0,0 +1,22 @@
package tarsum

import (
	"io"
)

type writeCloseFlusher interface {
	io.WriteCloser
	Flush() error
}

type nopCloseFlusher struct {
	io.Writer
}

func (n *nopCloseFlusher) Close() error {
	return nil
}

func (n *nopCloseFlusher) Flush() error {
	return nil
}
vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go (generated, vendored, new file, 111 lines)
@@ -0,0 +1,111 @@
package user

import (
	"errors"

	"golang.org/x/sys/unix"
)

var (
	// The current operating system does not provide the required data for user lookups.
	ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
	// No matching entries found in file.
	ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
	ErrNoGroupEntries  = errors.New("no matching entries in group file")
)

func lookupUser(filter func(u User) bool) (User, error) {
	// Get operating system-specific passwd reader-closer.
	passwd, err := GetPasswd()
	if err != nil {
		return User{}, err
	}
	defer passwd.Close()

	// Get the users.
	users, err := ParsePasswdFilter(passwd, filter)
	if err != nil {
		return User{}, err
	}

	// No user entries found.
	if len(users) == 0 {
		return User{}, ErrNoPasswdEntries
	}

	// Assume the first entry is the "correct" one.
	return users[0], nil
}

// CurrentUser looks up the current user by their user id in /etc/passwd. If the
// user cannot be found (or there is no /etc/passwd file on the filesystem),
// then CurrentUser returns an error.
func CurrentUser() (User, error) {
	return LookupUid(unix.Getuid())
}

// LookupUser looks up a user by their username in /etc/passwd. If the user
// cannot be found (or there is no /etc/passwd file on the filesystem), then
// LookupUser returns an error.
func LookupUser(username string) (User, error) {
	return lookupUser(func(u User) bool {
		return u.Name == username
	})
}

// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
// be found (or there is no /etc/passwd file on the filesystem), then LookupUid
// returns an error.
func LookupUid(uid int) (User, error) {
	return lookupUser(func(u User) bool {
		return u.Uid == uid
	})
}

func lookupGroup(filter func(g Group) bool) (Group, error) {
	// Get operating system-specific group reader-closer.
	group, err := GetGroup()
	if err != nil {
		return Group{}, err
	}
	defer group.Close()

	// Get the groups.
	groups, err := ParseGroupFilter(group, filter)
	if err != nil {
		return Group{}, err
	}

	// No group entries found.
	if len(groups) == 0 {
		return Group{}, ErrNoGroupEntries
	}

	// Assume the first entry is the "correct" one.
	return groups[0], nil
}

// CurrentGroup looks up the current user's group by their primary group id's
// entry in /etc/group. If the group cannot be found (or there is no
// /etc/group file on the filesystem), then CurrentGroup returns an error.
func CurrentGroup() (Group, error) {
	return LookupGid(unix.Getgid())
}

// LookupGroup looks up a group by its name in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGroup
// returns an error.
func LookupGroup(groupname string) (Group, error) {
	return lookupGroup(func(g Group) bool {
		return g.Name == groupname
	})
}

// LookupGid looks up a group by its group id in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGid
// returns an error.
func LookupGid(gid int) (Group, error) {
	return lookupGroup(func(g Group) bool {
		return g.Gid == gid
	})
}
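A brief sketch of how these lookups are typically used. It only works on Unix-like systems with /etc/passwd and /etc/group present, and the "no-such-user" name is a deliberately nonexistent placeholder.

```
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Resolve the process's own passwd entry via its uid.
	me, err := user.CurrentUser()
	if err != nil {
		panic(err)
	}
	fmt.Printf("uid=%d gid=%d home=%s\n", me.Uid, me.Gid, me.Home)

	// Lookups by name or id return ErrNoPasswdEntries / ErrNoGroupEntries
	// when nothing matches.
	if _, err := user.LookupUser("no-such-user"); err != nil {
		fmt.Println(err) // no matching entries in passwd file
	}
}
```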
|
30
vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
generated
vendored
Normal file
30
vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package user

import (
	"io"
	"os"
)

// Unix-specific paths to the passwd and group formatted files.
const (
	unixPasswdPath = "/etc/passwd"
	unixGroupPath  = "/etc/group"
)

func GetPasswdPath() (string, error) {
	return unixPasswdPath, nil
}

func GetPasswd() (io.ReadCloser, error) {
	return os.Open(unixPasswdPath)
}

func GetGroupPath() (string, error) {
	return unixGroupPath, nil
}

func GetGroup() (io.ReadCloser, error) {
	return os.Open(unixGroupPath)
}
|
21
vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
generated
vendored
Normal file
21
vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris

package user

import "io"

func GetPasswdPath() (string, error) {
	return "", ErrUnsupported
}

func GetPasswd() (io.ReadCloser, error) {
	return nil, ErrUnsupported
}

func GetGroupPath() (string, error) {
	return "", ErrUnsupported
}

func GetGroup() (io.ReadCloser, error) {
	return nil, ErrUnsupported
}
@@ -0,0 +1,441 @@
package user

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
)

const (
	minId = 0
	maxId = 1<<31 - 1 // for compatibility with 32-bit systems
)

var (
	ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId)
)

type User struct {
	Name  string
	Pass  string
	Uid   int
	Gid   int
	Gecos string
	Home  string
	Shell string
}

type Group struct {
	Name string
	Pass string
	Gid  int
	List []string
}

func parseLine(line string, v ...interface{}) {
	if line == "" {
		return
	}

	parts := strings.Split(line, ":")
	for i, p := range parts {
		// Ignore cases where we don't have enough fields to populate the arguments.
		// Some configuration files like to misbehave.
		if len(v) <= i {
			break
		}

		// Use the type of the argument to figure out how to parse it, scanf() style.
		// This is legit.
		switch e := v[i].(type) {
		case *string:
			*e = p
		case *int:
			// "numbers", with conversion errors ignored because of some misbehaving configuration files.
			*e, _ = strconv.Atoi(p)
		case *[]string:
			// Comma-separated lists.
			if p != "" {
				*e = strings.Split(p, ",")
			} else {
				*e = []string{}
			}
		default:
			// Someone goof'd when writing code using this function. Scream so they can hear us.
			panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! %#v is not a pointer!", e))
		}
	}
}

func ParsePasswdFile(path string) ([]User, error) {
	passwd, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer passwd.Close()
	return ParsePasswd(passwd)
}

func ParsePasswd(passwd io.Reader) ([]User, error) {
	return ParsePasswdFilter(passwd, nil)
}

func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
	passwd, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer passwd.Close()
	return ParsePasswdFilter(passwd, filter)
}

func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
	if r == nil {
		return nil, fmt.Errorf("nil source for passwd-formatted data")
	}

	var (
		s   = bufio.NewScanner(r)
		out = []User{}
	)

	for s.Scan() {
		if err := s.Err(); err != nil {
			return nil, err
		}

		line := strings.TrimSpace(s.Text())
		if line == "" {
			continue
		}

		// see: man 5 passwd
		// name:password:UID:GID:GECOS:directory:shell
		// Name:Pass:Uid:Gid:Gecos:Home:Shell
		// root:x:0:0:root:/root:/bin/bash
		// adm:x:3:4:adm:/var/adm:/bin/false
		p := User{}
		parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell)

		if filter == nil || filter(p) {
			out = append(out, p)
		}
	}

	return out, nil
}

func ParseGroupFile(path string) ([]Group, error) {
	group, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	defer group.Close()
	return ParseGroup(group)
}

func ParseGroup(group io.Reader) ([]Group, error) {
	return ParseGroupFilter(group, nil)
}

func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
	group, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer group.Close()
	return ParseGroupFilter(group, filter)
}

func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
	if r == nil {
		return nil, fmt.Errorf("nil source for group-formatted data")
	}

	var (
		s   = bufio.NewScanner(r)
		out = []Group{}
	)

	for s.Scan() {
		if err := s.Err(); err != nil {
			return nil, err
		}

		text := s.Text()
		if text == "" {
			continue
		}

		// see: man 5 group
		// group_name:password:GID:user_list
		// Name:Pass:Gid:List
		// root:x:0:root
		// adm:x:4:root,adm,daemon
		p := Group{}
		parseLine(text, &p.Name, &p.Pass, &p.Gid, &p.List)

		if filter == nil || filter(p) {
			out = append(out, p)
		}
	}

	return out, nil
}

type ExecUser struct {
	Uid   int
	Gid   int
	Sgids []int
	Home  string
}

// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
// given file paths and uses that data as the arguments to GetExecUser. If the
// files cannot be opened for any reason, the error is ignored and a nil
// io.Reader is passed instead.
func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
	var passwd, group io.Reader

	if passwdFile, err := os.Open(passwdPath); err == nil {
		passwd = passwdFile
		defer passwdFile.Close()
	}

	if groupFile, err := os.Open(groupPath); err == nil {
		group = groupFile
		defer groupFile.Close()
	}

	return GetExecUser(userSpec, defaults, passwd, group)
}

// GetExecUser parses a user specification string (using the passwd and group
// readers as sources for /etc/passwd and /etc/group data, respectively). In
// the case of blank fields or missing data from the sources, the values in
// defaults are used.
//
// GetExecUser will return an error if a user or group literal could not be
// found in any entry in passwd and group respectively.
//
// Examples of valid user specifications are:
// * ""
// * "user"
// * "uid"
// * "user:group"
// * "uid:gid"
// * "user:gid"
// * "uid:group"
//
// It should be noted that if you specify a numeric user or group id, they will
// not be evaluated as usernames (only the metadata will be filled). So attempting
// to parse a user with user.Name = "1337" will produce the user with a UID of
// 1337.
func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
	if defaults == nil {
		defaults = new(ExecUser)
	}

	// Copy over defaults.
	user := &ExecUser{
		Uid:   defaults.Uid,
		Gid:   defaults.Gid,
		Sgids: defaults.Sgids,
		Home:  defaults.Home,
	}

	// Sgids slice *cannot* be nil.
	if user.Sgids == nil {
		user.Sgids = []int{}
	}

	// Allow for userArg to have either "user" syntax, or optionally "user:group" syntax
	var userArg, groupArg string
	parseLine(userSpec, &userArg, &groupArg)

	// Convert userArg and groupArg to be numeric, so we don't have to execute
	// Atoi *twice* for each iteration over lines.
	uidArg, uidErr := strconv.Atoi(userArg)
	gidArg, gidErr := strconv.Atoi(groupArg)

	// Find the matching user.
	users, err := ParsePasswdFilter(passwd, func(u User) bool {
		if userArg == "" {
			// Default to current state of the user.
			return u.Uid == user.Uid
		}

		if uidErr == nil {
			// If the userArg is numeric, always treat it as a UID.
			return uidArg == u.Uid
		}

		return u.Name == userArg
	})

	// If we can't find the user, we have to bail.
	if err != nil && passwd != nil {
		if userArg == "" {
			userArg = strconv.Itoa(user.Uid)
		}
		return nil, fmt.Errorf("unable to find user %s: %v", userArg, err)
	}

	var matchedUserName string
	if len(users) > 0 {
		// First match wins, even if there's more than one matching entry.
		matchedUserName = users[0].Name
		user.Uid = users[0].Uid
		user.Gid = users[0].Gid
		user.Home = users[0].Home
	} else if userArg != "" {
		// If we can't find a user with the given username, the only other valid
		// option is if it's a numeric username with no associated entry in passwd.

		if uidErr != nil {
			// Not numeric.
			return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries)
		}
		user.Uid = uidArg

		// Must be inside valid uid range.
		if user.Uid < minId || user.Uid > maxId {
			return nil, ErrRange
		}

		// Okay, so it's numeric. We can just roll with this.
	}

	// On to the groups. If we matched a username, we need to do this because of
	// the supplementary group IDs.
	if groupArg != "" || matchedUserName != "" {
		groups, err := ParseGroupFilter(group, func(g Group) bool {
			// If the group argument isn't explicit, we'll just search for it.
			if groupArg == "" {
				// Check if user is a member of this group.
				for _, u := range g.List {
					if u == matchedUserName {
						return true
					}
				}
				return false
			}

			if gidErr == nil {
				// If the groupArg is numeric, always treat it as a GID.
				return gidArg == g.Gid
			}

			return g.Name == groupArg
		})
		if err != nil && group != nil {
			return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err)
		}

		// Only start modifying user.Gid if it is in explicit form.
		if groupArg != "" {
			if len(groups) > 0 {
				// First match wins, even if there's more than one matching entry.
				user.Gid = groups[0].Gid
			} else {
				// If we can't find a group with the given name, the only other valid
				// option is if it's a numeric group name with no associated entry in group.

				if gidErr != nil {
					// Not numeric.
					return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries)
				}
				user.Gid = gidArg

				// Must be inside valid gid range.
				if user.Gid < minId || user.Gid > maxId {
					return nil, ErrRange
				}

				// Okay, so it's numeric. We can just roll with this.
			}
		} else if len(groups) > 0 {
			// Supplementary group ids only make sense if in the implicit form.
			user.Sgids = make([]int, len(groups))
			for i, group := range groups {
				user.Sgids[i] = group.Gid
			}
		}
	}

	return user, nil
}
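A minimal sketch of GetExecUser against in-memory passwd and group data; the "www" and "staff" entries below are made up for illustration.

```
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	passwd := strings.NewReader("www:x:33:33:web:/var/www:/bin/false\n")
	group := strings.NewReader("www:x:33:www\nstaff:x:50:www\n")

	// "www" resolves to uid/gid 33; "staff" is picked up as a
	// supplementary group because www is listed as a member.
	execUser, err := user.GetExecUser("www", nil, passwd, group)
	if err != nil {
		panic(err)
	}
	fmt.Println(execUser.Uid, execUser.Gid, execUser.Sgids, execUser.Home)
	// 33 33 [33 50] /var/www
}
```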

// GetAdditionalGroups looks up a list of groups by name or group id
// against the given /etc/group formatted data. If a group name cannot
// be found, an error will be returned. If a group id cannot be found,
// or the given group data is nil, the id will be returned as-is
// provided it is in the legal range.
func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
	var groups = []Group{}
	if group != nil {
		var err error
		groups, err = ParseGroupFilter(group, func(g Group) bool {
			for _, ag := range additionalGroups {
				if g.Name == ag || strconv.Itoa(g.Gid) == ag {
					return true
				}
			}
			return false
		})
		if err != nil {
			return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
		}
	}

	gidMap := make(map[int]struct{})
	for _, ag := range additionalGroups {
		var found bool
		for _, g := range groups {
			// if we found a matched group either by name or gid, take the
			// first matched as correct
			if g.Name == ag || strconv.Itoa(g.Gid) == ag {
				if _, ok := gidMap[g.Gid]; !ok {
					gidMap[g.Gid] = struct{}{}
					found = true
					break
				}
			}
		}
		// we asked for a group but didn't find it. let's check to see
		// if we wanted a numeric group
		if !found {
			gid, err := strconv.Atoi(ag)
			if err != nil {
				return nil, fmt.Errorf("Unable to find group %s", ag)
			}
			// Ensure gid is inside gid range.
			if gid < minId || gid > maxId {
				return nil, ErrRange
			}
			gidMap[gid] = struct{}{}
		}
	}
	gids := []int{}
	for gid := range gidMap {
		gids = append(gids, gid)
	}
	return gids, nil
}

// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
// that opens the groupPath given and gives it as an argument to
// GetAdditionalGroups.
func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
	var group io.Reader

	if groupFile, err := os.Open(groupPath); err == nil {
		group = groupFile
		defer groupFile.Close()
	}
	return GetAdditionalGroups(additionalGroups, group)
}
@@ -0,0 +1,202 @@
(Standard text of the Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/, vendored here as a LICENSE file and reproduced verbatim in the original diff.)
@@ -0,0 +1,19 @@
# continuity

[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity)
[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity)

A transport-agnostic, filesystem metadata manifest system.

This project is a staging area for experiments in providing transport-agnostic
metadata storage.

Please see https://github.com/opencontainers/specs/issues/11 for more details.

## Building Proto Package

If you change the proto file you will need to rebuild the generated Go code with `go generate`.

```
go generate ./proto
```