cache: fix merge ref chain IDs.
This fixes an issue where merge refs were incorrectly setting their chain IDs to their last input's ID. This resulted in errors where GetByBlob thought the merge ref and the final input ref were equivalent. Now, merge refs have their chain IDs computed by digesting each blob in the full chain.

Signed-off-by: Erik Sipsma <erik@sipsma.dev>
parent dea9a4a9da
commit 5872bf3dd1
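The fix relies on how OCI chain IDs compose: folding diff IDs in one at a time with two-element ChainID calls produces the same digest as calling ChainID over the full list, which is what lets a merge ref extend its first input's chain ID blob by blob. Below is a minimal standalone sketch of that property, using the same identity package the diff imports as imagespecidentity. It is not part of this commit, and the layer digests are made up.

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
)

func main() {
	// Made-up diff IDs standing in for the layers of a merge ref's full chain.
	diffIDs := []digest.Digest{
		digest.FromString("layer-a"),
		digest.FromString("layer-b"),
		digest.FromString("layer-c"),
	}

	// Chain ID computed over the whole list at once.
	full := identity.ChainID(diffIDs)

	// The same chain ID computed incrementally, the way the fixed Merge case
	// folds each input's layers onto the running chain ID.
	incremental := diffIDs[0]
	for _, diffID := range diffIDs[1:] {
		incremental = identity.ChainID([]digest.Digest{incremental, diffID})
	}

	fmt.Println(full == incremental) // true
}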
@@ -286,43 +286,46 @@ func (sr *immutableRef) computeChainMetadata(ctx context.Context) error {
 		return nil
 	}
 
-	var chainIDs []digest.Digest
-	var blobChainIDs []digest.Digest
+	var chainID digest.Digest
+	var blobChainID digest.Digest
 
-	// Blobs should be set the actual layers in the ref's chain, no
-	// any merge refs.
-	layerChain := sr.layerChain()
-	var layerParent *cacheRecord
 	switch sr.kind() {
-	case Merge:
-		layerParent = layerChain[len(layerChain)-1].cacheRecord
+	case BaseLayer:
+		diffID := sr.getDiffID()
+		chainID = diffID
+		blobChainID = imagespecidentity.ChainID([]digest.Digest{digest.Digest(sr.getBlob()), diffID})
 	case Layer:
-		// skip the last layer in the chain, which is this ref itself
-		layerParent = layerChain[len(layerChain)-2].cacheRecord
-	}
-	if layerParent != nil {
-		if parentChainID := layerParent.getChainID(); parentChainID != "" {
-			chainIDs = append(chainIDs, parentChainID)
+		if parentChainID := sr.layerParent.getChainID(); parentChainID != "" {
+			chainID = parentChainID
 		} else {
-			return errors.Errorf("failed to set chain for reference with non-addressable parent")
+			return errors.Errorf("failed to set chain for reference with non-addressable parent %q", sr.layerParent.GetDescription())
 		}
-		if parentBlobChainID := layerParent.getBlobChainID(); parentBlobChainID != "" {
-			blobChainIDs = append(blobChainIDs, parentBlobChainID)
+		if parentBlobChainID := sr.layerParent.getBlobChainID(); parentBlobChainID != "" {
+			blobChainID = parentBlobChainID
 		} else {
-			return errors.Errorf("failed to set blobchain for reference with non-addressable parent")
+			return errors.Errorf("failed to set blobchain for reference with non-addressable parent %q", sr.layerParent.GetDescription())
 		}
-	}
-
-	switch sr.kind() {
-	case Layer, BaseLayer:
-		diffID := digest.Digest(sr.getDiffID())
-		chainIDs = append(chainIDs, diffID)
-		blobChainIDs = append(blobChainIDs, imagespecidentity.ChainID([]digest.Digest{digest.Digest(sr.getBlob()), diffID}))
+		diffID := digest.Digest(sr.getDiffID())
+		chainID = imagespecidentity.ChainID([]digest.Digest{chainID, diffID})
+		blobID := imagespecidentity.ChainID([]digest.Digest{digest.Digest(sr.getBlob()), diffID})
+		blobChainID = imagespecidentity.ChainID([]digest.Digest{blobChainID, blobID})
+	case Merge:
+		// Merge chain IDs can re-use the first input chain ID as a base, but after that have to
+		// be computed one-by-one for each blob in the chain. It should have the same value as
+		// if you had created the merge by unpacking all the blobs on top of each other with GetByBlob.
+		baseInput := sr.mergeParents[0]
+		chainID = baseInput.getChainID()
+		blobChainID = baseInput.getBlobChainID()
+		for _, mergeParent := range sr.mergeParents[1:] {
+			for _, layer := range mergeParent.layerChain() {
+				diffID := digest.Digest(layer.getDiffID())
+				chainID = imagespecidentity.ChainID([]digest.Digest{chainID, diffID})
+				blobID := imagespecidentity.ChainID([]digest.Digest{digest.Digest(layer.getBlob()), diffID})
+				blobChainID = imagespecidentity.ChainID([]digest.Digest{blobChainID, blobID})
+			}
+		}
 	}
 
-	chainID := imagespecidentity.ChainID(chainIDs)
-	blobChainID := imagespecidentity.ChainID(blobChainIDs)
-
 	sr.queueChainID(chainID)
 	sr.queueBlobChainID(blobChainID)
 	if err := sr.commitMetadata(); err != nil {
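For context on the regression: the removed Merge branch appended only the parent layer's chain ID and then called imagespecidentity.ChainID on that one-element list, and ChainID of a single-element list returns the element unchanged, so the merge ref's chain ID came out identical to its last input's, which is exactly what confused GetByBlob. A small standalone illustration (the digest value is made up):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
)

func main() {
	// Stand-in for the chain ID of the merge's last input.
	lastInput := digest.FromString("last-input-chain-id")

	// The removed code effectively computed this for merge refs: ChainID of a
	// single-element list is the element itself, so no new digesting happened.
	fmt.Println(identity.ChainID([]digest.Digest{lastInput}) == lastInput) // true
}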
@@ -337,6 +337,75 @@ func TestLazyGetByBlob(t *testing.T) {
 	require.NoError(t, err)
 }
 
+func TestMergeBlobchainID(t *testing.T) {
+	t.Parallel()
+	ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
+
+	tmpdir, err := ioutil.TempDir("", "cachemanager")
+	require.NoError(t, err)
+	defer os.RemoveAll(tmpdir)
+
+	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
+	require.NoError(t, err)
+
+	co, cleanup, err := newCacheManager(ctx, cmOpt{
+		snapshotter:     snapshotter,
+		snapshotterName: "native",
+	})
+	require.NoError(t, err)
+	defer cleanup()
+	cm := co.manager
+
+	// create a merge ref that has 3 inputs, with each input being a 3 layer blob chain
+	var mergeInputs []ImmutableRef
+	var descs []ocispecs.Descriptor
+	descHandlers := DescHandlers(map[digest.Digest]*DescHandler{})
+	for i := 0; i < 3; i++ {
+		contentBuffer := contentutil.NewBuffer()
+		var curBlob ImmutableRef
+		for j := 0; j < 3; j++ {
+			blobBytes, desc, err := mapToBlob(map[string]string{strconv.Itoa(i): strconv.Itoa(j)}, true)
+			require.NoError(t, err)
+			cw, err := contentBuffer.Writer(ctx)
+			require.NoError(t, err)
+			_, err = cw.Write(blobBytes)
+			require.NoError(t, err)
+			err = cw.Commit(ctx, 0, cw.Digest())
+			require.NoError(t, err)
+			descHandlers[desc.Digest] = &DescHandler{
+				Provider: func(_ session.Group) content.Provider { return contentBuffer },
+			}
+			curBlob, err = cm.GetByBlob(ctx, desc, curBlob, descHandlers)
+			require.NoError(t, err)
+			descs = append(descs, desc)
+		}
+		mergeInputs = append(mergeInputs, curBlob.Clone())
+	}
+
+	mergeRef, err := cm.Merge(ctx, mergeInputs)
+	require.NoError(t, err)
+
+	_, err = mergeRef.GetRemotes(ctx, true, solver.CompressionOpt{Type: compression.Default}, false, nil)
+	require.NoError(t, err)
+
+	// verify the merge blobchain ID isn't just set to one of the inputs (regression test)
+	mergeBlobchainID := mergeRef.(*immutableRef).getBlobChainID()
+	for _, mergeInput := range mergeInputs {
+		inputBlobchainID := mergeInput.(*immutableRef).getBlobChainID()
+		require.NotEqual(t, mergeBlobchainID, inputBlobchainID)
+	}
+
+	// verify you get the merge ref when asking for an equivalent blob chain
+	var curBlob ImmutableRef
+	for _, desc := range descs[:len(descs)-1] {
+		curBlob, err = cm.GetByBlob(ctx, desc, curBlob, descHandlers)
+		require.NoError(t, err)
+	}
+	blobRef, err := cm.GetByBlob(ctx, descs[len(descs)-1], curBlob, descHandlers)
+	require.NoError(t, err)
+	require.Equal(t, mergeRef.ID(), blobRef.ID())
+}
+
 func TestSnapshotExtract(t *testing.T) {
 	if runtime.GOOS == "windows" {
 		t.Skip("Depends on unimplemented containerd bind-mount support on Windows")
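Assuming buildkit's usual layout, where these tests live in the cache package, the new regression test can be run on its own with the standard Go tooling:

	go test -run TestMergeBlobchainID ./cache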