Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 12 additions & 5 deletions commit_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -296,12 +296,18 @@ func TestCommitCompression(t *testing.T) {
name string
expectError bool
layerMediaType string
os string
}{
{archive.Uncompressed, "uncompressed", false, v1.MediaTypeImageLayer},
{archive.Gzip, "gzip", false, v1.MediaTypeImageLayerGzip},
{archive.Bzip2, "bz2", true, ""},
{archive.Xz, "xz", true, ""},
{archive.Zstd, "zstd", false, v1.MediaTypeImageLayerZstd},
{archive.Uncompressed, "uncompressed", false, v1.MediaTypeImageLayer, ""},
{archive.Uncompressed, "uncompressed-win", false, v1.MediaTypeImageLayer, "windows"},
{archive.Gzip, "gzip", false, v1.MediaTypeImageLayerGzip, ""},
{archive.Gzip, "gzip-win", false, v1.MediaTypeImageLayerGzip, "windows"},
{archive.Bzip2, "bz2", true, "", ""},
{archive.Bzip2, "bz2-win", true, "", "windows"},
{archive.Xz, "xz", true, "", ""},
{archive.Xz, "xz-win", true, "", "windows"},
{archive.Zstd, "zstd", false, v1.MediaTypeImageLayerZstd, ""},
{archive.Zstd, "zstd-win", false, v1.MediaTypeImageLayerZstd, "windows"},
} {
t.Run(compressor.name, func(t *testing.T) {
var ref types.ImageReference
Expand All @@ -310,6 +316,7 @@ func TestCommitCompression(t *testing.T) {
SystemContext: &testSystemContext,
Compression: compressor.compression,
}
b.OCIv1.OS = compressor.os
imageName := compressor.name
ref, err := imageStorage.Transport.ParseStoreReference(store, imageName)
require.NoErrorf(t, err, "parsing reference for to-be-committed local image %q", imageName)
Expand Down
294 changes: 242 additions & 52 deletions image.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,34 @@ const (
// create uniquely-named files, but we don't want to try to use their
// contents until after they've been written to
containerExcludesSubstring = ".tmp"

// Windows-specific PAX record keys
keyFileAttr = "MSWINDOWS.fileattr"
keySDRaw = "MSWINDOWS.rawsd"
keyCreationTime = "LIBARCHIVE.creationtime"

// Windows Security Descriptors (base64-encoded)
// SDDL: O:BAG:SYD:(A;OICI;FA;;;BA)(A;OICI;FA;;;SY)(A;;FA;;;BA)(A;OICIIO;GA;;;CO)(A;OICI;0x1200a9;;;BU)(A;CI;LC;;;BU)(A;CI;DC;;;BU)
// Owner: Built-in Administrators (BA)
// Group: Local System (SY)
// DACL:
// - Allow OBJECT_INHERIT+CONTAINER_INHERIT Full Access to Built-in Administrators (BA)
// - Allow OBJECT_INHERIT+CONTAINER_INHERIT Full Access to Local System (SY)
// - Allow Full Access to Built-in Administrators (BA)
// - Allow OBJECT_INHERIT+CONTAINER_INHERIT+INHERIT_ONLY Generic All to Creator Owner (CO)
// - Allow OBJECT_INHERIT+CONTAINER_INHERIT Read/Execute permissions to Built-in Users (BU)
// - Allow CONTAINER_INHERIT List Contents to Built-in Users (BU)
// - Allow CONTAINER_INHERIT Delete Child to Built-in Users (BU)
winSecurityDescriptorDirectory = "AQAEgBQAAAAkAAAAAAAAADAAAAABAgAAAAAABSAAAAAgAgAAAQEAAAAAAAUSAAAAAgCoAAcAAAAAAxgA/wEfAAECAAAAAAAFIAAAACACAAAAAxQA/wEfAAEBAAAAAAAFEgAAAAAAGAD/AR8AAQIAAAAAAAUgAAAAIAIAAAALFAAAAAAQAQEAAAAAAAMAAAAAAAMYAKkAEgABAgAAAAAABSAAAAAhAgAAAAIYAAQAAAABAgAAAAAABSAAAAAhAgAAAAIYAAIAAAABAgAAAAAABSAAAAAhAgAA"

// SDDL: O:BAG:SYD:(A;;FA;;;BA)(A;;FA;;;SY)(A;;0x1200a9;;;BU)
// Owner: Built-in Administrators (BA)
// Group: Local System (SY)
// DACL:
// - Allow Full Access to Built-in Administrators (BA)
// - Allow Full Access to Local System (SY)
// - Allow Read/Execute permissions to Built-in Users (BU)
winSecurityDescriptorFile = "AQAEgBQAAAAkAAAAAAAAADAAAAABAgAAAAAABSAAAAAgAgAAAQEAAAAAAAUSAAAAAgBMAAMAAAAAABgA/wEfAAECAAAAAAAFIAAAACACAAAAABQA/wEfAAEBAAAAAAAFEgAAAAAAGACpABIAAQIAAAAAAAUgAAAAIQIAAA=="
)

// ExtractRootfsOptions is consumed by ExtractRootfs() which allows users to
Expand Down Expand Up @@ -112,6 +140,7 @@ type containerImageRef struct {
unsetAnnotations []string
setAnnotations []string
createdAnnotation types.OptionalBool
os string
}

type blobLayerInfo struct {
Expand Down Expand Up @@ -1068,61 +1097,27 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, _ *types.SystemC
}
}
}
srcHasher := digest.Canonical.Digester()
// Set up to write the possibly-recompressed blob.
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0o600)
if err != nil {
rc.Close()
return nil, fmt.Errorf("opening file for %s: %w", what, err)
}

counter := ioutils.NewWriteCounter(layerFile)
var destHasher digest.Digester
var multiWriter io.Writer
// Avoid rehashing when we compress or mess with the layer contents somehow.
// At this point, there are multiple ways that can happen.
diffBeingAltered := i.compression != archive.Uncompressed
diffBeingAltered = diffBeingAltered || i.layerModTime != nil || i.layerLatestModTime != nil
diffBeingAltered = diffBeingAltered || len(layerExclusions) != 0
if diffBeingAltered {
destHasher = digest.Canonical.Digester()
multiWriter = io.MultiWriter(counter, destHasher.Hash())
} else {
destHasher = srcHasher
multiWriter = counter
}
// Compress the layer, if we're recompressing it.
writeCloser, err := archive.CompressStream(multiWriter, i.compression)
layerFileWriter, err := newLayerWriter(layerFile, i.compression, i.layerModTime, i.layerLatestModTime, layerExclusions, i.os == "windows")
if err != nil {
layerFile.Close()
rc.Close()
return nil, fmt.Errorf("compressing %s: %w", what, err)
}
writer := io.MultiWriter(writeCloser, srcHasher.Hash())

// Use specified timestamps in the layer, if we're doing that for history
// entries.
nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
writeCloser = makeFilteredLayerWriteCloser(nestedWriteCloser, i.layerModTime, i.layerLatestModTime, layerExclusions)
writer = writeCloser
// Okay, copy from the raw diff through the filter, compressor, and counter and
// digesters.
size, err := io.Copy(writer, rc)
return nil, fmt.Errorf("creating layer writer for %s: %w", what, err)
}
size, err := io.Copy(layerFileWriter, rc)
if err != nil {
writeCloser.Close()
layerFile.Close()
layerFileWriter.Close()
rc.Close()
return nil, fmt.Errorf("storing %s to file: on copy: %w", what, err)
}
if err := writeCloser.Close(); err != nil {
layerFile.Close()
rc.Close()
return nil, fmt.Errorf("storing %s to file: on pipe close: %w", what, err)
}
if err := layerFile.Close(); err != nil {
rc.Close()
return nil, fmt.Errorf("storing %s to file: on file close: %w", what, err)
}
layerFileWriter.Close()
layerFile.Close()
rc.Close()

if errChan != nil {
Expand All @@ -1131,24 +1126,22 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, _ *types.SystemC
return nil, fmt.Errorf("extracting container rootfs: %w", err)
}
}

if err != nil {
return nil, fmt.Errorf("storing %s to file: %w", what, err)
}
if diffBeingAltered {
size = counter.Count
// If the stream was transformed (compression or Windows mutation), use TotalWritten()
// Otherwise verify that io.Copy size matches TotalWritten()
if layerFileWriter.SourceDigest() != layerFileWriter.DestDigest() || i.os == "windows" {
size = layerFileWriter.TotalWritten()
} else {
if size != counter.Count {
return nil, fmt.Errorf("storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
if size != layerFileWriter.TotalWritten() {
return nil, fmt.Errorf("storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, layerFileWriter.TotalWritten())
}
}
logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String())
logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, layerFileWriter.SourceDigest().String(), layerFileWriter.DestDigest().String())
// Rename the layer so that we can more easily find it by digest later.
finalBlobName := filepath.Join(path, destHasher.Digest().String())
finalBlobName := filepath.Join(path, layerFileWriter.DestDigest().String())
if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
return nil, fmt.Errorf("storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
}
mb.addLayer(destHasher.Digest(), size, srcHasher.Digest())
mb.addLayer(layerFileWriter.DestDigest(), size, layerFileWriter.SourceDigest())
}

// Only attempt to append history if history was not disabled explicitly.
Expand Down Expand Up @@ -1181,6 +1174,202 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, _ *types.SystemC
return src, nil
}

// prepareWinHeader fixes up a tar header so that it fits the PAX layout
// expected inside a Windows container image layer: the Unix file-type bits
// and the MSWINDOWS/LIBARCHIVE PAX records are filled in, and the header is
// forced into PAX format.
func prepareWinHeader(h *tar.Header) {
	if h.PAXRecords == nil {
		h.PAXRecords = make(map[string]string)
	}
	switch h.Typeflag {
	case tar.TypeDir:
		h.Mode |= 1 << 14                // S_IFDIR
		h.PAXRecords[keyFileAttr] = "16" // FILE_ATTRIBUTE_DIRECTORY
	case tar.TypeReg:
		h.Mode |= 1 << 15                // S_IFREG
		h.PAXRecords[keyFileAttr] = "32" // FILE_ATTRIBUTE_ARCHIVE
	}
	// Record the modification time as the creation time as well, in the
	// seconds.nanoseconds form libarchive uses.
	if !h.ModTime.IsZero() {
		h.PAXRecords[keyCreationTime] = fmt.Sprintf("%d.%d", h.ModTime.Unix(), h.ModTime.Nanosecond())
	}
	h.Format = tar.FormatPAX
}

// addSecurityDescriptor attaches the appropriate canned Windows security
// descriptor (base64-encoded raw binary form) to the header's PAX records.
// Only directories and regular files receive one. Callers are expected to
// have run prepareWinHeader on the header first, so PAXRecords is non-nil.
func addSecurityDescriptor(h *tar.Header) {
	switch h.Typeflag {
	case tar.TypeDir:
		h.PAXRecords[keySDRaw] = winSecurityDescriptorDirectory
	case tar.TypeReg:
		h.PAXRecords[keySDRaw] = winSecurityDescriptorFile
	}
}

// winMutator implements io.WriteCloser and transforms an incoming tar stream to fit the expected format of a Windows
// container image
type winMutator struct {
pw *io.PipeWriter
done chan error
}

func (w *winMutator) Write(b []byte) (int, error) {
return w.pw.Write(b)
}

func (w *winMutator) Close() error {
if err := w.pw.Close(); err != nil {
return fmt.Errorf("error closing pipewriter")
}
return <-w.done
}

// newWindowsMutator starts a goroutine which rewrites a tar stream into the
// layout expected of a Windows container image layer: synthetic "Hives" and
// "Files" directory entries are prepended, every incoming entry is re-rooted
// under "Files", and Windows attribute and security-descriptor PAX records
// are attached. The rewritten stream is written to outStream; the returned
// winMutator is the write end of the pipeline.
func newWindowsMutator(outStream io.WriteCloser) *winMutator {
	pr, pw := io.Pipe()
	// Buffered so the goroutine can deliver its result and exit even if
	// Close() is never called.
	done := make(chan error, 1)
	go func() {
		tarReader := tar.NewReader(pr)
		tarWriter := tar.NewWriter(outStream)

		err := func() error {
			// Prepend the top-level directories a Windows layer is
			// expected to contain.
			for _, name := range []string{"Hives", "Files"} {
				h := &tar.Header{
					Name:     name,
					Typeflag: tar.TypeDir,
					ModTime:  time.Now(),
				}
				prepareWinHeader(h)
				if err := tarWriter.WriteHeader(h); err != nil {
					return err
				}
			}

			for {
				h, err := tarReader.Next()
				if err == io.EOF {
					break
				}
				if err != nil {
					return err
				}
				// NOTE(review): tar entry names conventionally use forward
				// slashes; filepath.Join only produces them on non-Windows
				// builds — confirm whether path.Join is intended here.
				h.Name = filepath.Join("Files", h.Name)
				if h.Linkname != "" {
					h.Linkname = filepath.Join("Files", h.Linkname)
				}
				prepareWinHeader(h)
				addSecurityDescriptor(h)
				if err := tarWriter.WriteHeader(h); err != nil {
					return err
				}
				if h.Size > 0 {
					if _, err := io.Copy(tarWriter, tarReader); err != nil {
						return err
					}
				}
			}
			return tarWriter.Close()
		}()
		// Unblock any pending or future Write calls if we bailed out early;
		// without this, a producer writing into the pipe after a mutation
		// error would block forever. On success (err == nil) this is a
		// plain close of an already-drained read end.
		pr.CloseWithError(err)
		done <- err
	}()
	return &winMutator{pw: pw, done: done}
}

// layerWriter represents a pipeline of writers: bytes written to it are
// filtered, optionally mutated for Windows, compressed, and counted on
// their way to a destination writer, with digests taken on both sides of
// the compression step.
type layerWriter struct {
	outputCounter  *int64          // points at the counter of bytes that reached the destination
	inputDigester  digest.Digester // digests the uncompressed (post-filter) stream
	outputDigester digest.Digester // digests the output stream; nil when output is identical to input
	input          io.Writer       // head of the pipeline; Write() feeds this
	closerStack    []func() error  // closers in registration order; Close() runs them in reverse
}

// SourceDigest returns the digest of the input stream so far. The digest is
// calculated after filtering (and any Windows mutation of) the stream, but
// before compressing.
func (l *layerWriter) SourceDigest() digest.Digest {
	return l.inputDigester.Digest()
}

// DestDigest returns the digest of the output stream so far. When no
// separate output digester was set up (the stream reaches the destination
// unmodified), the input digest doubles as the output digest.
func (l *layerWriter) DestDigest() digest.Digest {
	if l.outputDigester != nil {
		return l.outputDigester.Digest()
	}
	return l.inputDigester.Digest()
}

// Write feeds bytes into the head of the writer pipeline.
func (l *layerWriter) Write(in []byte) (int, error) {
	return l.input.Write(in)
}

// Close shuts down the writer pipeline, invoking the registered closers in
// the reverse of the order in which they were added, so that the upstream
// end of the pipeline is flushed through before the writers it feeds into
// are closed. Every closer is run even if an earlier one fails; each
// failure is logged, and the first error encountered is returned so that a
// failed flush (e.g. from the compressor) is not silently swallowed.
func (l *layerWriter) Close() error {
	var firstErr error
	for i := len(l.closerStack) - 1; i >= 0; i-- {
		if err := (l.closerStack[i])(); err != nil {
			logrus.Warnf("error closing layerWriter %d: %v", i, err)
			if firstErr == nil {
				firstErr = err
			}
		}
		// Drop the closer we just ran so Close is safe to call again.
		l.closerStack = l.closerStack[:i]
	}
	return firstErr
}

// TotalWritten returns the number of bytes written to the output
// destination so far.
func (l *layerWriter) TotalWritten() int64 {
	return *l.outputCounter
}

// newLayerWriter creates a writer pipeline which processes an input stream
// and ultimately writes to the given destination. The write stream pipeline
// is as follows:
//
//	input -> filter -> (windows mutator) -> compressor -> destination
//
// The returned layerWriter exposes digests of the stream on both sides of
// the compression step and a count of bytes that reached the destination.
func newLayerWriter(destination io.Writer, compression archive.Compression, layerModTime, layerLatestModTime *time.Time, layerExclusions []copier.ConditionalRemovePath, windows bool) (*layerWriter, error) {
	counter := ioutils.NewWriteCounter(destination)
	uncompressedDigester := digest.Canonical.Digester()

	// A separate output digest is only needed when the stream is altered
	// on its way to the destination; otherwise reuse the input digest and
	// avoid hashing twice.
	transformed := windows || compression != archive.Uncompressed ||
		layerModTime != nil || layerLatestModTime != nil || len(layerExclusions) != 0

	var compressedDigester digest.Digester
	var sink io.Writer = counter
	if transformed {
		compressedDigester = digest.Canonical.Digester()
		sink = io.MultiWriter(counter, compressedDigester.Hash())
	}

	// Compress the layer, if we're recompressing it.
	compressor, err := archive.CompressStream(sink, compression)
	if err != nil {
		return nil, fmt.Errorf("creating compression stream: %w", err)
	}
	closers := []func() error{compressor.Close}
	hashedInput := io.MultiWriter(compressor, uncompressedDigester.Hash())

	// Apply the Windows layer mutation if needed, upstream of the
	// uncompressed-digest calculation.
	head := ioutils.NopWriteCloser(hashedInput)
	if windows {
		// head.Close is owned by makeFilteredLayerWriteCloser below.
		head = newWindowsMutator(head)
	}

	// Apply specified timestamps and exclusions to the layer, if requested.
	filter := makeFilteredLayerWriteCloser(head, layerModTime, layerLatestModTime, layerExclusions)
	closers = append(closers, filter.Close)

	return &layerWriter{
		outputCounter:  &counter.Count,
		inputDigester:  uncompressedDigester,
		outputDigester: compressedDigester,
		input:          filter,
		closerStack:    closers,
	}, nil
}

// NewImageDestination would return a writable destination for this image
// reference, but a containerImageRef is read-only, so it always fails.
func (i *containerImageRef) NewImageDestination(_ context.Context, _ *types.SystemContext) (types.ImageDestination, error) {
	return nil, errors.New("can't write to a container")
}
Expand Down Expand Up @@ -1677,6 +1866,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
layerMountTargets: layerMountTargets,
layerPullUps: layerPullUps,
createdAnnotation: options.CreatedAnnotation,
os: b.OCIv1.OS,
}
if ref.created != nil {
for i := range ref.preEmptyLayers {
Expand Down
2 changes: 2 additions & 0 deletions tests/bud.bats
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

load helpers

# FIXME(review): leftover "do not merge" marker — remove before merging

@test "bud with a path to a Dockerfile (-f) containing a non-directory entry" {
run_buildah 125 build -f $BUDFILES/non-directory-in-path/non-directory/Dockerfile
expect_output --substring "non-directory/Dockerfile: not a directory"
Expand Down