package imagebuildah
import (
"context"
"crypto/sha256"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"slices"
"strconv"
"strings"
"time"
"github.com/containers/buildah"
"github.com/containers/buildah/copier"
"github.com/containers/buildah/define"
buildahdocker "github.com/containers/buildah/docker"
"github.com/containers/buildah/internal"
"github.com/containers/buildah/internal/sanitize"
"github.com/containers/buildah/internal/tmpdir"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/pkg/rusage"
"github.com/containers/buildah/util"
config "github.com/containers/common/pkg/config"
cp "github.com/containers/image/v5/copy"
imagedocker "github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/unshare"
docker "github.com/fsouza/go-dockerclient"
buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/command"
"github.com/openshift/imagebuilder/dockerfile/parser"
"github.com/sirupsen/logrus"
)
// StageExecutor bundles up what we need to know when executing one stage of a
// (possibly multi-stage) build.
// Each stage may need to produce an image to be used as the base in a later
// stage (with the last stage's image being the end product of the build), and
// it may need to leave its working container in place so that the container's
// root filesystem's contents can be used as the source for a COPY instruction
// in a later stage.
// Each stage has its own base image, so it starts with its own configuration
// and set of volumes.
// If we're naming the result of the build, only the last stage will apply that
// name to the image that it produces.
type StageExecutor struct {
ctx context.Context
executor *Executor
log func(format string, args ...interface{})
index int
stages imagebuilder.Stages
name string
builder *buildah.Builder
preserved int
volumes imagebuilder.VolumeSet // list of directories which are volumes
volumeCache map[string]string // mapping from volume directories to cache archives (used by vfs method)
volumeCacheInfo map[string]os.FileInfo // mapping from volume directories to perms/datestamps to reset after restoring
mountPoint string
output string
containerIDs []string
stage *imagebuilder.Stage
didExecute bool
argsFromContainerfile []string
}
// Preserve informs the stage executor that from this point on, it needs to
// ensure that only COPY and ADD instructions can modify the contents of this
// directory or anything below it.
// When CompatVolumes is enabled, the StageExecutor handles this by caching the
// contents of directories which have been marked this way before executing a
// RUN instruction, invalidating that cache when an ADD or COPY instruction
// sets any location under the directory as the destination, and using the
// cache to reset the contents of the directory tree after processing each RUN
// instruction.
// It would be simpler if we could just mark the directory as a read-only bind
// mount of itself during Run(), but the directory is expected to remain
// writable while the RUN instruction is being handled, even if any changes
// made within the directory are ultimately discarded.
func (s *StageExecutor) Preserve(path string) error {
logrus.Debugf("PRESERVE %q in %q (already preserving %v)", path, s.builder.ContainerID, s.volumes)
// Try to resolve the symlink (if one exists), and set archivedPath and
// path based on whether a symlink was found or not.
var archivedPath string
if evaluated, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, path), copier.EvalOptions{}); err == nil {
symLink, err := filepath.Rel(s.mountPoint, evaluated)
if err != nil {
return fmt.Errorf("making evaluated path %q relative to %q: %w", evaluated, s.mountPoint, err)
}
if strings.HasPrefix(symLink, ".."+string(os.PathSeparator)) {
return fmt.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint)
}
archivedPath = evaluated
path = string(os.PathSeparator) + symLink
} else {
return fmt.Errorf("evaluating path %q: %w", path, err)
}
// Whether or not we're caching and restoring the contents of this
// directory, we need to ensure it exists now.
const createdDirPerms = os.FileMode(0o755)
st, err := os.Stat(archivedPath)
if errors.Is(err, os.ErrNotExist) {
// Yup, we do have to create it. That means it's not in any
// cached copy of the path that covers it, so we have to
// invalidate any such cached copy.
logrus.Debugf("have to create volume %q", path)
createdDirPerms := createdDirPerms
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return fmt.Errorf("ensuring volume path exists: %w", err)
}
if err := s.volumeCacheInvalidate(path); err != nil {
return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
}
if st, err = os.Stat(archivedPath); err != nil {
return fmt.Errorf("checking on just-created volume path: %w", err)
}
}
if err != nil {
return fmt.Errorf("reading info cache for volume at %q: %w", path, err)
}
if s.volumes.Covers(path) {
// This path is a subdirectory of a volume path that we're
// already preserving, so there's nothing new to be done now
// that we've ensured that it exists.
s.volumeCacheInfo[path] = st
return nil
}
// Add the new volume path to the ones that we're tracking.
if !s.volumes.Add(path) {
// This path is not a subdirectory of a volume path that we're
// already preserving, so adding it to the list should have
// worked.
return fmt.Errorf("adding %q to the volume cache", path)
}
s.volumeCacheInfo[path] = st
// If we're not doing save/restore, we're done, since volumeCache
// should be empty.
if s.executor.compatVolumes != types.OptionalBoolTrue {
logrus.Debugf("not doing volume save-and-restore of %q in %q", path, s.builder.ContainerID)
return nil
}
// Decide where the cache for this volume will be stored.
s.preserved++
cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
if err != nil {
return fmt.Errorf("unable to locate temporary directory for container")
}
cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
s.volumeCache[path] = cacheFile
// Now prune cache files for volumes that are newly supplanted by this one.
removed := []string{}
for cachedPath := range s.volumeCache {
// Walk our list of cached volumes, and check that they're
// still in the list of locations that we need to cache.
found := false
for _, volume := range s.volumes {
if volume == cachedPath {
// We need to keep this volume's cache.
found = true
break
}
}
if !found {
// We don't need to keep this volume's cache. Make a
// note to remove it.
removed = append(removed, cachedPath)
}
}
// Actually remove the caches that we decided to remove.
for _, cachedPath := range removed {
archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("no longer need cache of %q in %q", archivedPath, s.volumeCache[cachedPath])
if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
if errors.Is(err, os.ErrNotExist) {
continue
}
return fmt.Errorf("removing cache of %q: %w", archivedPath, err)
}
delete(s.volumeCache, cachedPath)
}
return nil
}
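// As an illustrative sketch (not part of the original source): with
// CompatVolumes enabled, a Containerfile such as
//
//	VOLUME /data
//	RUN touch /data/file
//
// causes Preserve("/data") to be called, so the contents of /data are
// archived before the RUN instruction and restored afterward, and
// /data/file does not survive into the committed image.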
// Remove any volume cache item which will need to be re-saved because we're
// writing to part of it.
func (s *StageExecutor) volumeCacheInvalidate(path string) error {
invalidated := []string{}
for cachedPath := range s.volumeCache {
if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) {
invalidated = append(invalidated, cachedPath)
}
}
for _, cachedPath := range invalidated {
if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
if errors.Is(err, os.ErrNotExist) {
continue
}
return err
}
archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("invalidated volume cache %q for %q from %q", archivedPath, path, s.volumeCache[cachedPath])
}
return nil
}
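// For example (illustrative): if "/data" is being cached and a COPY
// instruction's destination is "/data/conf", performCopy calls
// volumeCacheInvalidate("/data/conf"), which removes the stale cache
// file recorded for "/data" so that the directory is re-archived
// before the next RUN instruction.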
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
if err != nil {
return nil, fmt.Errorf("evaluating volume path: %w", err)
}
relativePath, err := filepath.Rel(s.mountPoint, archivedPath)
if err != nil {
return nil, fmt.Errorf("converting %q into a path relative to %q: %w", archivedPath, s.mountPoint, err)
}
if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) {
return nil, fmt.Errorf("converting %q into a path relative to %q", archivedPath, s.mountPoint)
}
_, err = os.Stat(cacheFile)
if err == nil {
logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile)
continue
}
if !errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("checking for presence of a cached copy of %q at %q: %w", cachedPath, cacheFile, err)
}
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
cache, err := os.Create(cacheFile)
if err != nil {
return nil, fmt.Errorf("creating cache for volume %q: %w", archivedPath, err)
}
defer cache.Close()
rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
if err != nil {
return nil, fmt.Errorf("archiving %q: %w", archivedPath, err)
}
defer rc.Close()
_, err = io.Copy(cache, rc)
if err != nil {
return nil, fmt.Errorf("archiving %q to %q: %w", archivedPath, cacheFile, err)
}
mount := specs.Mount{
Source: archivedPath,
Destination: string(os.PathSeparator) + relativePath,
Type: "bind",
Options: []string{"private"},
}
mounts = append(mounts, mount)
}
return mounts, nil
}
// Restore the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
if err != nil {
return fmt.Errorf("evaluating volume path: %w", err)
}
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
cache, err := os.Open(cacheFile)
if err != nil {
return fmt.Errorf("restoring contents of volume %q: %w", archivedPath, err)
}
defer cache.Close()
if err := copier.Remove(s.mountPoint, archivedPath, copier.RemoveOptions{All: true}); err != nil {
return err
}
err = chrootarchive.Untar(cache, archivedPath, nil)
if err != nil {
return fmt.Errorf("extracting archive at %q: %w", archivedPath, err)
}
if st, ok := s.volumeCacheInfo[cachedPath]; ok {
if err := os.Chmod(archivedPath, st.Mode()); err != nil {
return err
}
uid := 0
gid := 0
if st.Sys() != nil {
uid = util.UID(st)
gid = util.GID(st)
}
if err := os.Chown(archivedPath, uid, gid); err != nil {
return err
}
if err := os.Chtimes(archivedPath, st.ModTime(), st.ModTime()); err != nil {
return err
}
}
}
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file. For overlay, we "save" and "restore" by
// using it as a lower for an overlay mount in the same location, and then
// discarding the upper.
func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err error) {
for cachedPath := range s.volumeCache {
volumePath := filepath.Join(s.mountPoint, cachedPath)
mount := specs.Mount{
Source: volumePath,
Destination: cachedPath,
Options: []string{"O", "private"},
}
mounts = append(mounts, mount)
}
return mounts, nil
}
// Reset the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestoreOverlay() error {
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSave() (mounts []specs.Mount, err error) {
switch s.executor.store.GraphDriverName() {
case "overlay":
return s.volumeCacheSaveOverlay()
}
return s.volumeCacheSaveVFS()
}
// Reset the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestore() error {
switch s.executor.store.GraphDriverName() {
case "overlay":
return s.volumeCacheRestoreOverlay()
}
return s.volumeCacheRestoreVFS()
}
// Copy copies data into the working tree. The "Download" field is how
// imagebuilder tells us the instruction was "ADD" and not "COPY".
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
for _, cp := range copies {
if cp.KeepGitDir {
if cp.Download {
return errors.New("ADD --keep-git-dir is not supported")
}
return errors.New("COPY --keep-git-dir is not supported")
}
if cp.Link {
return errors.New("COPY --link is not supported")
}
if cp.Parents {
return errors.New("COPY --parents is not supported")
}
if len(cp.Excludes) > 0 {
excludes = append(slices.Clone(excludes), cp.Excludes...)
}
}
s.builder.ContentDigester.Restart()
return s.performCopy(excludes, copies...)
}
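// For example (illustrative), the instruction
//
//	ADD --chown=1000:1000 https://example.com/src.tar /src/
//
// arrives here as an imagebuilder.Copy with Download set to true and
// Chown set to "1000:1000", while a plain COPY arrives with Download
// set to false.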
func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Copy) error {
copiesExtend := []imagebuilder.Copy{}
for _, copy := range copies {
if err := s.volumeCacheInvalidate(copy.Dest); err != nil {
return err
}
var sources []string
// The From field says to read the content from another
// container. Update the ID mappings and
// all-content-comes-from-below-this-directory value.
var idMappingOptions *define.IDMappingOptions
var copyExcludes []string
stripSetuid := false
stripSetgid := false
preserveOwnership := false
contextDir := s.executor.contextDir
// If we are copying files via heredoc syntax, then
// it's time to create those temporary files on the host
// and copy them into the container.
if len(copy.Files) > 0 {
// If we are copying files from heredoc syntax, there
// may be regular files from the context as well, so split
// them out and process them differently.
if len(copy.Src) > len(copy.Files) {
regularSources := []string{}
for _, src := range copy.Src {
// If this source is not a heredoc, then it is a regular file from
// the build context or from another stage (`--from=`), so treat it differently.
if !strings.HasPrefix(src, "<<") {
regularSources = append(regularSources, src)
}
}
copyEntry := copy
// Remove the heredocs, since we are already processing them,
// and create a new entry whose sources contain only the
// regular files, since regular files can have a different
// context than heredoc files.
copyEntry.Files = nil
copyEntry.Src = regularSources
copiesExtend = append(copiesExtend, copyEntry)
}
copySources := []string{}
for _, file := range copy.Files {
data := file.Data
// remove the leading newline added while parsing the heredoc
data = strings.TrimPrefix(data, "\n")
// add a trailing newline when the heredoc ends, for docker compat
data = data + "\n"
// Create a separate subdirectory for this file.
tmpDir, err := os.MkdirTemp(parse.GetTempDir(), "buildah-heredoc")
if err != nil {
return fmt.Errorf("unable to create tmp dir for heredoc run %q: %w", parse.GetTempDir(), err)
}
defer os.RemoveAll(tmpDir)
tmpFile, err := os.Create(filepath.Join(tmpDir, path.Base(filepath.ToSlash(file.Name))))
if err != nil {
return fmt.Errorf("unable to create tmp file for COPY instruction at %q: %w", parse.GetTempDir(), err)
}
err = tmpFile.Chmod(0o644) // 644 is consistent with buildkit
if err != nil {
tmpFile.Close()
return fmt.Errorf("unable to chmod tmp file created for COPY instruction at %q: %w", tmpFile.Name(), err)
}
defer os.Remove(tmpFile.Name())
_, err = tmpFile.WriteString(data)
if err != nil {
tmpFile.Close()
return fmt.Errorf("unable to write contents of heredoc file at %q: %w", tmpFile.Name(), err)
}
copySources = append(copySources, filepath.Join(filepath.Base(tmpDir), filepath.Base(tmpFile.Name())))
tmpFile.Close()
}
contextDir = parse.GetTempDir()
copy.Src = copySources
}
if len(copy.From) > 0 && len(copy.Files) == 0 {
// If from has an argument within it, resolve it to its
// value. Otherwise just use the value as found.
from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
if fromErr != nil {
return fmt.Errorf("unable to resolve argument %q: %w", copy.From, fromErr)
}
var additionalBuildContext *define.AdditionalBuildContext
if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
additionalBuildContext = foundContext
} else {
// Maybe an index was given, as in COPY --from=<index>.
// If that's the case, resolve the index to the stage's
// short name, and if that name matches any additional
// build context, replace the stage with the additional
// build context.
if index, err := strconv.Atoi(from); err == nil {
from = s.stages[index].Name
}
if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
additionalBuildContext = foundContext
}
}
if additionalBuildContext != nil {
if !additionalBuildContext.IsImage {
contextDir = additionalBuildContext.Value
if additionalBuildContext.IsURL {
// Check whether this build context was already
// downloaded by an earlier step. If not,
// download it and populate the DownloadedCache
// field for future steps.
if additionalBuildContext.DownloadedCache == "" {
// The additional context contains a tar file,
// so download it, expand the tar into buildah's
// temporary directory, and point the context there.
path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
if err != nil {
return fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
}
// point context dir to the extracted path
contextDir = filepath.Join(path, subdir)
// populate the cache for later steps
additionalBuildContext.DownloadedCache = contextDir
} else {
contextDir = additionalBuildContext.DownloadedCache
}
} else {
// This points to a path on the filesystem.
// Check to see if there's a .containerignore
// file and update the excludes for this stage
// before proceeding.
buildContextExcludes, _, err := parse.ContainerIgnoreFile(additionalBuildContext.Value, "", nil)
if err != nil {
return err
}
excludes = append(excludes, buildContextExcludes...)
}
} else {
copy.From = additionalBuildContext.Value
}
}
if additionalBuildContext == nil {
if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
return err
}
if other, ok := s.executor.stages[from]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[copy.From]; ok {
contextDir = builder.MountPoint
idMappingOptions = &builder.IDMappingOptions
} else {
return fmt.Errorf("the stage %q has not been built", copy.From)
}
} else if additionalBuildContext.IsImage {
// Image was selected as additionalContext so only process image.
mountPoint, err := s.getImageRootfs(s.ctx, copy.From)
if err != nil {
return err
}
contextDir = mountPoint
}
// This isn't --from the build context directory, so we don't want to force everything to 0:0
preserveOwnership = true
copyExcludes = excludes
} else {
copyExcludes = append(s.executor.excludes, excludes...)
stripSetuid = true // did this change between 18.06 and 19.03?
stripSetgid = true // did this change between 18.06 and 19.03?
}
if copy.Download {
logrus.Debugf("ADD %#v, %#v", excludes, copy)
} else {
logrus.Debugf("COPY %#v, %#v", excludes, copy)
}
for _, src := range copy.Src {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// Source is a URL, allowed for ADD but not COPY.
if copy.Download {
sources = append(sources, src)
} else {
// returns an error to be compatible with docker
return fmt.Errorf("source can't be a URL for COPY")
}
} else {
sources = append(sources, filepath.Join(contextDir, src))
}
}
options := buildah.AddAndCopyOptions{
Chmod: copy.Chmod,
Chown: copy.Chown,
Checksum: copy.Checksum,
PreserveOwnership: preserveOwnership,
ContextDir: contextDir,
Excludes: copyExcludes,
IgnoreFile: s.executor.ignoreFile,
IDMappingOptions: idMappingOptions,
StripSetuidBit: stripSetuid,
StripSetgidBit: stripSetgid,
// The values for these next two fields are ultimately
// based on command line flags with names that sound
// much more generic.
CertPath: s.executor.systemContext.DockerCertPath,
InsecureSkipTLSVerify: s.executor.systemContext.DockerInsecureSkipTLSVerify,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
}
if len(copy.Files) > 0 {
// If we are copying heredoc files, we need to temporarily place
// them in the context dir and then move them into the container via
// the copier. There are cases where .containerignore can have a
// pattern like '*' which would match our heredoc files, so let's not
// set any excludes or IgnoreFile for this copy.
options.Excludes = nil
options.IgnoreFile = ""
}
if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
return err
}
}
if len(copiesExtend) > 0 {
// If we found heredocs and regular files together
// in the same statement, then we produced new copy
// entries to process the regular files separately,
// since they need a different context.
return s.performCopy(excludes, copiesExtend...)
}
return nil
}
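// For example (illustrative), a heredoc form such as
//
//	COPY <<EOF /app/config
//	key=value
//	EOF
//
// reaches performCopy with copy.Files populated; each file's data is
// written to a temporary file under parse.GetTempDir(), and the copy
// then proceeds with that temporary directory as the context.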
// Returns a map of StageName/ImageName:internal.StageMountDetails for the
// items in the passed-in mounts list which include a "from=" value. The ""
// key in the returned map corresponds to the default build context.
func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) {
stageMountPoints := make(map[string]internal.StageMountDetails)
stageMountPoints[""] = internal.StageMountDetails{
MountPoint: s.executor.contextDir,
IsWritesDiscardedOverlay: s.executor.contextDirWritesAreDiscarded,
}
for _, flag := range mountList {
if strings.Contains(flag, "from") {
tokens := strings.Split(flag, ",")
if len(tokens) < 2 {
return nil, fmt.Errorf("Invalid --mount command: %s", flag)
}
for _, token := range tokens {
key, val, hasVal := strings.Cut(token, "=")
switch key {
case "from":
if !hasVal {
return nil, fmt.Errorf("unable to resolve argument for `from=`: bad argument")
}
if val == "" {
return nil, fmt.Errorf("unable to resolve argument for `from=`: from points to an empty value")
}
from, fromErr := imagebuilder.ProcessWord(val, s.stage.Builder.Arguments())
if fromErr != nil {
return nil, fmt.Errorf("unable to resolve argument %q: %w", val, fromErr)
}
// If the value corresponds to an additional build context,
// the mount source is either the rootfs of the image,
// the filesystem path, or a temporary directory populated
// with the contents of the URL, all in preference to any
// stage which might have the value as its name.
if additionalBuildContext, ok := s.executor.additionalBuildContexts[from]; ok {
if additionalBuildContext.IsImage {
mountPoint, err := s.getImageRootfs(s.ctx, additionalBuildContext.Value)
if err != nil {
return nil, fmt.Errorf("%s from=%s: image not found with that name", flag, from)
}
// The `from` key in stageMountPoints should point
// to the `mountPoint` of the additional build
// context, since the parser will use this `from`
// to look it up in the stageMountPoints map later.
stageMountPoints[from] = internal.StageMountDetails{
IsAdditionalBuildContext: true,
IsImage: true,
DidExecute: true,
MountPoint: mountPoint,
}
break
}
// Most likely this points to a path on the filesystem
// or to an external tar archive. Treat it like a stage;
// nothing is different for this case. Point mountPoint
// at the path on the host, and it will be handled
// correctly, since GetBindMount honors IsStage: false
// while processing stageMountPoints.
mountPoint := additionalBuildContext.Value
if additionalBuildContext.IsURL {
// Check whether this build context was already
// downloaded in an earlier RUN step. If not,
// download it and populate the DownloadedCache
// field for future RUN steps.
if additionalBuildContext.DownloadedCache == "" {
// The additional context contains a tar file,
// so download it, expand the tar into buildah's
// temporary directory, and point the context there.
path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
if err != nil {
return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
}
// point the mount point at the extracted path
mountPoint = filepath.Join(path, subdir)
// populate cache for next RUN step
additionalBuildContext.DownloadedCache = mountPoint
} else {
mountPoint = additionalBuildContext.DownloadedCache
}
}
stageMountPoints[from] = internal.StageMountDetails{
IsAdditionalBuildContext: true,
DidExecute: true,
MountPoint: mountPoint,
}
break
}
// If the source's name corresponds to the
// result of an earlier stage, wait for that
// stage to finish being built.
if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
return nil, err
}
// If the source's name is a stage, return a
// pointer to its rootfs.
if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
stageMountPoints[from] = internal.StageMountDetails{
IsStage: true,
DidExecute: otherStage.didExecute,
MountPoint: otherStage.mountPoint,
}
break
} else {
// Treat the source's name as the name of an image.
mountPoint, err := s.getImageRootfs(s.ctx, from)
if err != nil {
return nil, fmt.Errorf("%s from=%s: no stage or image found with that name", flag, from)
}
stageMountPoints[from] = internal.StageMountDetails{
IsImage: true,
DidExecute: true,
MountPoint: mountPoint,
}
break
}
default:
continue
}
}
}
}
return stageMountPoints, nil
}
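// For example (illustrative), for
//
//	RUN --mount=type=bind,from=builder,target=/src make
//
// runStageMountPoints returns a map with a "builder" entry pointing at
// that stage's rootfs (or at an image's or additional build context's
// mount point), alongside the "" entry for the default build context.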
func (s *StageExecutor) createNeededHeredocMountsForRun(files []imagebuilder.File) ([]Mount, error) {
mountResult := []Mount{}
for _, file := range files {
f, err := os.CreateTemp(parse.GetTempDir(), "buildahheredoc")
if err != nil {
return nil, err
}
if _, err := f.WriteString(file.Data); err != nil {
f.Close()
return nil, err
}
err = f.Chmod(0o755)
if err != nil {
f.Close()
return nil, err
}
// the dest path is the same as buildkit's, for compat
dest := filepath.Join("/dev/pipes/", filepath.Base(f.Name()))
mount := Mount{Destination: dest, Type: define.TypeBind, Source: f.Name(), Options: append(define.BindOptions, "rprivate", "z", "Z")}
mountResult = append(mountResult, mount)
f.Close()
}
return mountResult, nil
}
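// Each heredoc body is written to an executable temporary file on the
// host and bind-mounted at /dev/pipes/<name> inside the container; for
// example (illustrative), a heredoc written to /tmp/buildahheredoc123
// on the host would appear at /dev/pipes/buildahheredoc123 during the
// RUN instruction.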
func parseSheBang(data string) string {
lines := strings.Split(data, "\n")
if len(lines) > 2 && strings.HasPrefix(lines[1], "#!") {
shebang := strings.TrimLeft(lines[1], "#!")
return shebang
}
return ""
}
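// For example, parseSheBang("\n#!/bin/bash\necho hi\n") returns
// "/bin/bash", while data without a shebang on its second line
// yields "".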
// Run executes a RUN instruction using the stage's current working container
// as a root directory.
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
logrus.Debugf("RUN %#v, %#v", run, config)
args := run.Args
heredocMounts := []Mount{}
if len(run.Files) > 0 {
if heredoc := buildkitparser.MustParseHeredoc(args[0]); heredoc != nil {
if strings.HasPrefix(run.Files[0].Data, "#!") || strings.HasPrefix(run.Files[0].Data, "\n#!") {
// This is a single heredoc with a shebang, so create a file
// and run it with the program specified in the shebang.
heredocMount, err := s.createNeededHeredocMountsForRun(run.Files)
if err != nil {
return err
}
shebangArgs := parseSheBang(run.Files[0].Data)
if shebangArgs != "" {
args = []string{shebangArgs + " " + heredocMount[0].Destination}
} else {
args = []string{heredocMount[0].Destination}
}
heredocMounts = append(heredocMounts, heredocMount...)
} else {
args = []string{run.Files[0].Data}
}
} else {
full := args[0]
for _, file := range run.Files {
full += file.Data + "\n" + file.Name
}
args = []string{full}
}
}
stageMountPoints, err := s.runStageMountPoints(run.Mounts)
if err != nil {
return err
}
if s.builder == nil {
return fmt.Errorf("no build container available")
}
stdin := s.executor.in
if stdin == nil {
devNull, err := os.Open(os.DevNull)
if err != nil {
return fmt.Errorf("opening %q for reading: %v", os.DevNull, err)
}
defer devNull.Close()
stdin = devNull
}
namespaceOptions := append([]define.NamespaceOption{}, s.executor.namespaceOptions...)
options := buildah.RunOptions{
Args: s.executor.runtimeArgs,
Cmd: config.Cmd,
ContextDir: s.executor.contextDir,
ConfigureNetwork: s.executor.configureNetwork,
Entrypoint: config.Entrypoint,
Env: config.Env,
Hostname: config.Hostname,
Logger: s.executor.logger,
Mounts: slices.Clone(s.executor.transientMounts),
NamespaceOptions: namespaceOptions,
NoHostname: s.executor.noHostname,
NoHosts: s.executor.noHosts,
NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "" || s.executor.noPivotRoot,
Quiet: s.executor.quiet,
CompatBuiltinVolumes: types.OptionalBoolFalse,
RunMounts: run.Mounts,
Runtime: s.executor.runtime,
Secrets: s.executor.secrets,
SSHSources: s.executor.sshsources,
StageMountPoints: stageMountPoints,
Stderr: s.executor.err,
Stdin: stdin,
Stdout: s.executor.out,
SystemContext: s.executor.systemContext,
Terminal: buildah.WithoutTerminal,
User: config.User,
WorkingDir: config.WorkingDir,
}
// Honor `RUN --network=<>`.
switch run.Network {
case "host":
options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: "network", Host: true})
options.ConfigureNetwork = define.NetworkEnabled
case "none":
options.ConfigureNetwork = define.NetworkDisabled
case "", "default":
// do nothing
default:
return fmt.Errorf(`unsupported value %q for "RUN --network", must be either "host" or "none"`, run.Network)
}
if config.NetworkDisabled {
options.ConfigureNetwork = buildah.NetworkDisabled
}
if run.Shell {
if len(config.Shell) > 0 && s.builder.Format == define.Dockerv2ImageManifest {
args = append(config.Shell, args...)
} else {
args = append([]string{"/bin/sh", "-c"}, args...)
}
}
if s.executor.compatVolumes == types.OptionalBoolTrue {
// Only bother with saving/restoring the contents of volumes if
// we've been specifically asked to.
mounts, err := s.volumeCacheSave()
if err != nil {
return err
}
options.Mounts = append(options.Mounts, mounts...)
}
// The list of built-in volumes isn't passed in via RunOptions, so make
// sure the builder's list of built-in volumes includes anything that
// the configuration thinks is a built-in volume.
s.builder.ClearVolumes()
for v := range config.Volumes {
s.builder.AddVolume(v)
}
if len(heredocMounts) > 0 {
options.Mounts = append(options.Mounts, heredocMounts...)
}
err = s.builder.Run(args, options)
if s.executor.compatVolumes == types.OptionalBoolTrue {
// Only bother with saving/restoring the contents of volumes if
// we've been specifically asked to.
if err2 := s.volumeCacheRestore(); err2 != nil {
if err == nil {
return err2
}
}
}
return err
}
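// For example (illustrative), a heredoc RUN such as
//
//	RUN <<EOF
//	#!/bin/bash
//	echo hello
//	EOF
//
// is handled above by mounting the heredoc body as an executable file
// and invoking it with the interpreter named by its shebang.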
// UnrecognizedInstruction is called when we encounter an instruction that the
// imagebuilder parser didn't understand.
func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", strings.ToUpper(step.Command))
err := fmt.Sprintf(errStr+"%#v", step)
if s.executor.ignoreUnrecognizedInstructions {
logrus.Debug(err)
return nil
}
switch logrus.GetLevel() {
case logrus.ErrorLevel:
s.executor.logger.Error(errStr)
case logrus.DebugLevel:
logrus.Debug(err)
default:
s.executor.logger.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
}
return errors.New(err)
}
// sanitizeFrom limits which image transports we'll accept. For those it
// accepts which refer to filesystem objects, where relative path names are
// evaluated relative to "contextDir", it will create a copy of the original
// image, under "tmpdir", which contains no symbolic links, and return either
// the original image reference or a reference to a sanitized copy which should
// be used instead.
func (s *StageExecutor) sanitizeFrom(from, tmpdir string) (newFrom string, err error) {
transportName, restOfImageName, maybeHasTransportName := strings.Cut(from, ":")
if !maybeHasTransportName || transports.Get(transportName) == nil {
if _, err = reference.ParseNormalizedNamed(from); err == nil {
// this is a normal-looking image-in-a-registry-or-named-in-storage name
return from, nil
}
if img, err := s.executor.store.Image(from); img != nil && err == nil {
// this is an image ID
return from, nil
}
return "", fmt.Errorf("parsing image name %q: %w", from, err)
}
// TODO: drop this part and just return an error... someday
return sanitize.ImageName(transportName, restOfImageName, s.executor.contextDir, tmpdir)
}
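// For example (illustrative), sanitizeFrom returns "alpine" unchanged,
// since it parses as a normal image name, while a reference like
// "oci:./layout" names a filesystem object and is replaced with a
// reference to a symlink-free copy created under tmpdir.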
// prepare creates a working container based on the specified image, or if one
// isn't specified, the first argument passed to the first FROM instruction we
// can find in the stage's parsed tree.
func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBConfig, rebase, preserveBaseImageAnnotations bool, pullPolicy define.PullPolicy) (builder *buildah.Builder, err error) {
stage := s.stage
ib := stage.Builder
node := stage.Node
if from == "" {
base, err := ib.From(node)
if err != nil {
logrus.Debugf("prepare(node.Children=%#v)", node.Children)
return nil, fmt.Errorf("determining starting point for build: %w", err)
}
from = base
}
sanitizedDir, err := os.MkdirTemp(tmpdir.GetTempDir(), "buildah-context-")
if err != nil {
return nil, fmt.Errorf("creating temporary directory: %w", err)
}
defer func() {
if err := os.RemoveAll(sanitizedDir); err != nil {
logrus.Warn(err)
}
}()
sanitizedFrom, err := s.sanitizeFrom(from, tmpdir.GetTempDir())
if err != nil {
return nil, fmt.Errorf("invalid base image specification %q: %w", from, err)
}
displayFrom := from
if ib.Platform != "" {
displayFrom = "--platform=" + ib.Platform + " " + displayFrom
}
// stage.Name will be a numeric string for all stages without an "AS" clause
asImageName := stage.Name
if asImageName != "" {
if _, err := strconv.Atoi(asImageName); err != nil {
displayFrom += " AS " + asImageName
}
}
if initializeIBConfig && rebase {
logrus.Debugf("FROM %#v", displayFrom)
if !s.executor.quiet {
s.log("FROM %s", displayFrom)
}