diff --git a/go.mod b/go.mod index c8c6e47..b2548b5 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/dop251/goja v0.0.0-20241024094426-79f3a7efcdbd github.com/fsnotify/fsnotify v1.7.0 github.com/google/go-cmp v0.6.0 + github.com/google/go-containerregistry v0.17.0 github.com/google/uuid v1.6.0 github.com/gookit/color v1.5.4 github.com/imdario/mergo v0.3.13 @@ -34,6 +35,7 @@ require ( ) require ( + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.59 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 // indirect @@ -49,15 +51,25 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dlclark/regexp2 v1.11.4 // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v24.0.0+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/klauspost/compress v1.16.5 // indirect github.com/moby/sys/mountinfo v0.5.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc3 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect @@ -65,10 +77,12 @@ require ( github.com/segmentio/backo-go v1.0.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/vbatts/tar-split v0.11.3 // indirect github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect - golang.org/x/crypto v0.18.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.26.0 // indirect golang.org/x/sys v0.21.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/tools v0.22.0 // indirect sigs.k8s.io/release-utils v0.7.7 // indirect ) diff --git a/go.sum b/go.sum index 9c83e08..78ee119 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,10 @@ +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod 
h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E= github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= @@ -44,8 +49,11 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= +github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= +github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= @@ -58,6 +66,18 @@ github.com/disiqueira/gotree v1.0.0 h1:en5wk87n7/Jyk6gVME3cx3xN9KmUCstJ1IjHr4Se4 github.com/disiqueira/gotree v1.0.0/go.mod h1:7CwL+VWsWAU95DovkdRZAtA7YbtHwGk+tLV/kNi8niU= github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo= github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/cli v24.0.0+incompatible h1:0+1VshNwBQzQAx9lOl+OYCTCEAD8fKs/qeXMx3O0wqM= +github.com/docker/cli v24.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.0+incompatible h1:z4bf8HvONXX9Tde5lGBMQ7yCJgNahmJumdrStZAbeY4= +github.com/docker/docker v24.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= +github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dop251/goja v0.0.0-20241024094426-79f3a7efcdbd h1:QMSNEh9uQkDjyPwu/J541GgSH+4hw+0skJDIj9HJ3mE= github.com/dop251/goja v0.0.0-20241024094426-79f3a7efcdbd/go.mod h1:MxLav0peU43GgvwVgNbLAj1s/bSGboKkhuULvq/7hx4= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -67,8 +87,12 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 
github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.17.0 h1:5p+zYs/R4VGHkhyvgWurWrpJ2hW4Vv9fQI+GzdcwXLk= +github.com/google/go-containerregistry v0.17.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -83,20 +107,36 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= +github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40= github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= github.com/opencontainers/runtime-spec v1.1.0 
h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -114,6 +154,7 @@ github.com/segmentio/textio v1.2.0 h1:Ug4IkV3kh72juJbG8azoSBlgebIbUUxVNrfFcKHfTS github.com/segmentio/textio v1.2.0/go.mod h1:+Rb7v0YVODP+tK5F7FD9TCkV7gOYx9IgLHWiqtvY8ag= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= @@ -121,38 +162,81 @@ github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3k github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= +github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= 
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= sigs.k8s.io/bom v0.6.0 h1:IPMPHx6XdmMeW2oEeF66DgNyP5d4RxfuXwiC1qn+n9o= sigs.k8s.io/bom v0.6.0/go.mod h1:MV0D3vdGlkaPgi5EwpwMBeQ8n8QS8Q2u1lJ5LyE7RLM= sigs.k8s.io/release-utils v0.7.7 h1:JKDOvhCk6zW8ipEOkpTGDH/mW3TI+XqtPp16aaQ79FU= diff --git a/pkg/leeway/build.go b/pkg/leeway/build.go index aa75e55..75eb71e 100644 --- a/pkg/leeway/build.go +++ b/pkg/leeway/build.go @@ -16,7 +16,6 @@ import ( "os/exec" "path/filepath" "regexp" - "runtime" "strconv" "strings" "sync" @@ -95,17 +94,6 @@ const ( dockerMetadataFile = "metadata.yaml" ) -var ( - compressor = "gzip" -) - -func init() { - pigz, err := exec.LookPath("pigz") - if err == nil { - compressor = pigz - } -} - // buildProcessVersions contain the current version of the respective build processes. // Increment this value if you change any of the build procedures. 
var buildProcessVersions = map[PackageType]int{ @@ -730,6 +718,14 @@ func (p *Package) build(buildctx *buildContext) error { } } + // Execute post-processing hook if available - this should run regardless of provenance settings + if bld.PostProcess != nil { + log.WithField("package", p.FullName()).Debug("running post-processing hook") + if err := bld.PostProcess(buildctx, p, builddir); err != nil { + return xerrors.Errorf("post-processing failed: %w", err) + } + } + // Handle provenance subjects if p.C.W.Provenance.Enabled { if err := handleProvenance(p, buildctx, builddir, bld, sources, now); err != nil { @@ -916,6 +912,11 @@ type packageBuild struct { // If the package build has tests but the test coverage cannot be computed, this function must return an error. // This function is guaranteed to be called after the test phase has finished. TestCoverage testCoverageFunc + + // PostProcess is called after all build phases complete but before packaging. + // It's used for post-build processing that needs to happen regardless of provenance settings, + // such as Docker image extraction. + PostProcess func(buildCtx *buildContext, pkg *Package, buildDir string) error } type testCoverageFunc func() (coverage, funcsWithoutTest, funcsWithTest int, err error) @@ -1516,33 +1517,101 @@ func (p *Package) buildDocker(buildctx *buildContext, wd, result string) (res *p buildcmd = append(buildcmd, ".") commands[PackageBuildPhaseBuild] = append(commands[PackageBuildPhaseBuild], buildcmd) + var pkgCommands [][]string + if len(cfg.Image) == 0 { - // we don't push the image, let's export it - ef := strings.TrimSuffix(result, ".gz") + // we don't push the image, let's extract it into a standard format + // Create a content directory for better organization + containerDir := filepath.Join(wd, "container") + // Create a subdirectory specifically for the filesystem content + contentDir := filepath.Join(containerDir, "content") + commands[PackageBuildPhaseBuild] = append(commands[PackageBuildPhaseBuild], [][]string{ - {"docker", "save", "-o", ef, version}, + // Create container files directories with parents + {"mkdir", "-p", contentDir}, }...) - } - res = &packageBuild{ - Commands: commands, - } + res = &packageBuild{ + Commands: commands, + } - var pkgCommands [][]string - if len(cfg.Image) == 0 { - // We've already built the build artifact by exporting the archive using "docker save" - // At the very least we need to add the provenance bundle to that archive. 
- ef := strings.TrimSuffix(result, ".gz") - res.PostBuild = dockerExportPostBuild(wd, ef) + // Add a post-processing hook to extract the container filesystem + // This will run after all build phases but before packaging + res.PostProcess = func(buildCtx *buildContext, pkg *Package, buildDir string) error { + extractLogger := log.WithFields(log.Fields{ + "image": version, + "destDir": contentDir, + }) + extractLogger.Debug("Extracting container filesystem") - var pkgcmds [][]string - if p.C.W.Provenance.Enabled { - pkgcmds = append(pkgcmds, []string{"tar", "fr", ef, "./" + provenanceBundleFilename}) + // First, verify the image exists + imageExists, err := checkImageExists(version) + if err != nil { + return xerrors.Errorf("failed to check if image exists: %w", err) + } + if !imageExists { + return xerrors.Errorf("image %s not found - build may have failed silently", version) + } + + // Use the OCI libraries for extraction with more robust error handling + if err := ExtractImageWithOCILibs(contentDir, version); err != nil { + return xerrors.Errorf("failed to extract container files: %w", err) + } + + // Verify extraction was successful + if isEmpty(contentDir) { + return xerrors.Errorf("container extraction resulted in empty directory - extraction may have failed") + } + + // Create metadata files at the container root level + if err := createDockerMetadataFiles(containerDir, version, cfg.Metadata); err != nil { + return xerrors.Errorf("failed to create metadata files: %w", err) + } + + // Log the resulting directory structure for diagnostic purposes + if log.IsLevelEnabled(log.DebugLevel) { + if err := logDirectoryStructure(containerDir, extractLogger); err != nil { + extractLogger.WithError(err).Warn("Failed to log directory structure") + } + } + + extractLogger.Debug("Container files extracted successfully") + + return nil + } + + // Keep the PostBuild function for provenance to ensure backward compatibility + res.PostBuild = func(sources fileset) (subj []in_toto.Subject, absResultDir string, err error) { + // Calculate subjects for provenance based on the entire container directory + postBuild, err := computeFileset(containerDir) + if err != nil { + return nil, containerDir, xerrors.Errorf("failed to compute fileset: %w", err) + } + subjects, err := postBuild.Sub(sources).Subjects(containerDir) + if err != nil { + return nil, containerDir, xerrors.Errorf("failed to compute subjects: %w", err) + } + return subjects, containerDir, nil } - pkgcmds = append(pkgcmds, []string{compressor, ef}) + // Create package with improved diagnostic logging + var pkgcmds [][]string + + // Add a diagnostic command to generate a manifest of what we're packaging + pkgcmds = append(pkgcmds, []string{"sh", "-c", fmt.Sprintf("find %s -type f | sort > %s/files-manifest.txt", containerDir, containerDir)}) + + // Create final tar with container files and metadata + pkgcmds = append(pkgcmds, BuildTarCommand( + WithOutputFile(result), + WithWorkingDir(containerDir), + WithCompression(!buildctx.DontCompress), + )) + commands[PackageBuildPhasePackage] = pkgcmds } else if len(cfg.Image) > 0 { + // Image push workflow + log.WithField("images", cfg.Image).Debug("configuring image push") + for _, img := range cfg.Image { pkgCommands = append(pkgCommands, [][]string{ {"docker", "tag", version, img}, @@ -1556,21 +1625,25 @@ func (p *Package) buildDocker(buildctx *buildContext, wd, result string) (res *p for _, img := range cfg.Image { pkgCommands = append(pkgCommands, []string{"sh", "-c", fmt.Sprintf("echo %s >> %s", img, 
dockerImageNamesFiles)}, - []string{"sh", "-c", fmt.Sprintf("echo built image: %s", img)}, + []string{"sh", "-c", fmt.Sprintf("echo built and pushed image: %s", img)}, ) } - // In addition to the imgnames.txt we also produce a file that contains the configured metadata, - // which provides a sensible way to add metadata to the image names. - consts, err := yaml.Marshal(cfg.Metadata) + + // Add metadata file with improved error handling + metadataContent, err := yaml.Marshal(cfg.Metadata) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to marshal metadata: %w", err) } - pkgCommands = append(pkgCommands, []string{"sh", "-c", fmt.Sprintf("echo %s | base64 -d > %s", base64.StdEncoding.EncodeToString(consts), dockerMetadataFile)}) + encodedMetadata := base64.StdEncoding.EncodeToString(metadataContent) + pkgCommands = append(pkgCommands, []string{"sh", "-c", fmt.Sprintf("echo %s | base64 -d > %s", encodedMetadata, dockerMetadataFile)}) + + // Prepare for packaging sourcePaths := []string{fmt.Sprintf("./%s", dockerImageNamesFiles), fmt.Sprintf("./%s", dockerMetadataFile)} if p.C.W.Provenance.Enabled { sourcePaths = append(sourcePaths, fmt.Sprintf("./%s", provenanceBundleFilename)) } + archiveCmd := BuildTarCommand( WithOutputFile(result), WithSourcePaths(sourcePaths...), @@ -1579,87 +1652,86 @@ func (p *Package) buildDocker(buildctx *buildContext, wd, result string) (res *p pkgCommands = append(pkgCommands, archiveCmd) commands[PackageBuildPhasePackage] = pkgCommands - res.Subjects = func() (res []in_toto.Subject, err error) { - defer func() { - if err != nil { - err = xerrors.Errorf("provenance get subjects: %w", err) - } - }() + + // Initialize res with commands + res = &packageBuild{ + Commands: commands, + } + + // Enhanced subjects function with better error handling and logging + res.Subjects = func() ([]in_toto.Subject, error) { + subjectLogger := log.WithField("operation", "provenance-subjects") + subjectLogger.Debug("Calculating provenance subjects for pushed images") + + // Get image digest with improved error handling out, err := exec.Command("docker", "inspect", version).CombinedOutput() if err != nil { - return nil, xerrors.Errorf("cannot determine ID of the image we just built") + return nil, xerrors.Errorf("failed to inspect image %s: %w\nOutput: %s", + version, err, string(out)) } + var inspectRes []struct { - ID string `json:"Id"` + ID string `json:"Id"` + RepoDigests []string `json:"RepoDigests"` } - err = json.Unmarshal(out, &inspectRes) - if err != nil { - return nil, xerrors.Errorf("cannot unmarshal Docker inspect response \"%s\": %w", string(out), err) + + if err := json.Unmarshal(out, &inspectRes); err != nil { + return nil, xerrors.Errorf("cannot unmarshal Docker inspect response: %w", err) } + if len(inspectRes) == 0 { - return nil, xerrors.Errorf("did not receive a proper Docker inspect response") + return nil, xerrors.Errorf("docker inspect returned empty result for image %s", version) + } + + // Try to get digest from ID first (most reliable) + var digest common.DigestSet + if inspectRes[0].ID != "" { + segs := strings.Split(inspectRes[0].ID, ":") + if len(segs) == 2 { + digest = common.DigestSet{ + segs[0]: segs[1], + } + } } - segs := strings.Split(inspectRes[0].ID, ":") - if len(segs) != 2 { - return nil, xerrors.Errorf("docker inspect returned invalid digest: %s", inspectRes[0].ID) + + // If we couldn't get digest from ID, try RepoDigests as fallback + if len(digest) == 0 && len(inspectRes[0].RepoDigests) > 0 { + for _, repoDigest := range 
inspectRes[0].RepoDigests { + parts := strings.Split(repoDigest, "@") + if len(parts) == 2 { + digestParts := strings.Split(parts[1], ":") + if len(digestParts) == 2 { + digest = common.DigestSet{ + digestParts[0]: digestParts[1], + } + break + } + } + } } - digest := common.DigestSet{ - segs[0]: segs[1], + + if len(digest) == 0 { + return nil, xerrors.Errorf("could not determine digest for image %s", version) } - res = make([]in_toto.Subject, 0, len(cfg.Image)) + subjectLogger.WithField("digest", digest).Debug("Found image digest") + + // Create subjects for each image + result := make([]in_toto.Subject, 0, len(cfg.Image)) for _, tag := range cfg.Image { - res = append(res, in_toto.Subject{ + result = append(result, in_toto.Subject{ Name: tag, Digest: digest, }) } - return res, nil + return result, nil } } return res, nil } -func dockerExportPostBuild(builddir, result string) func(sources fileset) (subj []in_toto.Subject, absResultDir string, err error) { - return func(sources fileset) (subj []in_toto.Subject, absResultDir string, err error) { - f, err := os.Open(result) - if err != nil { - return - } - defer f.Close() - - archive := tar.NewReader(f) - for { - var hdr *tar.Header - hdr, err = archive.Next() - if err == io.EOF { - break - } - if err != nil { - return - } - if hdr.Typeflag != tar.TypeReg { - continue - } - - hash := sha256.New() - _, err = io.Copy(hash, io.LimitReader(archive, hdr.Size)) - if err != nil { - return nil, builddir, err - } - - subj = append(subj, in_toto.Subject{ - Name: hdr.Name, - Digest: common.DigestSet{"sha256": hex.EncodeToString(hash.Sum(nil))}, - }) - } - - return subj, builddir, nil - } -} - // extractImageNameFromCache extracts the Docker image name of a previously built package // from the cache tar.gz file of that package. func extractImageNameFromCache(pkgName, cacheBundleFN string) (imgname string, err error) { @@ -1929,221 +2001,47 @@ func toPackageMap(in map[cache.Package]struct{}) map[*Package]struct{} { return result } -// TarOptions represents configuration options for creating tar archives -type TarOptions struct { - // OutputFile is the path to the output .tar or .tar.gz file - OutputFile string - - // SourcePaths are the files/directories to include in the archive - SourcePaths []string - - // WorkingDir changes to this directory before archiving (-C flag) - WorkingDir string - - // UseCompression determines whether to apply compression - UseCompression bool - - // FilesFrom specifies a file containing a list of files to include - FilesFrom string -} - -// WithOutputFile sets the output file path for the tar archive -func WithOutputFile(path string) func(*TarOptions) { - return func(opts *TarOptions) { - opts.OutputFile = path - } -} - -// WithSourcePaths adds files or directories to include in the archive -func WithSourcePaths(paths ...string) func(*TarOptions) { - return func(opts *TarOptions) { - opts.SourcePaths = append(opts.SourcePaths, paths...) 
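// Sketch of the digest selection used by the reworked Subjects function above: the image ID
// reported by docker inspect ("sha256:<hex>") is preferred, and RepoDigests entries
// ("repo@sha256:<hex>") serve as the fallback. A plain map stands in for the in-toto
// DigestSet type; this is illustrative only, not the implementation itself.
package main

import (
	"fmt"
	"strings"
)

func digestFromInspect(id string, repoDigests []string) map[string]string {
	if segs := strings.Split(id, ":"); len(segs) == 2 {
		return map[string]string{segs[0]: segs[1]}
	}
	for _, rd := range repoDigests {
		if parts := strings.Split(rd, "@"); len(parts) == 2 {
			if segs := strings.Split(parts[1], ":"); len(segs) == 2 {
				return map[string]string{segs[0]: segs[1]}
			}
		}
	}
	return nil
}

func main() {
	fmt.Println(digestFromInspect("sha256:1234abcd", nil))
	fmt.Println(digestFromInspect("", []string{"registry.example.com/app@sha256:feedbeef"}))
}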
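// Illustrative sketch of the PostProcess hook contract introduced above: the hook runs after
// all build phases have finished but before the package phase, and it runs whether or not
// provenance is enabled. The types below are simplified stand-ins for leeway's internal
// buildContext/Package/packageBuild, not the real implementation.
package main

import "fmt"

type packageBuildSketch struct {
	phases       []func() error              // build/test phases
	postProcess  func(buildDir string) error // optional hook, e.g. container extraction
	packagePhase func() error                // final packaging step
}

func runBuild(b packageBuildSketch, buildDir string) error {
	for _, phase := range b.phases {
		if err := phase(); err != nil {
			return err
		}
	}
	// Post-processing runs regardless of provenance settings.
	if b.postProcess != nil {
		if err := b.postProcess(buildDir); err != nil {
			return fmt.Errorf("post-processing failed: %w", err)
		}
	}
	return b.packagePhase()
}

func main() {
	b := packageBuildSketch{
		phases:       []func() error{func() error { fmt.Println("build phase"); return nil }},
		postProcess:  func(dir string) error { fmt.Println("extract container filesystem into", dir); return nil },
		packagePhase: func() error { fmt.Println("package phase"); return nil },
	}
	if err := runBuild(b, "/tmp/builddir"); err != nil {
		fmt.Println("error:", err)
	}
}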
- } -} - -// WithWorkingDir sets the working directory for the tar command -func WithWorkingDir(dir string) func(*TarOptions) { - return func(opts *TarOptions) { - opts.WorkingDir = dir - } -} - -// WithCompression enables compression for the tar archive -func WithCompression(enabled bool) func(*TarOptions) { - return func(opts *TarOptions) { - opts.UseCompression = enabled - } -} - -// WithFilesFrom specifies a file containing the list of files to archive -func WithFilesFrom(filePath string) func(*TarOptions) { - return func(opts *TarOptions) { - opts.FilesFrom = filePath - } -} - -// BuildTarCommand creates a platform-optimized tar command with the given options -func BuildTarCommand(options ...func(*TarOptions)) []string { - // Initialize default options - opts := &TarOptions{ - UseCompression: true, // Default to using compression - } - - // Apply all option functions - for _, option := range options { - option(opts) - } - - // Start building the command - cmd := []string{"tar"} - - // Add Linux-specific optimizations - if runtime.GOOS == "linux" { - cmd = append(cmd, "--sparse") - } - - // Handle files-from case specially - if opts.FilesFrom != "" { - return append(cmd, "--files-from", opts.FilesFrom) - } - - // Basic create command - if opts.UseCompression { - if !strings.HasSuffix(opts.OutputFile, ".gz") { - opts.OutputFile = opts.OutputFile + ".gz" +// checkImageExists verifies if a Docker image exists locally +func checkImageExists(imageName string) (bool, error) { + cmd := exec.Command("docker", "image", "inspect", imageName) + err := cmd.Run() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 1 { + // Exit code 1 means the image doesn't exist + return false, nil } + return false, err } - - cmd = append(cmd, "-cf", opts.OutputFile) - - // Add working directory if specified - if opts.WorkingDir != "" { - cmd = append(cmd, "-C", opts.WorkingDir) - } - - // Add compression if needed - if opts.UseCompression { - cmd = append(cmd, fmt.Sprintf("--use-compress-program=%v", compressor)) - } - - // Add source paths (or "." if none specified) - if len(opts.SourcePaths) > 0 { - cmd = append(cmd, opts.SourcePaths...) 
- } else { - cmd = append(cmd, ".") - } - - return cmd + return true, nil } -// UnTarOptions represents configuration options for extracting tar archives -type UnTarOptions struct { - // InputFile is the path to the .tar or .tar.gz file to extract - InputFile string - - // TargetDir is the directory where files should be extracted - TargetDir string - - // PreserveSameOwner determines whether to preserve file ownership - PreserveSameOwner bool - - // AutoDetectCompression will check if the file is compressed - AutoDetectCompression bool -} - -// WithInputFile sets the input archive file path -func WithInputFile(path string) func(*UnTarOptions) { - return func(opts *UnTarOptions) { - opts.InputFile = path - } -} - -// WithTargetDir sets the directory where files will be extracted -func WithTargetDir(dir string) func(*UnTarOptions) { - return func(opts *UnTarOptions) { - opts.TargetDir = dir - } -} - -// WithPreserveSameOwner enables preserving file ownership -func WithPreserveSameOwner(preserve bool) func(*UnTarOptions) { - return func(opts *UnTarOptions) { - opts.PreserveSameOwner = preserve - } -} - -// WithAutoDetectCompression enables automatic detection of file compression -func WithAutoDetectCompression(detect bool) func(*UnTarOptions) { - return func(opts *UnTarOptions) { - opts.AutoDetectCompression = detect - } -} - -// isCompressedFile checks if a file is compressed by examining its header -func isCompressedFile(filepath string) (bool, error) { - file, err := os.Open(filepath) - if err != nil { - return false, fmt.Errorf("failed to open file for compression detection: %w", err) - } - defer file.Close() - - // Read the first few bytes to check for gzip magic number (1F 8B) - header := make([]byte, 2) - _, err = file.Read(header) +// logDirectoryStructure logs the directory structure for debugging +func logDirectoryStructure(dir string, logger *log.Entry) error { + cmd := exec.Command("find", dir, "-type", "f", "-o", "-type", "d", "|", "sort") + cmd.Dir = dir + output, err := cmd.CombinedOutput() if err != nil { - return false, fmt.Errorf("failed to read file header: %w", err) + return err } - // Check for gzip magic number - return header[0] == 0x1F && header[1] == 0x8B, nil + logger.WithField("structure", string(output)).Debug("Directory structure") + return nil } -// BuildUnTarCommand creates a command to extract tar archives -func BuildUnTarCommand(options ...func(*UnTarOptions)) ([]string, error) { - // Initialize default options - opts := &UnTarOptions{ - PreserveSameOwner: false, // Default to not preserving ownership - AutoDetectCompression: true, // Default to auto-detecting compression - } - - // Apply all option functions - for _, option := range options { - option(opts) - } - - // Start building the command - cmd := []string{"tar"} - - // Add Linux-specific optimizations - if runtime.GOOS == "linux" { - cmd = append(cmd, "--sparse") - } - - // Basic extraction command - cmd = append(cmd, "-xf", opts.InputFile) - - // Add ownership flag if needed - if !opts.PreserveSameOwner { - cmd = append(cmd, "--no-same-owner") - } - - // Add target directory if specified - if opts.TargetDir != "" { - cmd = append(cmd, "-C", opts.TargetDir) - } - - // Handle compression if needed - if opts.AutoDetectCompression { - isCompressed, err := isCompressedFile(opts.InputFile) - if err != nil { - return nil, err - } - if isCompressed { - // Use the same compressor as in BuildTarCommand but with decompression flag - decompressFlag := fmt.Sprintf("--use-compress-program=%v -d", compressor) - cmd 
= append(cmd, decompressFlag) +// Check if directory is empty (indicating a failed or scratch container export) +func isEmpty(dir string) bool { + // Check for common directories that would indicate a real filesystem + commonDirs := []string{"bin", "usr", "etc", "var", "lib"} + for _, d := range commonDirs { + if _, err := os.Stat(filepath.Join(dir, d)); err == nil { + return false } } - return cmd, nil + // Also check if there are any files at all + entries, err := os.ReadDir(dir) + if err != nil { + return true + } + return len(entries) == 0 } diff --git a/pkg/leeway/build_test.go b/pkg/leeway/build_test.go index 6c98921..875dc47 100644 --- a/pkg/leeway/build_test.go +++ b/pkg/leeway/build_test.go @@ -21,6 +21,11 @@ while [[ $# -gt 0 ]]; do shift # past argument shift # past value ;; + inspect) + # Mock docker inspect to return a valid ID + echo '[{"Id":"sha256:1234567890abcdef"}]' + exit 0 + ;; *) POSITIONAL_ARGS+=("$1") # save positional arg shift # past argument @@ -35,6 +40,48 @@ if [ "${POSITIONAL_ARGS}" == "save" ]; then fi ` +// Create a mock for extractImageWithOCILibs to avoid dependency on actual Docker daemon +func init() { + // Override with a simple mock implementation for tests + leeway.ExtractImageWithOCILibs = func(destDir, imgTag string) error { + log.WithFields(log.Fields{ + "image": imgTag, + "destDir": destDir, + }).Info("Mock: Extracting container filesystem") + + // Create required directories + contentDir := filepath.Join(destDir, "content") + if err := os.MkdirAll(contentDir, 0755); err != nil { + return err + } + + // Create a mock file structure similar to what a real extraction would produce + mockFiles := map[string]string{ + filepath.Join(destDir, "imgnames.txt"): imgTag + "\n", + filepath.Join(destDir, "metadata.yaml"): "test: metadata\n", + filepath.Join(destDir, "image-metadata.json"): `{"image":"` + imgTag + `"}`, + filepath.Join(contentDir, "bin/testfile"): "test content", + filepath.Join(contentDir, "README.md"): "# Test Container", + } + + // Create directories for the mock files + for filename := range mockFiles { + if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { + return err + } + } + + // Create the mock files + for filename, content := range mockFiles { + if err := os.WriteFile(filename, []byte(content), 0644); err != nil { + return err + } + } + + return nil + } +} + func TestBuildDockerDeps(t *testing.T) { if *testutil.Dut { pth, err := os.MkdirTemp("", "") @@ -77,6 +124,7 @@ func TestBuildDockerDeps(t *testing.T) { Config: leeway.DockerPkgConfig{ Dockerfile: "pkg0.Dockerfile", Image: []string{"foobar:1234"}, + Metadata: make(map[string]string), }, }, { @@ -87,6 +135,90 @@ func TestBuildDockerDeps(t *testing.T) { }, Config: leeway.DockerPkgConfig{ Dockerfile: "pkg1.Dockerfile", + Metadata: make(map[string]string), + }, + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + test.Run() + } +} + +func TestDockerPostProcessing(t *testing.T) { + if *testutil.Dut { + pth, err := os.MkdirTemp("", "") + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(filepath.Join(pth, "docker"), []byte(dummyDocker), 0755) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { os.RemoveAll(pth) }) + + os.Setenv("PATH", pth+":"+os.Getenv("PATH")) + log.WithField("path", os.Getenv("PATH")).Debug("modified path to use dummy docker") + } + testutil.RunDUT() + + tests := []*testutil.CommandFixtureTest{ + { + Name: "docker extraction", + T: t, + Args: []string{"build", "-v", "-c", "none", "comp:pkg"}, + StderrSub: "Extracting 
container filesystem", + ExitCode: 0, + Fixture: &testutil.Setup{ + Components: []testutil.Component{ + { + Location: "comp", + Files: map[string]string{ + "Dockerfile": "FROM alpine:latest", + }, + Packages: []leeway.Package{ + { + PackageInternal: leeway.PackageInternal{ + Name: "pkg", + Type: leeway.DockerPackage, + }, + Config: leeway.DockerPkgConfig{ + Dockerfile: "Dockerfile", + // No Image entry - should trigger extraction + }, + }, + }, + }, + }, + }, + }, + { + Name: "docker content directory structure", + T: t, + Args: []string{"build", "-v", "-c", "none", "comp:content-test"}, + StderrSub: "Container files extracted successfully", + ExitCode: 0, + Fixture: &testutil.Setup{ + Components: []testutil.Component{ + { + Location: "comp", + Files: map[string]string{ + "content.Dockerfile": "FROM alpine:latest\nRUN mkdir -p /test/dir\nRUN echo 'test' > /test/file.txt", + }, + Packages: []leeway.Package{ + { + PackageInternal: leeway.PackageInternal{ + Name: "content-test", + Type: leeway.DockerPackage, + }, + Config: leeway.DockerPkgConfig{ + Dockerfile: "content.Dockerfile", + // No Image entry - should trigger extraction }, }, }, diff --git a/pkg/leeway/compression.go b/pkg/leeway/compression.go new file mode 100644 index 0000000..01929f4 --- /dev/null +++ b/pkg/leeway/compression.go @@ -0,0 +1,391 @@ +package leeway + +import ( + "fmt" + "os" + "os/exec" + "runtime" + "strings" +) + +var ( + compressor = "gzip" + decompressor = "gzip -d" + // Number of CPU cores for parallel processing + cpuCores = runtime.NumCPU() +) + +func init() { + // Check for pigz (parallel gzip) for faster compression + pigz, err := exec.LookPath("pigz") + if err == nil { + // Use all available CPU cores by default + compressor = fmt.Sprintf("%s -p %d", pigz, cpuCores) + } +} + +// CompressionAlgorithm represents supported compression algorithms +type CompressionAlgorithm string + +const ( + Gzip CompressionAlgorithm = "gzip" + Zstd CompressionAlgorithm = "zstd" + NoCompr CompressionAlgorithm = "none" +) + +// TarOptions represents configuration options for creating tar archives +type TarOptions struct { + // OutputFile is the path to the output .tar or .tar.gz file + OutputFile string + + // SourcePaths are the files/directories to include in the archive + SourcePaths []string + + // WorkingDir changes to this directory before archiving (-C flag) + WorkingDir string + + // UseCompression determines whether to apply compression + UseCompression bool + + // CompressionAlgorithm specifies which algorithm to use + CompressionAlgorithm CompressionAlgorithm + + // CompressionLevel allows setting compression level (1-9 for gzip/pigz) + CompressionLevel int + + // FilesFrom specifies a file containing a list of files to include + FilesFrom string + + // ExcludePatterns specifies patterns to exclude + ExcludePatterns []string +} + +// WithOutputFile sets the output file path for the tar archive +func WithOutputFile(path string) func(*TarOptions) { + return func(opts *TarOptions) { + opts.OutputFile = path + } +} + +// WithSourcePaths adds files or directories to include in the archive +func WithSourcePaths(paths ...string) func(*TarOptions) { + return func(opts *TarOptions) { + opts.SourcePaths = append(opts.SourcePaths, paths...) 
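// Note on the logDirectoryStructure helper added in build.go above: exec.Command does not
// involve a shell, so the "|" and "sort" arguments are handed to find verbatim rather than
// forming a pipeline. A sketch of an equivalent that sorts in Go and needs no shell at all
// (hypothetical helper name, shown for illustration):
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"sort"
)

func listDirectoryStructure(dir string) ([]string, error) {
	var entries []string
	err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		entries = append(entries, path)
		return nil
	})
	if err != nil {
		return nil, err
	}
	sort.Strings(entries)
	return entries, nil
}

func main() {
	entries, err := listDirectoryStructure(".")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, e := range entries {
		fmt.Println(e)
	}
}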
+ } +} + +// WithWorkingDir sets the working directory for the tar command +func WithWorkingDir(dir string) func(*TarOptions) { + return func(opts *TarOptions) { + opts.WorkingDir = dir + } +} + +// WithCompression enables compression for the tar archive +func WithCompression(enabled bool) func(*TarOptions) { + return func(opts *TarOptions) { + opts.UseCompression = enabled + } +} + +// WithCompressionAlgorithm specifies which compression algorithm to use +func WithCompressionAlgorithm(algo CompressionAlgorithm) func(*TarOptions) { + return func(opts *TarOptions) { + opts.CompressionAlgorithm = algo + } +} + +// WithCompressionLevel sets the compression level +func WithCompressionLevel(level int) func(*TarOptions) { + return func(opts *TarOptions) { + opts.CompressionLevel = level + } +} + +// WithFilesFrom specifies a file containing the list of files to archive +func WithFilesFrom(filePath string) func(*TarOptions) { + return func(opts *TarOptions) { + opts.FilesFrom = filePath + } +} + +// WithExcludePatterns specifies patterns to exclude from the archive +func WithExcludePatterns(patterns ...string) func(*TarOptions) { + return func(opts *TarOptions) { + opts.ExcludePatterns = append(opts.ExcludePatterns, patterns...) + } +} + +// getCompressionCommand returns the appropriate compression command based on options +func getCompressionCommand(algo CompressionAlgorithm, level int) string { + switch algo { + case Zstd: + if level > 0 { + return fmt.Sprintf("zstd -%d", level) + } + return "zstd" + case NoCompr: + return "" + default: // Gzip or fallback + if level > 0 { + return fmt.Sprintf("gzip -%d", level) + } + return compressor + } +} + +// getDecompressionCommand returns the appropriate decompression command based on file extension +func getDecompressionCommand(filename string) string { + switch { + case strings.HasSuffix(filename, ".gz"): + return decompressor + case strings.HasSuffix(filename, ".zst"): + return "zstd -d" + default: + return "" + } +} + +// getFileExtension returns the appropriate file extension based on compression algorithm +func getFileExtension(algo CompressionAlgorithm) string { + switch algo { + case Zstd: + return ".zst" + case NoCompr: + return "" + default: // Gzip, Pigz or fallback + return ".gz" + } +} + +// BuildTarCommand creates a platform-optimized tar command with the given options +func BuildTarCommand(options ...func(*TarOptions)) []string { + // Initialize default options + opts := &TarOptions{ + UseCompression: true, // Default to using compression + CompressionAlgorithm: Gzip, // Default to gzip + CompressionLevel: 0, // Default compression level (0 = default for the algorithm) + } + + // Apply all option functions + for _, option := range options { + option(opts) + } + + // Start building the command + cmd := []string{"tar"} + + // Add verbose flag if needed + // cmd = append(cmd, "-v") + + // Add Linux-specific optimizations + if runtime.GOOS == "linux" { + cmd = append(cmd, "--sparse") + } + + // Handle files-from case specially + if opts.FilesFrom != "" { + cmd = append(cmd, "--files-from", opts.FilesFrom) + } + + // Basic create command + cmd = append(cmd, "-cf") + + // Add file extension based on compression algorithm if needed + if opts.UseCompression && opts.CompressionAlgorithm != NoCompr { + ext := getFileExtension(opts.CompressionAlgorithm) + if !strings.HasSuffix(opts.OutputFile, ext) { + opts.OutputFile = opts.OutputFile + ext + } + } + cmd = append(cmd, opts.OutputFile) + + // Add working directory if specified + if opts.WorkingDir 
!= "" { + cmd = append(cmd, "-C", opts.WorkingDir) + } + + // Add exclude patterns if any + for _, pattern := range opts.ExcludePatterns { + cmd = append(cmd, "--exclude", pattern) + } + + // Add compression if needed + if opts.UseCompression && opts.CompressionAlgorithm != NoCompr { + comprCmd := getCompressionCommand(opts.CompressionAlgorithm, opts.CompressionLevel) + if comprCmd != "" { + cmd = append(cmd, fmt.Sprintf("--use-compress-program=%v", comprCmd)) + } + } + + // Add source paths (or "." if none specified) + if len(opts.SourcePaths) > 0 { + cmd = append(cmd, opts.SourcePaths...) + } else { + cmd = append(cmd, ".") + } + + return cmd +} + +// UnTarOptions represents configuration options for extracting tar archives +type UnTarOptions struct { + // InputFile is the path to the .tar or .tar.gz file to extract + InputFile string + + // TargetDir is the directory where files should be extracted + TargetDir string + + // PreserveSameOwner determines whether to preserve file ownership + PreserveSameOwner bool + + // AutoDetectCompression will check if the file is compressed + AutoDetectCompression bool + + // Verbose enables verbose output + Verbose bool + + // IncludePatterns specifies patterns to include during extraction + IncludePatterns []string +} + +// WithInputFile sets the input archive file path +func WithInputFile(path string) func(*UnTarOptions) { + return func(opts *UnTarOptions) { + opts.InputFile = path + } +} + +// WithTargetDir sets the directory where files will be extracted +func WithTargetDir(dir string) func(*UnTarOptions) { + return func(opts *UnTarOptions) { + opts.TargetDir = dir + } +} + +// WithPreserveSameOwner enables preserving file ownership +func WithPreserveSameOwner(preserve bool) func(*UnTarOptions) { + return func(opts *UnTarOptions) { + opts.PreserveSameOwner = preserve + } +} + +// WithAutoDetectCompression enables automatic detection of file compression +func WithAutoDetectCompression(detect bool) func(*UnTarOptions) { + return func(opts *UnTarOptions) { + opts.AutoDetectCompression = detect + } +} + +// WithVerboseExtraction enables verbose output during extraction +func WithVerboseExtraction(verbose bool) func(*UnTarOptions) { + return func(opts *UnTarOptions) { + opts.Verbose = verbose + } +} + +// WithIncludePatterns specifies patterns to include during extraction +func WithIncludePatterns(patterns ...string) func(*UnTarOptions) { + return func(opts *UnTarOptions) { + opts.IncludePatterns = append(opts.IncludePatterns, patterns...) 
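// Illustrative usage sketch for the tar options above: BuildTarCommand assembles the argv that
// the package phase executes. With compression enabled and no explicit algorithm it falls back
// to the package-level compressor (plain gzip unless pigz was detected at init time), appends
// the matching extension to the output file, and adds --sparse on Linux. The printed argv shown
// in the comments is an expectation derived from the code above, not captured output.
package leeway

import "fmt"

func ExampleBuildTarCommand() {
	// Default algorithm: gzip (or pigz when available).
	fmt.Println(BuildTarCommand(
		WithOutputFile("/tmp/result.tar"),
		WithWorkingDir("/tmp/container"),
		WithCompression(true),
	))
	// On a Linux host without pigz this prints roughly:
	// [tar --sparse -cf /tmp/result.tar.gz -C /tmp/container --use-compress-program=gzip .]

	// Explicit zstd with a compression level switches the extension and the compress program.
	fmt.Println(BuildTarCommand(
		WithOutputFile("/tmp/result.tar"),
		WithWorkingDir("/tmp/container"),
		WithCompressionAlgorithm(Zstd),
		WithCompressionLevel(19),
	))
	// Roughly: [tar --sparse -cf /tmp/result.tar.zst -C /tmp/container --use-compress-program=zstd -19 .]
}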
+ } +} + +// isCompressedFile checks if a file is compressed by examining its header +func isCompressedFile(filepath string) (CompressionAlgorithm, error) { + file, err := os.Open(filepath) + if err != nil { + return NoCompr, fmt.Errorf("failed to open file for compression detection: %w", err) + } + defer file.Close() + + // Read the first few bytes to check for magic numbers + header := make([]byte, 4) + _, err = file.Read(header) + if err != nil { + return NoCompr, fmt.Errorf("failed to read file header: %w", err) + } + + // Check for gzip magic number (1F 8B) + if header[0] == 0x1F && header[1] == 0x8B { + return Gzip, nil + } + + // Check for zstd magic number (28 b5 2f fd) + if header[0] == 0x28 && header[1] == 0xb5 && header[2] == 0x2f && header[3] == 0xfd { + return Zstd, nil + } + + return NoCompr, nil +} + +// BuildUnTarCommand creates a command to extract tar archives +func BuildUnTarCommand(options ...func(*UnTarOptions)) ([]string, error) { + // Initialize default options + opts := &UnTarOptions{ + PreserveSameOwner: false, // Default to not preserving ownership + AutoDetectCompression: true, // Default to auto-detecting compression + Verbose: false, // Default to non-verbose output + } + + // Apply all option functions + for _, option := range options { + option(opts) + } + + // Start building the command + cmd := []string{"tar"} + + // Add verbose flag if requested + if opts.Verbose { + cmd = append(cmd, "-v") + } + + // Add Linux-specific optimizations + if runtime.GOOS == "linux" { + cmd = append(cmd, "--sparse") + } + + // Basic extraction command + cmd = append(cmd, "-xf", opts.InputFile) + + // Add ownership flag if needed + if !opts.PreserveSameOwner { + cmd = append(cmd, "--no-same-owner") + } + + // Add target directory if specified + if opts.TargetDir != "" { + cmd = append(cmd, "-C", opts.TargetDir) + } + + // Add include patterns if any + cmd = append(cmd, opts.IncludePatterns...) 
+ + // Handle compression if needed + if opts.AutoDetectCompression { + // First check by file extension for efficiency + decomprCmd := getDecompressionCommand(opts.InputFile) + + // If no match by extension, try to detect by file header + if decomprCmd == "" { + comprAlgo, err := isCompressedFile(opts.InputFile) + if err != nil { + return nil, err + } + + switch comprAlgo { + case Gzip: + decomprCmd = decompressor + case Zstd: + decomprCmd = "zstd -d" + } + } + + if decomprCmd != "" { + cmd = append(cmd, fmt.Sprintf("--use-compress-program=%v", decomprCmd)) + } + } + + return cmd, nil +} diff --git a/pkg/leeway/container_image.go b/pkg/leeway/container_image.go new file mode 100644 index 0000000..1afe5c1 --- /dev/null +++ b/pkg/leeway/container_image.go @@ -0,0 +1,414 @@ +package leeway + +import ( + "archive/tar" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/daemon" + "github.com/google/go-containerregistry/pkg/v1/mutate" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" +) + +// ExtractImageFunc is the type for the image extraction function +type ExtractImageFunc func(destDir, imgTag string) error + +// ExtractImageWithOCILibs is the function used to extract Docker images +// It can be replaced in tests for mocking +var ExtractImageWithOCILibs ExtractImageFunc = extractImageWithOCILibsImpl + +// extractImageWithOCILibsImpl extracts a Docker image's filesystem content +// using the OCI distribution and image libraries +func extractImageWithOCILibsImpl(destDir, imgTag string) error { + // Create destination directory if it doesn't exist + if err := os.MkdirAll(destDir, 0755); err != nil { + return fmt.Errorf("failed to create destination directory: %w", err) + } + + log.WithFields(log.Fields{ + "image": imgTag, + "dest": destDir, + }).Debug("Extracting image using OCI libraries") + + // Create a temporary directory for initial extraction + tempExtractDir, err := os.MkdirTemp(destDir, "extract-temp-") + if err != nil { + return fmt.Errorf("failed to create temporary extraction directory: %w", err) + } + defer os.RemoveAll(tempExtractDir) // Clean up temp dir after we're done + + // Parse the image reference + ref, err := name.ParseReference(imgTag) + if err != nil { + return fmt.Errorf("parsing image reference: %w", err) + } + + // Get the image from the local Docker daemon + img, err := daemon.Image(ref) + if err != nil { + return fmt.Errorf("getting image from daemon: %w", err) + } + + // Get image config to check if it's a scratch image + config, err := img.ConfigFile() + if err != nil { + return fmt.Errorf("getting image config: %w", err) + } + + // Get image digest for metadata + digest, err := img.Digest() + if err != nil { + log.WithError(err).Warn("Failed to get image digest") + } + + // Extract metadata to the final destination directory + if err := extractImageMetadata(destDir, imgTag, config, digest); err != nil { + log.WithError(err).Warn("Failed to extract image metadata") + } + + // Get the layers + layers, err := img.Layers() + if err != nil { + return fmt.Errorf("getting image layers: %w", err) + } + + // Check if this is a scratch image (no layers) + if len(layers) == 0 { + log.Info("Image appears to be a scratch image with no layers") + return handleScratchImage(destDir, imgTag, config, digest) + } + + log.WithField("layerCount", len(layers)).Debug("Extracting image layers") + + // Extract 
+	// Extract the filesystem by flattening the layers to the temp directory
+	fs := mutate.Extract(img)
+	defer fs.Close()
+
+	// Extract the tar contents to the temporary directory
+	if err := extractTarToDir(fs, tempExtractDir); err != nil {
+		return fmt.Errorf("extracting filesystem: %w", err)
+	}
+
+	// Check if extraction produced any files
+	if isEmpty(tempExtractDir) {
+		log.Warn("Image extraction produced empty filesystem - might be a scratch or minimal image")
+		return handleScratchImage(destDir, imgTag, config, digest)
+	}
+
+	// Create content directory in the final destination
+	contentDir := filepath.Join(destDir, "content")
+	if err := os.MkdirAll(contentDir, 0755); err != nil {
+		return fmt.Errorf("failed to create content directory: %w", err)
+	}
+
+	// Move content from temp dir to content dir
+	if err := organizeContainerContent(tempExtractDir, contentDir); err != nil {
+		return fmt.Errorf("failed to organize container content: %w", err)
+	}
+
+	log.Debug("Successfully extracted image contents")
+	return nil
+}
+
+// extractImageMetadata extracts the image metadata and saves it to files
+func extractImageMetadata(destDir, imgTag string, config *v1.ConfigFile, digest v1.Hash) error {
+	// Create imgnames.txt with the image tag
+	if err := os.WriteFile(filepath.Join(destDir, dockerImageNamesFiles), []byte(imgTag+"\n"), 0644); err != nil {
+		return fmt.Errorf("creating imgnames.txt: %w", err)
+	}
+
+	// Create metadata files with image information
+	metadata := map[string]interface{}{
+		"image":      imgTag,
+		"digest":     digest.String(),
+		"created":    config.Created.Time,
+		"os":         config.OS,
+		"arch":       config.Architecture,
+		"env":        config.Config.Env,
+		"cmd":        config.Config.Cmd,
+		"entrypoint": config.Config.Entrypoint,
+		"labels":     config.Config.Labels,
+	}
+
+	metadataBytes, err := json.MarshalIndent(metadata, "", " ")
+	if err != nil {
+		return fmt.Errorf("marshaling metadata: %w", err)
+	}
+
+	if err := os.WriteFile(filepath.Join(destDir, "image-metadata.json"), metadataBytes, 0644); err != nil {
+		return fmt.Errorf("writing image-metadata.json: %w", err)
+	}
+
+	return nil
+}
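+
+// For illustration only, the image-metadata.json produced by extractImageMetadata
+// looks roughly like this (abridged; the values below are made-up examples, not
+// the output for any real image):
+//
+//	{
+//	  "arch": "amd64",
+//	  "cmd": ["/bin/sh"],
+//	  "created": "2024-01-01T00:00:00Z",
+//	  "digest": "sha256:…",
+//	  "image": "registry.example.com/app:latest",
+//	  "os": "linux"
+//	}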
+
+// handleScratchImage creates appropriate files for a scratch-based image
+func handleScratchImage(destDir, imgTag string, config *v1.ConfigFile, digest v1.Hash) error {
+	log.WithField("image", imgTag).Info("Creating marker files for empty/scratch image")
+
+	// Create content directory
+	contentDir := filepath.Join(destDir, "content")
+	if err := os.MkdirAll(contentDir, 0755); err != nil {
+		return fmt.Errorf("creating content directory: %w", err)
+	}
+
+	// Create a marker file in the content directory
+	markerContent := fmt.Sprintf("Empty or scratch-based Docker image: %s\nDigest: %s\n",
+		imgTag, digest.String())
+	if err := os.WriteFile(filepath.Join(contentDir, ".empty-image-marker"), []byte(markerContent), 0644); err != nil {
+		return fmt.Errorf("creating empty image marker: %w", err)
+	}
+
+	// Create a readme with more details in the content directory
+	readmeContent := fmt.Sprintf(`# Empty Container Image
+
+This archive represents a Docker image that appears to be empty or scratch-based: %s
+
+Image information:
+- Digest: %s
+- Created: %s
+- Architecture: %s/%s
+`, imgTag, digest.String(), config.Created.Format("2006-01-02T15:04:05Z07:00"),
+		config.OS, config.Architecture)
+
+	// Add command information if available
+	if len(config.Config.Cmd) > 0 {
+		readmeContent += fmt.Sprintf("- Command: %v\n", config.Config.Cmd)
+	}
+	if len(config.Config.Entrypoint) > 0 {
+		readmeContent += fmt.Sprintf("- Entrypoint: %v\n", config.Config.Entrypoint)
+	}
+
+	if err := os.WriteFile(filepath.Join(contentDir, "README.md"), []byte(readmeContent), 0644); err != nil {
+		return fmt.Errorf("creating README: %w", err)
+	}
+
+	// Create metadata files in the root directory
+	if err := extractImageMetadata(destDir, imgTag, config, digest); err != nil {
+		log.WithError(err).Warn("Failed to extract image metadata for scratch image")
+	}
+
+	return nil
+}
+
+// extractTarToDir extracts a tar archive to a directory
+func extractTarToDir(r io.Reader, destDir string) error {
+	tr := tar.NewReader(r)
+	for {
+		header, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		// Skip whiteout markers (files named ".wh.*"), which Docker/OCI layers use to remove files
+		if strings.HasPrefix(filepath.Base(header.Name), ".wh.") {
+			continue
+		}
+
+		// Get the target path, with safety checks
+		target := filepath.Join(destDir, header.Name)
+
+		// Prevent directory traversal attacks (e.g. entries whose names contain "..")
+		cleanDest := filepath.Clean(destDir)
+		if target != cleanDest && !strings.HasPrefix(target, cleanDest+string(os.PathSeparator)) {
+			continue
+		}
+
+		switch header.Typeflag {
+		case tar.TypeDir:
+			// Create directory
+			if err := os.MkdirAll(target, 0755); err != nil {
+				return err
+			}
+
+		case tar.TypeReg:
+			// Create containing directory
+			if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+				return err
+			}
+
+			// Create (or truncate) the file
+			f, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(header.Mode))
+			if err != nil {
+				return err
+			}
+
+			if _, err := io.Copy(f, tr); err != nil {
+				f.Close()
+				return err
+			}
+			f.Close()
+
+		case tar.TypeSymlink:
+			// Create containing directory
+			if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+				return err
+			}
+
+			// Create symlink (with safety check)
+			linkTarget := header.Linkname
+			if filepath.IsAbs(linkTarget) {
+				// Convert absolute symlinks to relative ones contained within the extract directory
+				linkTarget = filepath.Join(destDir, linkTarget)
+				if !strings.HasPrefix(linkTarget, destDir) {
+					// Skip symlinks that point outside the destination directory
+					continue
+				}
+				linkTarget, _ = filepath.Rel(filepath.Dir(target), linkTarget)
+			}
+
+			if err := os.Symlink(linkTarget, target); err != nil {
+				// Ignore errors on symlinks, which are common in cross-platform extraction
+				log.WithError(err).Debugf("Failed to create symlink %s -> %s", target, linkTarget)
+			}
+		}
+	}
+
+	return nil
+}
+
+// createDockerMetadataFiles creates the required metadata files in the destination directory.
+// It writes imgnames.txt and metadata.yaml from the provided structured metadata.
+func createDockerMetadataFiles(destDir string, imgTag string, metadata map[string]string) error {
+	// Create imgnames.txt with the image tag
+	if err := os.WriteFile(filepath.Join(destDir, dockerImageNamesFiles), []byte(imgTag+"\n"), 0644); err != nil {
+		return fmt.Errorf("failed to create imgnames.txt: %w", err)
+	}
+
+	// Create metadata.yaml with the provided metadata
+	// Use empty map if metadata is nil
+	metadataToWrite := metadata
+	if metadataToWrite == nil {
+		metadataToWrite = make(map[string]string)
+	}
+
+	metadataBytes, err := yaml.Marshal(metadataToWrite)
+	if err != nil {
+		return fmt.Errorf("failed to marshal metadata: %w", err)
+	}
+
+	if err := os.WriteFile(filepath.Join(destDir, dockerMetadataFile), metadataBytes, 0644); err != nil {
+		return fmt.Errorf("failed to create metadata.yaml: %w", err)
+	}
+
+	return nil
+}
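+
+// For orientation, a successful extraction is expected to leave destDir looking
+// roughly like this (names follow the constants used above; which metadata files
+// are present depends on the helpers that were called):
+//
+//	destDir/
+//	├── imgnames.txt          image tag(s), one per line
+//	├── image-metadata.json   structured image metadata (extractImageMetadata)
+//	├── metadata.yaml         key/value metadata (createDockerMetadataFiles)
+//	└── content/              the flattened image filesystem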
+
+// organizeContainerContent moves filesystem content from the source directory to the content directory.
+// This function is used to organize Docker image extraction results.
+func organizeContainerContent(sourceDir, contentDir string) error {
+	entries, err := os.ReadDir(sourceDir)
+	if err != nil {
+		return fmt.Errorf("reading source directory: %w", err)
+	}
+
+	// Process all entries in the source directory
+	for _, entry := range entries {
+		sourcePath := filepath.Join(sourceDir, entry.Name())
+
+		// Skip metadata files - they'll be generated directly in the final directory
+		if entry.Name() == dockerImageNamesFiles ||
+			entry.Name() == dockerMetadataFile ||
+			entry.Name() == "image-metadata.json" {
+			continue
+		}
+
+		// Move content to the contentDir
+		targetPath := filepath.Join(contentDir, entry.Name())
+
+		if entry.IsDir() {
+			// For directories, create them and copy contents recursively
+			if err := os.MkdirAll(targetPath, 0755); err != nil {
+				return fmt.Errorf("creating directory %s: %w", targetPath, err)
+			}
+
+			// Read directory contents
+			dirEntries, err := os.ReadDir(sourcePath)
+			if err != nil {
+				return fmt.Errorf("reading directory %s: %w", sourcePath, err)
+			}
+
+			// Move each item in the directory
+			for _, dirEntry := range dirEntries {
+				sourceItemPath := filepath.Join(sourcePath, dirEntry.Name())
+				targetItemPath := filepath.Join(targetPath, dirEntry.Name())
+
+				if err := os.Rename(sourceItemPath, targetItemPath); err != nil {
+					// If rename fails (e.g., cross-device), fall back to copy and remove
+					if err := copyFileOrDirectory(sourceItemPath, targetItemPath); err != nil {
+						return fmt.Errorf("copying %s to %s: %w", sourceItemPath, targetItemPath, err)
+					}
+					os.RemoveAll(sourceItemPath)
+				}
+			}
+		} else {
+			// For files, move them directly
+			if err := os.Rename(sourcePath, targetPath); err != nil {
+				// If rename fails, fall back to copy and remove
+				if err := copyFileOrDirectory(sourcePath, targetPath); err != nil {
+					return fmt.Errorf("copying %s to %s: %w", sourcePath, targetPath, err)
+				}
+				os.Remove(sourcePath)
+			}
+		}
+	}
+
+	return nil
+}
+
+// copyFileOrDirectory recursively copies a file, directory, or symlink
+func copyFileOrDirectory(src, dst string) error {
+	// Use Lstat so symlinks are detected instead of being followed
+	sourceInfo, err := os.Lstat(src)
+	if err != nil {
+		return err
+	}
+
+	// Recreate symlinks rather than copying what they point to
+	if sourceInfo.Mode()&os.ModeSymlink != 0 {
+		linkTarget, err := os.Readlink(src)
+		if err != nil {
+			return err
+		}
+		return os.Symlink(linkTarget, dst)
+	}
+
+	if sourceInfo.IsDir() {
+		// Create destination directory
+		if err := os.MkdirAll(dst, sourceInfo.Mode()); err != nil {
+			return err
+		}
+
+		// Read source directory
+		entries, err := os.ReadDir(src)
+		if err != nil {
+			return err
+		}
+
+		// Copy each entry
+		for _, entry := range entries {
+			sourcePath := filepath.Join(src, entry.Name())
+			destPath := filepath.Join(dst, entry.Name())
+			if err := copyFileOrDirectory(sourcePath, destPath); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	// Handle regular files
+	source, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer source.Close()
+
+	// Create destination file
+	destination, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, sourceInfo.Mode())
+	if err != nil {
+		return err
+	}
+	defer destination.Close()
+
+	_, err = io.Copy(destination, source)
+	return err
+}