 AMD64_DEBIAN_KERNEL_HEADERS_URL = "http://deb.debian.org/debian-security/pool/updates/main/l/linux-5.10/linux-headers-5.10.0-0.deb10.28-amd64_5.10.209-2~deb10u1_amd64.deb"
 ARM64_DEBIAN_KERNEL_HEADERS_URL = "http://deb.debian.org/debian-security/pool/updates/main/l/linux-5.10/linux-headers-5.10.0-0.deb10.28-arm64_5.10.209-2~deb10u1_arm64.deb"

-DOCKER_REGISTRY = "486234852809.dkr.ecr.us-east-1.amazonaws.com"
 DOCKER_BASE_IMAGES = {
-    "x64": f"{DOCKER_REGISTRY}/ci/datadog-agent-buildimages/linux-glibc-2.17-x64",
-    "arm64": f"{DOCKER_REGISTRY}/ci/datadog-agent-buildimages/linux-glibc-2.23-arm64",
+    "x64": f"registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-17-x64",
+    "arm64": f"registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-23-arm64",
 }
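For context, a minimal sketch of how a caller might consume the per-arch mapping above. The registry values and keys come from this hunk; the `key_for` helper and the standalone framing are hypothetical, not part of this file:

    # Hypothetical consumer of DOCKER_BASE_IMAGES; keys mirror the dict above.
    DOCKER_BASE_IMAGES = {
        "x64": "registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-17-x64",
        "arm64": "registry.ddbuild.io/ci/datadog-agent-buildimages/linux-glibc-2-23-arm64",
    }

    def key_for(go_arch: str) -> str:
        # Docker and Go call 64-bit x86 "amd64"; the build images use "x64".
        return "x64" if go_arch == "amd64" else go_arch

    assert key_for("amd64") == "x64"
    print(DOCKER_BASE_IMAGES[key_for("arm64")])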
@@ -49,21 +48,6 @@ def get_docker_image_name(ctx: Context, container: str) -> str:
     return data[0]["Config"]["Image"]


-def has_docker_auth_helpers() -> bool:
-    docker_config = Path("~/.docker/config.json").expanduser()
-    if not docker_config.exists():
-        return False
-
-    try:
-        with open(docker_config) as f:
-            config = json.load(f)
-    except json.JSONDecodeError:
-        # Invalid JSON (or empty file), we don't have the helper
-        return False
-
-    return DOCKER_REGISTRY in config.get("credHelpers", {})
-
-
 class CompilerImage:
     def __init__(self, ctx: Context, arch: Arch):
         self.ctx = ctx
@@ -115,15 +99,18 @@ def ensure_version(self):
         warn(f"[!] Running compiler image {image_used} is different from the expected {self.image}, will restart")
         self.start()

-    def exec(self, cmd: str, user="compiler", verbose=True, run_dir: PathOrStr | None = None, allow_fail=False):
+    def exec(self, cmd: str, user="compiler", verbose=True, run_dir: PathOrStr | None = None, allow_fail=False, force_color=True):
         if run_dir:
             cmd = f"cd {run_dir} && {cmd}"

         self.ensure_running()
+        color_env = "-e FORCE_COLOR=1"
+        if not force_color:
+            color_env = ""

         # Set FORCE_COLOR=1 so that termcolor works in the container
         return self.ctx.run(
-            f"docker exec -u {user} -i -e FORCE_COLOR=1 {self.name} bash -c \"{cmd}\"",
+            f"docker exec -u {user} -i {color_env} {self.name} bash -l -c \"{cmd}\"",
             hide=(not verbose),
             warn=allow_fail,
         )
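The `force_color` plumbing above, restated as a self-contained sketch. The `build_exec_cmd` helper is hypothetical; the real code builds the string inline in `exec()`:

    def build_exec_cmd(name: str, cmd: str, user: str = "compiler", force_color: bool = True) -> str:
        # FORCE_COLOR=1 keeps termcolor output colored even without a TTY;
        # callers whose output is parsed verbatim pass force_color=False.
        color_env = "-e FORCE_COLOR=1" if force_color else ""
        return f'docker exec -u {user} -i {color_env} {name} bash -l -c "{cmd}"'

    assert "FORCE_COLOR" in build_exec_cmd("kmt-compiler", "ls")
    assert "FORCE_COLOR" not in build_exec_cmd("kmt-compiler", "ls", force_color=False)

The switch from `bash -c` to `bash -l -c` runs commands under a login shell, presumably so environment set up in the user's profile (for example the DD_CC/DD_CXX exports appended to ~/.bashrc in `start()`) applies; the diff itself does not state the motivation.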
@@ -139,23 +126,14 @@ def start(self) -> None:
         # Check if the image exists
         res = self.ctx.run(f"docker image inspect {self.image}", hide=True, warn=True)
         if res is None or not res.ok:
-            info(f"[!] Image {self.image} not found, logging in and pulling...")
-
-            if has_docker_auth_helpers():
-                # With ddtool helpers (installed with ddtool auth helpers install), docker automatically
-                # pulls credentials from ddtool, and we require the aws-vault context to pull
-                docker_pull_auth = "aws-vault exec sso-build-stable-developer -- "
-            else:
-                # Without the helpers, we need to get the password and login manually to docker
-                self.ctx.run(
-                    "aws-vault exec sso-build-stable-developer -- aws ecr --region us-east-1 get-login-password | docker login --username AWS --password-stdin 486234852809.dkr.ecr.us-east-1.amazonaws.com"
-                )
-                docker_pull_auth = ""
-
-            self.ctx.run(f"{docker_pull_auth}docker pull {self.image}")
+            info(f"[!] Image {self.image} not found, pulling...")
+            self.ctx.run(f"docker pull {self.image}")

+        platform = ""
+        if self.arch != Arch.local():
+            platform = f"--platform linux/{self.arch.go_arch}"
         res = self.ctx.run(
-            f"docker run -d --restart always --name {self.name} "
+            f"docker run {platform} -d --restart always --name {self.name} "
             f"--mount type=bind,source={os.getcwd()},target={CONTAINER_AGENT_PATH} "
             f"{self.image} sleep \"infinity\"",
             warn=True,
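The `--platform` selection above as an isolated sketch. The free-function form is illustrative; `Arch.local()` and `go_arch` are the helpers this diff already uses:

    def platform_flag(container_go_arch: str, host_go_arch: str) -> str:
        # Pass --platform only for foreign-arch containers, so the common
        # native case keeps Docker's default platform resolution.
        if container_go_arch == host_go_arch:
            return ""
        return f"--platform linux/{container_go_arch}"

    assert platform_flag("arm64", "amd64") == "--platform linux/arm64"
    assert platform_flag("amd64", "amd64") == ""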
@@ -183,51 +161,18 @@ def start(self) -> None:
         )

         self.exec("chmod a+rx /root", user="root")  # Some binaries will be in /root and need to be readable
-        self.exec("apt install sudo", user="root")
+        self.exec("apt-get install -y --no-install-recommends sudo", user="root")
         self.exec("usermod -aG sudo compiler && echo 'compiler ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers", user="root")
-        self.exec("echo conda activate ddpy3 >> /home/compiler/.bashrc", user="compiler")
+        self.exec(f"cp /root/.bashrc /home/compiler/.bashrc && chown {uid}:{gid} /home/compiler/.bashrc", user="root")
+        self.exec("mkdir ~/.cargo && touch ~/.cargo/env", user="compiler")
+        self.exec("dda self telemetry disable", user="compiler", force_color=False)
         self.exec(f"install -d -m 0777 -o {uid} -g {uid} /go", user="root")
+        self.exec(f"echo export DD_CC={self.arch.gcc_arch}-unknown-linux-gnu-gcc >> /home/compiler/.bashrc", user="compiler")
+        self.exec(f"echo export DD_CXX={self.arch.gcc_arch}-unknown-linux-gnu-g++ >> /home/compiler/.bashrc", user="compiler")

-        self.prepare_for_cross_compile()
-
-    def ensure_ready_for_cross_compile(self):
-        res = self.exec("test -f /tmp/cross-compile-ready", user="root", allow_fail=True)
-        if res is None or not res.ok:
-            info("[*] Compiler image not ready for cross-compilation, preparing...")
-            self.prepare_for_cross_compile()
-
-    def prepare_for_cross_compile(self):
-        target = ARCH_AMD64 if self.arch == ARCH_ARM64 else ARCH_ARM64
-
-        # Hardcoded links to the header packages for each architecture. Why do this and not have something more automated?
-        # 1. While right now the URLs are similar and we'd only need a single link with variable replacement, this might
-        #    change as the repository layout is not under our control.
-        # 2. Automatic detection of these URLs is not direct (querying the package repo APIs is not trivial) and we'd need some
-        #    level of hard-coding some URLs or assumptions anyways.
-        # 3. Even if someone forgets to update these URLs, it's not a big deal, as we're building inside of a Docker image which will
-        #    likely have a different kernel than the target system where the built eBPF files are going to run anyways.
-        header_package_urls: dict[Arch, str] = {
-            ARCH_AMD64: AMD64_DEBIAN_KERNEL_HEADERS_URL,
-            ARCH_ARM64: ARM64_DEBIAN_KERNEL_HEADERS_URL,
-        }
-
-        header_package_path = "/tmp/headers.deb"
-        self.exec(f"wget -O {header_package_path} {header_package_urls[target]}")
-
-        # Uncompress the package in the root directory, so that we have access to the headers
-        # We cannot install because the architecture will not match
-        # Extract into a .tar file and then use tar to extract the contents to avoid issues
-        # with dpkg-deb not respecting symlinks.
-        self.exec(f"dpkg-deb --fsys-tarfile {header_package_path} > {header_package_path}.tar", user="root")
-        self.exec(f"tar -h -xf {header_package_path}.tar -C /", user="root")
-
-        # Install the corresponding arch compilers
-        self.exec(f"apt update && apt install -y gcc-{target.gcc_arch.replace('_', '-')}-linux-gnu", user="root")
-        self.exec("touch /tmp/cross-compile-ready")  # Signal that we're ready for cross-compilation
-
-
-def get_compiler(ctx: Context):
-    cc = CompilerImage(ctx, Arch.local())
+def get_compiler(ctx: Context, arch_obj: Arch):
+    cc = CompilerImage(ctx, arch_obj)
     cc.ensure_version()
     cc.ensure_running()
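A possible call site for the new signature, assuming `get_compiler` returns the prepared `CompilerImage` (its return statement falls outside this hunk) and that this module's `Arch` helpers are in scope:

    # Illustrative only; all names come from this diff, not a confirmed API.
    def compilers(ctx):
        native = get_compiler(ctx, Arch.local())  # what the old zero-argument version did
        arm = get_compiler(ctx, ARCH_ARM64)       # callers can now pick the arch explicitly
        return native, arm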