@@ -201,9 +201,97 @@ load_image() {
201201 fi
202202 fi
203203
204-    # Load the image into the KIND cluster
205-    kind load docker-image "$WVA_IMAGE_REPO:$WVA_IMAGE_TAG" --name "$CLUSTER_NAME"
206-    log_success "Image '$WVA_IMAGE_REPO:$WVA_IMAGE_TAG' loaded into KIND cluster '$CLUSTER_NAME'"
204+ # Load the image into the KIND cluster.
205+ # Try `kind load docker-image` first. If it fails (common with Docker Desktop's
206+ # containerd image store where `docker save` chokes on multi-platform manifests),
207+ # fall back to pulling the image directly into each KIND node's containerd.
208+    local full_image="$WVA_IMAGE_REPO:$WVA_IMAGE_TAG"
209+    local load_stderr
210+    if load_stderr="$(kind load docker-image "$full_image" --name "$CLUSTER_NAME" 2>&1)"; then
211+        log_success "Image '$full_image' loaded into KIND cluster '$CLUSTER_NAME'"
212+        return
213+    fi
214+
215+ # Only fall back to the crictl/ctr path for the known containerd image store
216+ # issue (docker save fails on multi-platform manifests, kubernetes-sigs/kind#3795).
217+ # For any other error, report it and abort.
218+    if ! echo "$load_stderr" | grep -qiE "docker save|multi-?platform|manifest|content digest|no such image|not found"; then
219+        log_error "'kind load docker-image' failed:"
220+        log_error "$load_stderr"
221+ exit 1
222+ fi
223+
224+    log_warning "'kind load docker-image' failed (containerd image store issue) — falling back to pulling directly into KIND nodes"
225+    log_info "kind load stderr: $load_stderr"
226+
227+ # Pull the image directly into each KIND node's containerd, bypassing
228+ # Docker Desktop entirely. This avoids the `docker save` multi-platform
229+ # manifest issue (kubernetes-sigs/kind#3795).
230+ local nodes
231+    nodes="$(kind get nodes --name "$CLUSTER_NAME")" || {
232+        log_error "No nodes found in KIND cluster '$CLUSTER_NAME'"
233+        exit 1
234+    }
235+    if [ -z "$nodes" ]; then
236+        log_error "No nodes found in KIND cluster '$CLUSTER_NAME'"
237+        exit 1
238+ fi
239+
240+ # Detect if an image reference is qualified with an explicit registry hostname.
241+ # Heuristic used by Docker/containerd/podman:
242+ # If the first '/'-separated segment contains a '.', a ':', or equals 'localhost',
243+ # it is treated as a registry hostname (e.g., quay.io/foo, registry.k8s.io/pause,
244+ # localhost:5000/myimg).
245+ local first_segment
246+    first_segment="${full_image%%/*}"
247+    local has_explicit_registry=false
248+    case "$first_segment" in
249+        *.*|*:*|localhost) has_explicit_registry=true ;;
250+ esac
251+
252+ local successful_nodes=()
253+    for node in $nodes; do
254+        log_info "Pulling image on node '$node'..."
255+        local pull_stderr
256+        if pull_stderr="$(docker exec "$node" crictl pull "$full_image" 2>&1)"; then
257+            successful_nodes+=("$node")
258+            continue
259+        fi
260+        log_warning "crictl pull failed on node '$node': $pull_stderr"
261+
262+ # crictl may not resolve short names; try with docker.io prefix, but
263+ # only for unqualified image names (no registry hostname prefix like quay.io/).
264+        if [ "$has_explicit_registry" = true ]; then
265+            log_error "Failed to pull image on node '$node' (image has explicit registry, skipping docker.io fallback): $pull_stderr"
266+            # Best-effort rollback to avoid partial cluster state.
267+            for ok_node in "${successful_nodes[@]}"; do
268+                docker exec "$ok_node" ctr --namespace=k8s.io images rm "$full_image" > /dev/null 2>&1 || true
269+            done
270+            exit 1
271+ fi
272+
273+        if pull_stderr="$(docker exec "$node" ctr --namespace=k8s.io images pull "docker.io/$full_image" 2>&1)"; then
274+            # Tag so kubelet can find it by the original name, but only if it doesn't already exist.
275+            if ! docker exec "$node" ctr --namespace=k8s.io images ls -q | grep -Fxq "$full_image"; then
276+                if ! docker exec "$node" ctr --namespace=k8s.io images tag "docker.io/$full_image" "$full_image" > /dev/null 2>&1; then
277+                    log_error "Failed to tag image on node '$node' (docker.io/$full_image -> $full_image)"
278+                    for ok_node in "${successful_nodes[@]}"; do
279+                        docker exec "$ok_node" ctr --namespace=k8s.io images rm "$full_image" > /dev/null 2>&1 || true
280+                    done
281+                    exit 1
282+                fi
283+            fi
284+            successful_nodes+=("$node")
285+        else
286+            log_error "Failed to pull image on node '$node': $pull_stderr"
287+            for ok_node in "${successful_nodes[@]}"; do
288+                docker exec "$ok_node" ctr --namespace=k8s.io images rm "$full_image" > /dev/null 2>&1 || true
289+            done
290+            exit 1
291+        fi
292+ done
293+
294+    log_success "Image '$full_image' pulled directly into KIND cluster '$CLUSTER_NAME' nodes"
207295}
208296
209297KUBE_LIKE_VALUES_DEV_IF_PRESENT=true
0 commit comments