diff --git a/hack/dummy_config.yaml b/hack/dummy_config.yaml index d23802f9..dccda44b 100644 --- a/hack/dummy_config.yaml +++ b/hack/dummy_config.yaml @@ -29,45 +29,54 @@ etcd_endpoint: localhost:2379 extend_expiration: true force_expire: false hostname: test.hostname +ip: + - 192.168.126.99 + - 2001:db8::99 # proxy: http://registry.kni-qe-0.lab.eng.rdu2.redhat.com:3128|http://registry.kni-qe-0.lab.eng.rdu2.redhat.com:3130|.cluster.local,.kni-qe-2.lab.eng.rdu2.redhat.com,.svc,127.0.0.1,2620:52:0:11c::/64,2620:52:0:11c::1,2620:52:0:11c::10,2620:52:0:11c::11,2620:52:0:199::/64,api-int.kni-qe-2.lab.eng.rdu2.redhat.com,fd01::/48,fd02::/112,localhost|http://registry.kni-qe-0.lab.eng.rdu2.redhat.com:3128|http://registry.kni-qe-0.lab.eng.rdu2.redhat.com:3130|.cluster.local,.kni-qe-2.lab.eng.rdu2.redhat.com,.svc,127.0.0.1,2620:52:0:11c::/64,2620:52:0:11c::1,2620:52:0:11c::10,2620:52:0:11c::11,2620:52:0:199::/64,api-int.kni-qe-2.lab.eng.rdu2.redhat.com,fd01::/48,fd02::/112,localhost,moreproxy install_config: | - additionalTrustBundlePolicy: Proxyonly - apiVersion: v1 - baseDomain: ibo0.redhat.com - bootstrapInPlace: - installationDisk: /dev/disk/by-path/pci-0000:04:00.0 - compute: - - architecture: amd64 - hyperthreading: Enabled - name: worker - platform: {} - replicas: 0 - controlPlane: - architecture: amd64 - hyperthreading: Enabled - name: master - platform: {} - replicas: 1 - metadata: - creationTimestamp: null - name: seed - networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 192.168.126.0/24 - networkType: OVNKubernetes - serviceNetwork: - - 172.30.0.0/16 - platform: - none: {} - publish: External - pullSecret: "" - sshKey: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDThIOETj6iTvbCaNv15tZg121nWLcwtJuZofc1QS5iAdw8C8fN2R39cSW/ambikl2Fr6YNBBVR3znbtmattOyWyxAOFUfdY0aw0MqZb4LWLf04q6X0KsWIYWaV3ol0KxTzgvX38i/IU42XQfJwMYFE8dQ15TZ7l+FTKKi3SUPXLuy/9CXRfaCDZ2dKMcCkelkTr0KR1HdjiKQ86rMfk9JUbAf7D29aAQq4h1WNnHMM9vnbqN7MW9L8ebn/lCTJjGQ56r0UmurgyIEMt0P+CGp1e4AUNKYsPoYFB0GNwUkr/rB8LeuCOaZcoWdYXlUJaN45GjtCDon56+AoMA9V8tYkV6HqyFwGQjoGKI1cRCHXDJnGyAbMd9OK94TWJmNvtdHkbSURHyw2G7otZpAkRuEvMP0C7R+3JmuxrDA8yaUgWvgccqGcmFl1krClksW6KrAXNlwhZ4QOAMhDrXwwPfOOQoG82zPpg+g9gZQIhkro1Cje4bmWz5z5fiuDloTq1vc= - root@edge-01.edge.lab.eng.rdu2.redhat.com -ip: 192.168.126.99 -kubeadmin_password_hash: $2a$10$20Q4iRLy7cWZkjn/D07bF.RZQZonKwstyRGH0qiYbYRkx5Pe4Ztyi + additionalTrustBundlePolicy: Proxyonly + apiVersion: v1 + baseDomain: ibo0.redhat.com + bootstrapInPlace: + installationDisk: /dev/disk/by-path/pci-0000:04:00.0 + compute: + - architecture: amd64 + hyperthreading: Enabled + name: worker + platform: {} + replicas: 0 + controlPlane: + architecture: amd64 + hyperthreading: Enabled + name: master + platform: {} + replicas: 1 + metadata: + creationTimestamp: null + name: seed + networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + - cidr: fd01::/48 + hostPrefix: 64 + machineNetwork: + - cidr: 192.168.128.0/24 + - cidr: 1001:db8::/120 + networkType: OVNKubernetes + serviceNetwork: + - 172.30.0.0/16 + - fd02::/112 + platform: + none: {} + publish: External + pullSecret: "" + sshKey: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDThIOETj6iTvbCaNv15tZg121nWLcwtJuZofc1QS5iAdw8C8fN2R39cSW/ambikl2Fr6YNBBVR3znbtmattOyWyxAOFUfdY0aw0MqZb4LWLf04q6X0KsWIYWaV3ol0KxTzgvX38i/IU42XQfJwMYFE8dQ15TZ7l+FTKKi3SUPXLuy/9CXRfaCDZ2dKMcCkelkTr0KR1HdjiKQ86rMfk9JUbAf7D29aAQq4h1WNnHMM9vnbqN7MW9L8ebn/lCTJjGQ56r0UmurgyIEMt0P+CGp1e4AUNKYsPoYFB0GNwUkr/rB8LeuCOaZcoWdYXlUJaN45GjtCDon56+AoMA9V8tYkV6HqyFwGQjoGKI1cRCHXDJnGyAbMd9OK94TWJmNvtdHkbSURHyw2G7otZpAkRuEvMP0C7R+3JmuxrDA8yaUgWvgccqGcmFl1krClksW6KrAXNlwhZ4QOAMhDrXwwPfOOQoG82zPpg+g9gZQIhkro1Cje4bmWz5z5fiuDloTq1vc= + root@edge-01.edge.lab.eng.rdu2.redhat.com +machine_network_cidr: + - 192.168.128.0/24 + - 1001:db8::/120 +kubeadmin_password_hash: "$2a$10$20Q4iRLy7cWZkjn/D07bF.RZQZonKwstyRGH0qiYbYRkx5Pe4Ztyi" # proxy_trusted_ca_bundle: 'user-ca-bundle:' # user_ca_bundle: | # # Foo @@ -92,7 +101,6 @@ kubeadmin_password_hash: $2a$10$20Q4iRLy7cWZkjn/D07bF.RZQZonKwstyRGH0qiYbYRkx5Pe # 42TI0UzcqRV4CWDoARMSV8yMLajZ0g1eEreUprwmFcOy17V7KCeV6E8lKb21OU8M # Ad9q3H0iXjct # -----END CERTIFICATE----- -machine_network_cidr: 192.168.127.0/24 postprocess_only: false pull_secret: '{"auths":{"empty_registry":{"username":"empty","password":"empty","auth":"ZW1wdHk6ZW1wdHk=","email":""}}}' summary_file: summary.yaml diff --git a/src/config.rs b/src/config.rs index 0647bb56..8bd65987 100644 --- a/src/config.rs +++ b/src/config.rs @@ -42,7 +42,7 @@ pub(crate) struct ClusterCustomizations { pub(crate) files: Vec, pub(crate) cluster_rename: Option, pub(crate) hostname: Option, - pub(crate) ip: Option, + pub(crate) ip_addresses: Vec, pub(crate) proxy: Option, pub(crate) install_config: Option, pub(crate) kubeadmin_password_hash: Option, @@ -50,7 +50,7 @@ pub(crate) struct ClusterCustomizations { pub(crate) pull_secret: Option, pub(crate) user_ca_bundle: Option, pub(crate) proxy_trusted_ca_bundle: Option, - pub(crate) machine_network_cidr: Option, + pub(crate) machine_network_cidrs: Vec, pub(crate) chrony_config: Option, } @@ -155,12 +155,12 @@ impl RecertConfig { files: 
vec![], cluster_rename: None, hostname: None, - ip: None, + ip_addresses: vec![], kubeadmin_password_hash: None, pull_secret: None, proxy: None, install_config: None, - machine_network_cidr: None, + machine_network_cidrs: vec![], user_ca_bundle: None, proxy_trusted_ca_bundle: None, chrony_config: None, @@ -222,9 +222,20 @@ impl RecertConfig { Some(value) => Some(value.as_str().context("hostname must be a string")?.to_string()), None => None, }; - let ip = match value.remove("ip") { - Some(value) => Some(value.as_str().context("ip must be a string")?.to_string()), - None => None, + let ip_addresses: Vec = match value.remove("ip") { + Some(serde_json::Value::Array(array)) => { + ensure!(array.len() <= 2, "ip array must contain at most 2 elements"); + array + .iter() + .map(|v| -> Result { Ok(v.as_str().context("ip array element must be a string")?.to_string()) }) + .collect::, _>>()? + } + Some(serde_json::Value::String(single_ip)) => { + // Handle single IP for backward compatibility + vec![single_ip.to_string()] + } + None => vec![], + _ => anyhow::bail!("ip must be a string or an array of strings"), }; let pull_secret = match value.remove("pull_secret") { Some(value) => Some(value.as_str().context("pull_secret must be a string")?.to_string()), None => None, }; @@ -260,9 +271,24 @@ impl RecertConfig { ), None => None, }; - let machine_network_cidr = match value.remove("machine_network_cidr") { - Some(value) => Some(value.as_str().context("machine_network_cidr must be a string")?.to_string()), - None => None, + let machine_network_cidrs = match value.remove("machine_network_cidr") { + Some(serde_json::Value::Array(array)) => { + ensure!(array.len() <= 2, "machine_network_cidr array must contain at most 2 elements"); + array + .iter() + .map(|v| -> Result { + Ok(v.as_str() + .context("machine_network_cidr array element must be a string")? + .to_string()) + }) + .collect::, _>>()? 
+ } + Some(serde_json::Value::String(single_cidr)) => { + // Handle single CIDR for backward compatibility + vec![single_cidr.to_string()] + } + None => vec![], + _ => anyhow::bail!("machine_network_cidr must be a string or an array of strings"), }; let chrony_config = match value.remove("chrony_config") { Some(value) => Some(value.as_str().context("chrony_config must be a string")?.to_string()), @@ -338,14 +364,14 @@ impl RecertConfig { files: cluster_customization_files, cluster_rename, hostname, - ip, + ip_addresses, kubeadmin_password_hash: set_kubeadmin_password_hash, pull_secret, user_ca_bundle, proxy_trusted_ca_bundle, proxy, install_config, - machine_network_cidr, + machine_network_cidrs, chrony_config, }; @@ -429,14 +455,14 @@ impl RecertConfig { }, cluster_rename: cli.cluster_rename, hostname: cli.hostname, - ip: cli.ip, + ip_addresses: cli.ip, proxy: cli.proxy, install_config: cli.install_config, kubeadmin_password_hash: cli.kubeadmin_password_hash, pull_secret: cli.pull_secret, user_ca_bundle: cli.user_ca_bundle, proxy_trusted_ca_bundle: cli.proxy_trusted_ca_bundle, - machine_network_cidr: cli.machine_network_cidr, + machine_network_cidrs: cli.machine_network_cidr, chrony_config: cli.chrony_config, }, encryption_customizations: EncryptionCustomizations { diff --git a/src/config/cli.rs b/src/config/cli.rs index 6c3750f6..246f70db 100644 --- a/src/config/cli.rs +++ b/src/config/cli.rs @@ -97,9 +97,9 @@ pub(crate) struct Cli { pub(crate) hostname: Option, /// If given, the cluster resources that include the IP address will be modified to use this - /// one instead. + /// one instead. For dual stack, provide multiple IP addresses (IPv4 first). #[clap(long)] - pub(crate) ip: Option, + pub(crate) ip: Vec, /// If given, the cluster's HTTP proxy configuration will be modified to use this one instead. #[clap(long, value_parser = Proxy::parse)] @@ -150,10 +150,11 @@ pub(crate) struct Cli { /// The CIDR of the machine network. 
If given, the machine network CIDR which appears in the /// install-config found in the cluster-config-v1 configmaps will be modified to use this - /// machine CIDR. WARNING: If a different machine network CIDR is stated in the + /// machine CIDR. For dual stack, provide multiple IPv4 and IPv6 CIDRs (IPv4 first). + /// WARNING: If a different machine network CIDR is stated in the /// --install-config parameter, it might overwrite the one given here. #[clap(long)] - pub(crate) machine_network_cidr: Option, + pub(crate) machine_network_cidr: Vec, /// If given, the cluster resources that include chrony.config be modified to have this value. #[clap(long)] diff --git a/src/etcd_encoding.rs b/src/etcd_encoding.rs index c577b979..784c288f 100644 --- a/src/etcd_encoding.rs +++ b/src/etcd_encoding.rs @@ -5,7 +5,7 @@ use super::protobuf_gen::{ admissionregistration::v1::{MutatingWebhookConfiguration, ValidatingWebhookConfiguration}, apps::v1::{ControllerRevision, DaemonSet, Deployment, StatefulSet}, batch::v1::{CronJob, Job}, - core::v1::{ConfigMap, Secret}, + core::v1::{ConfigMap, Node, Secret}, }, apimachinery::pkg::runtime::{TypeMeta, Unknown}, }, @@ -56,6 +56,7 @@ k8s_type!(JobWithMeta, Job); k8s_type!(CronJobWithMeta, CronJob); k8s_type!(StatefulSetWithMeta, StatefulSet); k8s_type!(ConfigMapWithMeta, ConfigMap); +k8s_type!(NodeWithMeta, Node); k8s_type!(SecretWithMeta, Secret); k8s_type!(ValidatingWebhookConfigurationWithMeta, ValidatingWebhookConfiguration); k8s_type!(MutatingWebhookConfigurationWithMeta, MutatingWebhookConfiguration); @@ -86,6 +87,7 @@ pub(crate) async fn decode(data: &[u8]) -> Result> { "StatefulSet" => serde_json::to_vec(&StatefulSetWithMeta::try_from(unknown)?)?, "DaemonSet" => serde_json::to_vec(&DaemonsSetWithMeta::try_from(unknown)?)?, "ConfigMap" => serde_json::to_vec(&ConfigMapWithMeta::try_from(unknown)?)?, + "Node" => serde_json::to_vec(&NodeWithMeta::try_from(unknown)?)?, "Secret" => serde_json::to_vec(&SecretWithMeta::try_from(unknown)?)?, 
"ValidatingWebhookConfiguration" => serde_json::to_vec(&ValidatingWebhookConfigurationWithMeta::try_from(unknown)?)?, "MutatingWebhookConfiguration" => serde_json::to_vec(&MutatingWebhookConfigurationWithMeta::try_from(unknown)?)?, diff --git a/src/ocp_postprocess.rs b/src/ocp_postprocess.rs index a039df85..fe8515e5 100644 --- a/src/ocp_postprocess.rs +++ b/src/ocp_postprocess.rs @@ -152,8 +152,19 @@ async fn run_cluster_customizations( .context("renaming cluster")?; } - if let Some(ip) = &cluster_customizations.ip { - ip_rename(in_memory_etcd_client, ip, dirs, files).await.context("renaming IP")?; + let ips = &cluster_customizations.ip_addresses; + if ips.len() == 1 { + log::info!("Processing single IP: {}", ips[0]); + ip_rename(in_memory_etcd_client, &ips[0], dirs, files) + .await + .context(format!("renaming IP {}", ips[0]))?; + } else if ips.len() == 2 { + log::info!("Processing dual-stack IPs: {}", ips.join(", ")); + ip_rename_dual_stack(in_memory_etcd_client, ips, dirs, files) + .await + .context("renaming dual-stack IPs")?; + } else if ips.is_empty() { + log::info!("No IPs were provided, skipping IP rename"); } if let Some(hostname) = &cluster_customizations.hostname { @@ -196,8 +207,11 @@ async fn run_cluster_customizations( .await .context("renaming additional trust bundle")?; - if let Some(machine_network_cidr) = &cluster_customizations.machine_network_cidr { - fix_machine_network_cidr(in_memory_etcd_client, machine_network_cidr, dirs, files) + let machine_network_cidrs = &cluster_customizations.machine_network_cidrs; + if !machine_network_cidrs.is_empty() { + let combined_cidrs = machine_network_cidrs.join(","); + log::info!("Processing machine network CIDRs: {}", combined_cidrs); + fix_machine_network_cidr(in_memory_etcd_client, &combined_cidrs, dirs, files) .await .context("fixing machine network CIDR")?; } @@ -913,6 +927,21 @@ pub(crate) async fn ip_rename( Ok(()) } +pub(crate) async fn ip_rename_dual_stack( + in_memory_etcd_client: &Arc, + ips: 
&[String], + dirs: &[ConfigPath], + files: &[ConfigPath], +) -> Result<()> { + let etcd_client = in_memory_etcd_client; + + ip_rename::rename_all_dual_stack(etcd_client, ips, dirs, files) + .await + .context("renaming all dual stack")?; + + Ok(()) +} + pub(crate) async fn pull_secret_rename( in_memory_etcd_client: &Arc, pull_secret: &str, diff --git a/src/ocp_postprocess/ip_rename.rs b/src/ocp_postprocess/ip_rename.rs index 560bc29e..ef73d1b3 100644 --- a/src/ocp_postprocess/ip_rename.rs +++ b/src/ocp_postprocess/ip_rename.rs @@ -1,5 +1,5 @@ use crate::{config::path::ConfigPath, k8s_etcd::InMemoryK8sEtcd}; -use anyhow::{Context, Result}; +use anyhow::{ensure, Context, Result}; use std::{path::Path, sync::Arc}; mod etcd_rename; @@ -15,6 +15,27 @@ pub(crate) async fn rename_all(etcd_client: &Arc, ip: &str, dir Ok(()) } +pub(crate) async fn rename_all_dual_stack( + etcd_client: &Arc, + ips: &[String], + dirs: &[ConfigPath], + files: &[ConfigPath], +) -> Result<()> { + let original_ips = extract_original_dual_stack_ips(etcd_client) + .await + .context("extracting original dual-stack IPs")?; + + fix_etcd_resources_dual_stack(etcd_client, &original_ips, ips) + .await + .context("modifying etcd resources for dual stack")?; + + fix_filesystem_resources_dual_stack(&original_ips, ips, dirs, files) + .await + .context("renaming filesystem resources for dual stack")?; + + Ok(()) +} + async fn fix_filesystem_resources(original_ip: &str, ip: &str, dirs: &[ConfigPath], files: &[ConfigPath]) -> Result<()> { for dir in dirs { fix_dir_resources(original_ip, ip, dir).await?; @@ -27,6 +48,23 @@ async fn fix_filesystem_resources(original_ip: &str, ip: &str, dirs: &[ConfigPat Ok(()) } +async fn fix_filesystem_resources_dual_stack( + original_ips: &[String], + ips: &[String], + dirs: &[ConfigPath], + files: &[ConfigPath], +) -> Result<()> { + for dir in dirs { + fix_dir_resources_dual_stack(original_ips, ips, dir).await?; + } + + for file in files { + 
fix_file_resources_dual_stack(original_ips, ips, file).await?; + } + + Ok(()) +} + async fn fix_dir_resources(original_ip: &str, ip: &str, dir: &Path) -> Result<()> { filesystem_rename::fix_filesystem_ip(original_ip, ip, dir) .await @@ -34,56 +72,140 @@ async fn fix_dir_resources(original_ip: &str, ip: &str, dir: &Path) -> Result<() Ok(()) } +async fn fix_dir_resources_dual_stack(original_ips: &[String], ips: &[String], dir: &Path) -> Result<()> { + // Apply IPv4 replacement (original IPv4 → new IPv4) + filesystem_rename::fix_filesystem_ip(&original_ips[0], &ips[0], dir) + .await + .context(format!("fix filesystem IPv4 in {:?}", dir))?; + + // Apply IPv6 replacement (original IPv6 → new IPv6) - both are guaranteed to be present + filesystem_rename::fix_filesystem_ip(&original_ips[1], &ips[1], dir) + .await + .context(format!("fix filesystem IPv6 in {:?}", dir))?; + + Ok(()) +} + async fn fix_file_resources(_original_ip: &str, _ip: &str, _file: &Path) -> Result<()> { Ok(()) } -async fn fix_etcd_resources(etcd_client: &Arc, ip: &str) -> Result { - let original_ip = etcd_rename::fix_openshift_apiserver_configmap(etcd_client, ip) +async fn fix_file_resources_dual_stack(_original_ips: &[String], _ips: &[String], _file: &Path) -> Result<()> { + // Keep consistent with single-stack version (no-op) + Ok(()) +} + +async fn fix_etcd_resources_for_ip_pair(etcd_client: &Arc, original_ip: &str, new_ip: &str) -> Result<()> { + etcd_rename::fix_openshift_apiserver_configmap(etcd_client, original_ip, new_ip) .await .context("fixing openshift apiserver config configmap")?; - etcd_rename::fix_etcd_endpoints(etcd_client, ip) + etcd_rename::fix_etcd_endpoints(etcd_client, original_ip, new_ip) .await - .context("fixing etcd secrets")?; + .context("fixing etcd endpoints")?; - etcd_rename::fix_etcd_pod(etcd_client, &original_ip, ip) + etcd_rename::fix_etcd_pod(etcd_client, original_ip, new_ip) .await .context("fixing etcd-pod")?; - etcd_rename::fix_etcd_scripts(etcd_client, &original_ip, 
ip) + etcd_rename::fix_etcd_scripts(etcd_client, original_ip, new_ip) .await .context("fixing etcd-scripts")?; - etcd_rename::fix_etcd_secrets(etcd_client, &original_ip, ip) + etcd_rename::fix_etcd_secrets(etcd_client, original_ip, new_ip) .await .context("fixing etcd secrets")?; - etcd_rename::fix_kube_apiserver_configs(etcd_client, &original_ip, ip) + etcd_rename::fix_kube_apiserver_configs(etcd_client, original_ip, new_ip) .await .context("fixing kube apiserver configs")?; - etcd_rename::fix_kubeapiservers_cluster(etcd_client, &original_ip, ip) + etcd_rename::fix_kubeapiservers_cluster(etcd_client, original_ip, new_ip) .await .context("fixing kubeapiservers/cluster")?; - etcd_rename::fix_authentications_cluster(etcd_client, &original_ip, ip) + etcd_rename::fix_authentications_cluster(etcd_client, original_ip, new_ip) .await - .context("fixing kubeapiservers/cluster")?; + .context("fixing authentications/cluster")?; - etcd_rename::fix_openshiftapiservers_cluster(etcd_client, ip) + etcd_rename::fix_openshiftapiservers_cluster(etcd_client, original_ip, new_ip) .await - .context("fixing kubeapiservers/cluster")?; + .context("fixing openshiftapiservers/cluster")?; - etcd_rename::fix_networks_cluster(etcd_client, ip) + etcd_rename::fix_networks_cluster(etcd_client, original_ip, new_ip) .await .context("fixing networks/cluster")?; - etcd_rename::fix_oauth_apiserver_deployment(etcd_client, &original_ip, ip) + etcd_rename::fix_oauth_apiserver_deployment(etcd_client, original_ip, new_ip) .await .context("fixing oauth apiserver deployment")?; - etcd_rename::fix_etcd_member(etcd_client, ip).await.context("fixing etcd member")?; + etcd_rename::fix_etcd_member(etcd_client, original_ip, new_ip) + .await + .context("fixing etcd member")?; + + Ok(()) +} - Ok(original_ip) +async fn fix_etcd_resources(etcd_client: &Arc, ip: &str) -> Result { + let original_ips = etcd_rename::extract_original_ips(etcd_client) + .await + .context("extracting original IPs from node configuration")?; 
+ + ensure!( + original_ips.len() == 1, + "Expected single-stack (1 IP) but found {} IPs", + original_ips.len() + ); + let original_ip = &original_ips[0]; + + fix_etcd_resources_for_ip_pair(etcd_client, original_ip, ip) + .await + .context("applying etcd resource fixes")?; + + Ok(original_ip.clone()) +} + +async fn extract_original_dual_stack_ips(etcd_client: &Arc) -> Result> { + let original_ips = etcd_rename::extract_original_ips(etcd_client) + .await + .context("extracting original IPs from node configuration")?; + + ensure!( + original_ips.len() == 2, + "Expected dual-stack (2 IPs) but found {} IPs", + original_ips.len() + ); + + Ok(original_ips) +} + +async fn fix_etcd_resources_dual_stack(etcd_client: &Arc, original_ips: &[String], new_ips: &[String]) -> Result<()> { + let original_ipv4 = &original_ips[0]; + let original_ipv6 = &original_ips[1]; + + let new_ipv4 = &new_ips[0]; + let new_ipv6 = new_ips.get(1).context("Second IP (IPv6) is required for dual-stack processing")?; + + log::info!( + "Applying dual-stack IP changes - IPv4: {} → {}, IPv6: {} → {}", + original_ipv4, + new_ipv4, + original_ipv6, + new_ipv6 + ); + + log::info!("Applying IPv4 replacements: {} → {}", original_ipv4, new_ipv4); + + fix_etcd_resources_for_ip_pair(etcd_client, original_ipv4, new_ipv4) + .await + .context("applying IPv4 etcd resource fixes")?; + + log::info!("Applying IPv6 replacements: {} → {}", original_ipv6, new_ipv6); + + fix_etcd_resources_for_ip_pair(etcd_client, original_ipv6, new_ipv6) + .await + .context("applying IPv6 etcd resource fixes")?; + + Ok(()) } diff --git a/src/ocp_postprocess/ip_rename/etcd_rename.rs b/src/ocp_postprocess/ip_rename/etcd_rename.rs index 08a1d0cd..567de5c7 100644 --- a/src/ocp_postprocess/ip_rename/etcd_rename.rs +++ b/src/ocp_postprocess/ip_rename/etcd_rename.rs @@ -6,10 +6,83 @@ use crate::{ use anyhow::{bail, ensure, Context, Result}; use futures_util::future::join_all; use serde_json::Value; -use std::net::Ipv6Addr; +use 
std::net::{IpAddr, Ipv6Addr}; use std::sync::Arc; -pub(crate) async fn fix_openshift_apiserver_configmap(etcd_client: &Arc, ip: &str) -> Result { +fn is_ipv6(ip: &str) -> Result { + let addr = ip + .parse::() + .with_context(|| format!("Failed to parse IP address: {}", ip))?; + Ok(addr.is_ipv6()) +} + +// Extract both original IPv4 and IPv6 IPs from dual-stack cluster node configuration +pub(crate) async fn extract_original_ips(etcd_client: &Arc) -> Result> { + // Extract IPs from node addresses - works for both single-stack and dual-stack + extract_original_ips_from_nodes(etcd_client).await +} + +// Extract original IPs from node configuration - returns 1 IP for single-stack, 2 for dual-stack +async fn extract_original_ips_from_nodes(etcd_client: &Arc) -> Result> { + let node_keys = etcd_client.list_keys("minions").await?; + + ensure!( + node_keys.len() == 1, + "Expected exactly one node in the cluster, found {}", + node_keys.len() + ); + + let node_key = &node_keys[0]; + + let etcd_result = etcd_client.get(node_key.clone()).await?.context("Failed to get node from etcd")?; + + let node: Value = serde_yaml::from_slice(&etcd_result.value).context("Failed to deserialize node value")?; + + let addresses = node + .pointer("/status/addresses") + .and_then(|a| a.as_array()) + .context("Node does not have /status/addresses array")?; + + let mut original_ipv4: Option = None; + let mut original_ipv6: Option = None; + + for address in addresses { + if let (Some(addr_type), Some(addr_value)) = ( + address.pointer("/type").and_then(|t| t.as_str()), + address.pointer("/address").and_then(|a| a.as_str()), + ) { + if addr_type == "InternalIP" { + if is_ipv6(addr_value)? 
{ + original_ipv6 = Some(addr_value.to_string()); + } else { + original_ipv4 = Some(addr_value.to_string()); + } + } + } + } + + let mut result = Vec::new(); + + if let Some(ipv4) = original_ipv4 { + result.push(ipv4); + } + + if let Some(ipv6) = original_ipv6 { + result.push(ipv6); + } + + ensure!(!result.is_empty(), "No InternalIP addresses found in node configuration"); + + if result.len() == 1 { + log::info!("Found single-stack IP: {}", result[0]); + } else { + log::info!("Found dual-stack IPs - IPv4: {}, IPv6: {}", result[0], result[1]); + } + + Ok(result) +} + +pub(crate) async fn fix_openshift_apiserver_configmap(etcd_client: &Arc, original_ip: &str, ip: &str) -> Result<()> { let k8s_resource_location = K8sResourceLocation::new(Some("openshift-apiserver"), "Configmap", "config", "v1"); let mut configmap = get_etcd_json(etcd_client, &k8s_resource_location) @@ -25,49 +98,68 @@ pub(crate) async fn fix_openshift_apiserver_configmap(etcd_client: &Arc Result<()> { +fn fix_storage_config(config: &mut Value, original_ip: &str, ip: &str) -> Result<()> { let storage_config = config.pointer_mut("/storageConfig").context("storageConfig not found")?; - let ip = if ip.contains(':') { format!("[{ip}]") } else { ip.to_string() }; + let current_urls = storage_config + .pointer("/urls") + .and_then(|urls| urls.as_array()) + .context("storageConfig/urls not found or not an array")?; + + let original_ip_formatted = if original_ip.contains(':') { + format!("[{original_ip}]") + } else { + original_ip.to_string() + }; + let expected_url = format!("https://{original_ip_formatted}:2379"); + + // Only replace if the original IP is found in the URLs + let contains_original = current_urls.iter().any(|url| url.as_str().map_or(false, |s| s == expected_url)); + + if contains_original { + let new_ip = if ip.contains(':') { format!("[{ip}]") } else { ip.to_string() }; + storage_config.as_object_mut().context("storageConfig not an object")?.insert( + "urls".to_string(), + 
serde_json::Value::Array(vec![serde_json::Value::String(format!("https://{new_ip}:2379"))]), + ); + } else { + log::info!("Original IP {} not found in storage config URLs, skipping replacement", original_ip); + } - storage_config.as_object_mut().context("storageConfig not an object")?.insert( - "urls".to_string(), - serde_json::Value::Array(vec![serde_json::Value::String(format!("https://{ip}:2379"))]), - ); Ok(()) } @@ -121,7 +213,7 @@ pub(crate) async fn fix_kube_apiserver_configs(etcd_client: &Arc, ip: &str) -> Result<()> { +pub(crate) async fn fix_etcd_endpoints(etcd_client: &Arc, original_ip: &str, ip: &str) -> Result<()> { join_all( etcd_client .list_keys("configmaps/openshift-etcd/etcd-endpoints".to_string().as_str()) @@ -154,9 +246,19 @@ pub(crate) async fn fix_etcd_endpoints(etcd_client: &Arc, ip: & // Ensure above guarantees that this unwrap will never panic #[allow(clippy::unwrap_used)] let current_member_id = data.keys().next().unwrap().clone(); - data[¤t_member_id] = serde_json::Value::String(ip.to_string()); - - put_etcd_yaml(etcd_client, &k8s_resource_location, configmap).await?; + let current_value = data[¤t_member_id].as_str().context("current member value not a string")?; + + // Only replace if the original IP is found + if current_value == original_ip { + data[¤t_member_id] = serde_json::Value::String(ip.to_string()); + put_etcd_yaml(etcd_client, &k8s_resource_location, configmap).await?; + } else { + log::info!( + "Original IP {} not found in etcd endpoints, current value is {}, skipping replacement", + original_ip, + current_value + ); + } Ok(()) }), @@ -338,7 +440,7 @@ pub(crate) async fn fix_kubeapiservers_cluster(etcd_client: &Arc, ip: &str) -> Result<()> { +pub(crate) async fn fix_openshiftapiservers_cluster(etcd_client: &Arc, original_ip: &str, ip: &str) -> Result<()> { let k8s_resource_location = K8sResourceLocation::new(None, "OpenShiftAPIServer", "cluster", "operator.openshift.io/v1"); let mut cluster = get_etcd_json(etcd_client, 
&k8s_resource_location) .await? @@ -346,7 +448,7 @@ pub(crate) async fn fix_openshiftapiservers_cluster(etcd_client: &Arc, original_ip: &str, ip: &str) -> Result<()> { - let k8s_resource_location = K8sResourceLocation::new(Some("openshift-oauth-apiserver"), "Deployment", "apiserver", "v1"); + let k8s_resource_location = K8sResourceLocation::new(Some("openshift-oauth-apiserver"), "Deployment", "apiserver", "apps/v1"); + let mut deployment = get_etcd_json(etcd_client, &k8s_resource_location) .await? .context("getting openshift-oauth-apiserver deployment/apiserver")?; @@ -407,8 +510,15 @@ pub(crate) async fn fix_oauth_apiserver_deployment(etcd_client: &Arc, ip: &str) -> Result<()> { +pub(crate) async fn fix_networks_cluster(etcd_client: &Arc, original_ip: &str, ip: &str) -> Result<()> { let k8s_resource_location = K8sResourceLocation::new(None, "Network", "cluster", "operator.openshift.io/v1"); let mut cluster = get_etcd_json(etcd_client, &k8s_resource_location) @@ -439,23 +549,73 @@ pub(crate) async fn fix_networks_cluster(etcd_client: &Arc, ip: let annotations = annotations.as_object_mut().context("/metadata/annotations not an object")?; let key = "networkoperator.openshift.io/ovn-cluster-initiator"; - if annotations.contains_key(key) { - annotations.insert(key.to_string(), Value::String(ip.to_string())); - - put_etcd_yaml(etcd_client, &k8s_resource_location, cluster).await?; + if let Some(current_value) = annotations.get(key) { + let current_value_str = current_value.as_str().context("annotation value not a string")?; + // Only replace if the original IP is found + if current_value_str == original_ip { + annotations.insert(key.to_string(), Value::String(ip.to_string())); + put_etcd_yaml(etcd_client, &k8s_resource_location, cluster).await?; + } else { + log::info!( + "Original IP {} not found in networks cluster annotation, current value is {}, skipping replacement", + original_ip, + current_value_str + ); + } + } else { + log::info!("Network cluster annotation {} 
not found, skipping replacement", key); } } Ok(()) } -pub(crate) async fn fix_etcd_member(etcd_client: &Arc, ip: &str) -> Result<()> { - let mut update = format!("https://{}:2380", ip).to_string(); - if ip.parse::().is_ok() { - update = format!("https://[{}]:2380", ip).to_string(); +pub(crate) async fn fix_etcd_member(etcd_client: &Arc, original_ip: &str, ip: &str) -> Result<()> { + let etcd_client_ref = etcd_client.etcd_client.as_ref().context("etcd client not configured")?; + + // Get current member list to check if original IP is present + let members_list = etcd_client_ref + .cluster_client() + .member_list() + .await + .context("listing etcd members list")?; + + let members = members_list.members(); + ensure!( + members.len() == 1, + "single-node must have exactly one etcd member, found {}", + members.len() + ); + + let current_member = &members[0]; + let expected_original_url = if original_ip.parse::().is_ok() { + format!("https://[{}]:2380", original_ip) + } else { + format!("https://{}:2380", original_ip) + }; + + let contains_original = current_member.peer_urls().iter().any(|url| url == &expected_original_url); + + if contains_original { + let new_member_url = if ip.parse::().is_ok() { + format!("https://[{}]:2380", ip) + } else { + format!("https://{}:2380", ip) + }; + + log::debug!("Updating etcd member from {} to {}", expected_original_url, new_member_url); + etcd_client + .update_member(new_member_url) + .await + .context("failed to update etcd member")?; + } else { + log::info!( + "Original IP {} not found in etcd member peer URLs, current URLs are {:?}, skipping replacement", + original_ip, + current_member.peer_urls() + ); } - etcd_client.update_member(update).await.context("failed to update etcd member")?; Ok(()) } diff --git a/src/ocp_postprocess/machine_config_cidr_rename/etcd_rename.rs b/src/ocp_postprocess/machine_config_cidr_rename/etcd_rename.rs index f56ab069..8ff829e0 100644 --- 
a/src/ocp_postprocess/machine_config_cidr_rename/etcd_rename.rs +++ b/src/ocp_postprocess/machine_config_cidr_rename/etcd_rename.rs @@ -2,7 +2,7 @@ use crate::{ cluster_crypto::locations::K8sResourceLocation, k8s_etcd::{get_etcd_json, put_etcd_yaml, InMemoryK8sEtcd}, }; -use anyhow::{ensure, Context, Result}; +use anyhow::{Context, Result}; use serde_json::Value; pub(crate) async fn fix_configmap( @@ -31,13 +31,15 @@ pub(crate) async fn fix_configmap( .as_array_mut() .context("machineNetwork not an array")?; - ensure!( - machine_network.len() == 1, - "machineNetwork has more than one entry, dual stack clusters are not currently supported" - ); + // For dual stack clusters, preserve all existing entries and just replace them with new network + // If machine_config_network contains commas, split into multiple networks for dual stack + let new_networks: Vec<_> = machine_config_network + .split(',') + .map(|cidr| serde_json::json!({"cidr": cidr.trim()})) + .collect(); - machine_network.remove(0); - machine_network.push(serde_json::Value::String(machine_config_network.to_string())); + machine_network.clear(); + machine_network.extend(new_networks); data.insert( "install-config".to_string(), diff --git a/src/ocp_postprocess/rename_utils.rs b/src/ocp_postprocess/rename_utils.rs index 7b7c637c..c9ccd6ff 100644 --- a/src/ocp_postprocess/rename_utils.rs +++ b/src/ocp_postprocess/rename_utils.rs @@ -36,9 +36,10 @@ pub(crate) fn fix_apiserver_url_file(original_data: Vec, cluster_domain: &st }) .join("\n"); - if !found { - bail!("could not find line starting with KUBERNETES_SERVICE_HOST='api-int. in apiserver-url.env"); - } + ensure!( + found, + "could not find line starting with KUBERNETES_SERVICE_HOST='api-int. 
in apiserver-url.env" + ); Ok(format!("{new}\n")) } @@ -245,9 +246,7 @@ pub(crate) async fn fix_kubeconfig(cluster_name: &str, cluster_domain: &str, kub .as_array_mut() .context("clusters not an array")?; - if clusters.is_empty() { - bail!("expected at least one cluster in kubeconfig"); - } + ensure!(!clusters.is_empty(), "expected at least one cluster in kubeconfig"); clusters.iter_mut().try_for_each(|cluster| { // Only the kubelet kubeconfig contains the cluster's name as a .clusters[].cluster.name, @@ -348,9 +347,7 @@ pub(crate) fn fix_kcm_pod(pod: &mut Value, generated_infra_id: &str) -> Result<( .as_array_mut() .context("clusters not an object")?; - if containers.is_empty() { - bail!("expected at least one container in pod.yaml"); - } + ensure!(!containers.is_empty(), "expected at least one container in pod.yaml"); containers .iter_mut() @@ -362,9 +359,7 @@ pub(crate) fn fix_kcm_pod(pod: &mut Value, generated_infra_id: &str) -> Result<( .as_array_mut() .context("args not an array")?; - if args.is_empty() { - bail!("expected at least one arg in kube-controller-manager"); - } + ensure!(!args.is_empty(), "expected at least one arg in kube-controller-manager"); let arg = args .iter_mut() @@ -697,9 +692,7 @@ pub(crate) fn fix_pod_container_env(pod: &mut Value, domain: &str, container_nam .as_array_mut() .context("env not an array")?; - if env.is_empty() { - bail!("expected at least one env in container"); - } + ensure!(!env.is_empty(), "expected at least one env in container"); env.iter_mut() .find_map(|var| (var.get("name")? == env_name).then_some(var))