From aa3e6bdc7b6f988d35c49447d66e14043bc3cb31 Mon Sep 17 00:00:00 2001 From: Max Bridges Date: Tue, 22 Apr 2025 08:43:06 -0400 Subject: [PATCH 1/6] Drafting --- _topic_maps/_topic_map.yml | 6 + .../hcp-deploy/hcp-deploy-openstack.adoc | 23 +++ .../hcp-destroy/hcp-destroy-openstack.adoc | 9 + .../hcp-manage/hcp-manage-openstack.adoc | 20 ++ modules/hcp-deploy-openstack-create.adoc | 65 ++++++ modules/hcp-deploy-openstack-parameters.adoc | 53 +++++ modules/hcp-manage-openstack-az.adoc | 93 +++++++++ modules/hcp-openstack-accessing.adoc | 48 +++++ modules/hcp-openstack-autoscale.adoc | 105 ++++++++++ modules/hcp-support-matrix.adoc | 4 + ...d-clusters-openstack-additional-ports.adoc | 89 +++++++++ ...clusters-openstack-create-floating-ip.adoc | 34 ++++ .../hosted-clusters-openstack-destroy.adoc | 28 +++ ...hosted-clusters-openstack-performance.adoc | 189 ++++++++++++++++++ ...osted-clusters-openstack-prepare-etcd.adoc | 152 ++++++++++++++ ...sted-clusters-openstack-prerequisites.adoc | 22 ++ ...osted-clusters-openstack-upload-rhcos.adoc | 29 +++ 17 files changed, 969 insertions(+) create mode 100644 hosted_control_planes/hcp-deploy/hcp-deploy-openstack.adoc create mode 100644 hosted_control_planes/hcp-destroy/hcp-destroy-openstack.adoc create mode 100644 hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc create mode 100644 modules/hcp-deploy-openstack-create.adoc create mode 100644 modules/hcp-deploy-openstack-parameters.adoc create mode 100644 modules/hcp-manage-openstack-az.adoc create mode 100644 modules/hcp-openstack-accessing.adoc create mode 100644 modules/hcp-openstack-autoscale.adoc create mode 100644 modules/hosted-clusters-openstack-additional-ports.adoc create mode 100644 modules/hosted-clusters-openstack-create-floating-ip.adoc create mode 100644 modules/hosted-clusters-openstack-destroy.adoc create mode 100644 modules/hosted-clusters-openstack-performance.adoc create mode 100644 modules/hosted-clusters-openstack-prepare-etcd.adoc create mode 100644 modules/hosted-clusters-openstack-prerequisites.adoc create mode 100644 modules/hosted-clusters-openstack-upload-rhcos.adoc diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index de5f181d6cc0..33d3c67653af 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -2524,6 +2524,8 @@ Topics: File: hcp-deploy-ibmz - Name: Deploying hosted control planes on IBM Power File: hcp-deploy-ibm-power + - Name: Deploying hosted control planes on OpenStack + File: hcp-deploy-openstack - Name: Managing hosted control planes Dir: hcp-manage Topics: @@ -2537,6 +2539,8 @@ Topics: File: hcp-manage-non-bm - Name: Managing hosted control planes on IBM Power File: hcp-manage-ibm-power + - Name: Managing hosted control planes on OpenStack + File: hcp-manage-openstack - Name: Deploying hosted control planes in a disconnected environment Dir: hcp-disconnected Topics: @@ -2594,6 +2598,8 @@ Topics: File: hcp-destroy-ibmz - Name: Destroying a hosted cluster on IBM Power File: hcp-destroy-ibm-power + - Name: Destroying a hosted cluster on OpenStack + File: hcp-destroy-openstack - Name: Destroying a hosted cluster on non-bare-metal agent machines File: hcp-destroy-non-bm - Name: Manually importing a hosted cluster diff --git a/hosted_control_planes/hcp-deploy/hcp-deploy-openstack.adoc b/hosted_control_planes/hcp-deploy/hcp-deploy-openstack.adoc new file mode 100644 index 000000000000..f8667ebfb6e0 --- /dev/null +++ b/hosted_control_planes/hcp-deploy/hcp-deploy-openstack.adoc @@ -0,0 +1,23 @@ +:_mod-docs-content-type: 
ASSEMBLY
[id="hcp-deploy-openstack"]
include::_attributes/common-attributes.adoc[]
= Deploying {hcp} on OpenStack
:context: hcp-deploy-openstack

toc::[]

You can deploy {hcp} with hosted clusters that run on {rh-openstack-first}.

A _hosted cluster_ is an {product-title} cluster with its API endpoint and control plane that are hosted on the hosting cluster. The hosted cluster includes the control plane and its corresponding data plane. You can use the {mce-short} console or the `hcp` command-line interface (CLI) to create a hosted cluster.

include::modules/hosted-clusters-openstack-prerequisites.adoc[leveloffset=+1]

include::modules/hosted-clusters-openstack-prepare-etcd.adoc[leveloffset=+1]

include::modules/hosted-clusters-openstack-create-floating-ip.adoc[leveloffset=+1]

include::modules/hosted-clusters-openstack-upload-rhcos.adoc[leveloffset=+1]

include::modules/hcp-deploy-openstack-create.adoc[leveloffset=+1]

include::modules/hcp-deploy-openstack-parameters.adoc[leveloffset=+2] \ No newline at end of file diff --git a/hosted_control_planes/hcp-destroy/hcp-destroy-openstack.adoc b/hosted_control_planes/hcp-destroy/hcp-destroy-openstack.adoc new file mode 100644 index 000000000000..414da2809d36 --- /dev/null +++ b/hosted_control_planes/hcp-destroy/hcp-destroy-openstack.adoc @@ -0,0 +1,9 @@ +:_mod-docs-content-type: ASSEMBLY
[id="hcp-destroy-openstack"]
include::_attributes/common-attributes.adoc[]
= Destroying a hosted cluster on OpenStack
:context: hcp-destroy-openstack

toc::[]

include::modules/hosted-clusters-openstack-destroy.adoc[leveloffset=+1] \ No newline at end of file diff --git a/hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc b/hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc new file mode 100644 index 000000000000..cd882807f342 --- /dev/null +++ b/hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc @@ -0,0 +1,20 @@ +:_mod-docs-content-type: ASSEMBLY
[id="hcp-manage-openstack"]
include::_attributes/common-attributes.adoc[]
= Managing {hcp} on OpenStack
:context: hcp-manage-openstack

toc::[]

After you deploy {hcp} on {rh-openstack-first}, you can manage a hosted cluster by completing the following tasks.

include::modules/hcp-openstack-accessing.adoc[leveloffset=+1]

include::modules/hcp-openstack-autoscale.adoc[leveloffset=+1]

include::modules/hcp-manage-openstack-az.adoc[leveloffset=+1]

include::modules/hosted-clusters-openstack-additional-ports.adoc[leveloffset=+1]

include::modules/hosted-clusters-openstack-performance.adoc[leveloffset=+1] \ No newline at end of file diff --git a/modules/hcp-deploy-openstack-create.adoc b/modules/hcp-deploy-openstack-create.adoc new file mode 100644 index 000000000000..f0b4d94180b5 --- /dev/null +++ b/modules/hcp-deploy-openstack-create.adoc @@ -0,0 +1,65 @@ +:_mod-docs-content-type: PROCEDURE
[id="hcp-deploy-openstack-create_{context}"]
= Creating a hosted cluster on OpenStack

You can create a hosted cluster on {rh-openstack-first} by using the `hcp` CLI.

.Prerequisites

* You completed all prerequisite steps in "Preparing to deploy hosted control planes".
* You completed all prerequisite steps in "Prerequisites for OpenStack".
* You have access to the management cluster.
* You have access to the {rh-openstack} cloud. A quick way to confirm that access is shown in the example after this list.
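
For example, one way to confirm that you can reach the {rh-openstack} cloud before you create the cluster is to request a token with the `openstack` CLI. The cloud name `openstack` is an assumption; use the cloud entry that is defined in your `clouds.yaml` file:

[source,terminal]
----
$ openstack --os-cloud openstack token issue
----

If the command returns a token, your credentials and cloud entry are valid.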
+ +.Procedure + +* Create a hosted cluster by running the following command: ++ +[source,terminal] +---- +$ hcp create cluster openstack --node-pool-flavor +---- ++ +-- +where: + +``:: Specifies the flavor of the node pool of the cluster. +-- +NOTE: Many options are available at cluster creation. For {rh-openstack}-specific options, see "Options for creating a Hosted Control Planes cluster on OpenStack". For general options, see the `hcp` documentation. + +.Verification +* Verify that the hosted cluster is ready by running the following command on it: ++ +[source,terminal] +---- +$ oc -n clusters- get pods +---- ++ +-- +where: + +``:: Specifies the name of the cluster. +-- ++ +After several minutes, the output should show that the hosted control plane pods are running. ++ +.Example output +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +capi-provider-5cc7b74f47-n5gkr 1/1 Running 0 3m +catalog-operator-5f799567b7-fd6jw 2/2 Running 0 69s +certified-operators-catalog-784b9899f9-mrp6p 1/1 Running 0 66s +cluster-api-6bbc867966-l4dwl 1/1 Running 0 66s +... +... +... +redhat-operators-catalog-9d5fd4d44-z8qqk 1/1 Running 0 +---- + +[NOTE] +==== +The {rh-openstack} resources that the cluster API (CAPI) provider creates are tagged with the label `openshiftClusterID=`. + +You can define additional tags for the resources as values in the `HostedCluster.Spec.Platform.OpenStack.Tags` field of a YAML manifest that you use to create the hosted cluster. The tags are applied when you scale up the node pool. +==== \ No newline at end of file diff --git a/modules/hcp-deploy-openstack-parameters.adoc b/modules/hcp-deploy-openstack-parameters.adoc new file mode 100644 index 000000000000..48ace734decf --- /dev/null +++ b/modules/hcp-deploy-openstack-parameters.adoc @@ -0,0 +1,53 @@ +// Module included in the following assemblies: +// +// * hosted-control-planes/hcp-deploy/hcp-deploy-openstack.adoc + +:_mod-docs-content-type: REFERENCE +[id="hcp-deploy-openstack-parameters_{context}"] += Options for creating a Hosted Control Planes cluster on OpenStack + +You can supply several options to the `hcp` CLI while deploying a Hosted Control Planes Cluster on {rh-openstack-first}. + +|=== +|Option|Description|Required + +|`--openstack-ca-cert-file` +|Path to the OpenStack CA certificate file. +|No + +|`--openstack-cloud` +|Name of the cloud in `clouds.yaml`. The default value is `openstack`. +|No + +|`--openstack-credentials-file` +|Path to the OpenStack credentials file. +|No + +|`--openstack-dns-nameservers` +|List of DNS server addresses that are provided when creating the subnet. +|No + +|`--openstack-external-network-id` +|ID of the OpenStack external network. +|No + +|`--openstack-ingress-floating-ip` +|A floating IP for OpenShift ingress. +|No + +|`--openstack-node-additional-port` +|Additional ports to attach to nodes. Valid values are: `network-id`, `vnic-type`, `disable-port-security`, and `address-pairs`. +|No + +|`--openstack-node-availability-zone` +|Availability zone for the node pool. +|No + +|`--openstack-node-flavor` +|Flavor for the node pool. +|Yes + +|`--openstack-node-image-name` +|Image name for the node pool. 
+|No +|=== \ No newline at end of file diff --git a/modules/hcp-manage-openstack-az.adoc b/modules/hcp-manage-openstack-az.adoc new file mode 100644 index 000000000000..cf8fd4608984 --- /dev/null +++ b/modules/hcp-manage-openstack-az.adoc @@ -0,0 +1,93 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc + +:_mod-docs-content-type: PROCEDURE +[id="hcp-manage-openstack-az_{context}"] += Configuring node pools for availability zones + +You can distribute node pools across multiple {rh-openstack-first} Nova availability zones to improve the high availability of your hosted cluster. + +NOTE: Availability zones do not necessarily correspond to fault domains and do not inherently provide high availability benefits. + +.Prerequisites + +* You created a hosted cluster. +* You have access to the management cluster. +* The `hcp` and `oc` CLIs are installed. + +.Procedure + +. Set environment variables that are appropriate for your needs. For example, if you want to create two additional +machines in the `az1` availability zone, you could enter: ++ +[source,terminal] +---- +$ export NODEPOOL_NAME="${CLUSTER_NAME}-az1" \ +&& export WORKER_COUNT="2" \ +&& export FLAVOR="m1.xlarge" \ +&& export AZ="az1" +---- + +. Create the node pool by using your environment variables by entering the following command: ++ +[source,terminal] +---- +$ hcp create nodepool openstack \ + --cluster-name \ + --name $NODEPOOL_NAME \ + --replicas $WORKER_COUNT \ + --openstack-node-flavor $FLAVOR \ + --openstack-node-availability-zone $AZ \ +---- ++ +-- +where: + +``:: Specifies the name of your hosted cluster. +-- + +. Check the status of the node pool by listing `nodepool` resources in the clusters namespace by running the following command: ++ +[source,terminal] +---- +$ oc get nodepools --namespace clusters +---- +.Example output +[source,terminal] +---- +NAME CLUSTER DESIRED NODES CURRENT NODES AUTOSCALING AUTOREPAIR VERSION UPDATINGVERSION UPDATINGCONFIG MESSAGE +example example 5 5 False False 4.17.0 +example-az1 example 2 False False True True Minimum availability requires 2 replicas, current 0 available +---- + +. Observe the notes as they start on your hosted cluster by running the following command: ++ +[source,terminal] +---- +$ oc --kubeconfig $CLUSTER_NAME-kubeconfig get nodes +---- ++ +.Example output +[source,terminal] +---- +NAME STATUS ROLES AGE VERSION +... +example-extra-az-zh9l5 Ready worker 2m6s v1.27.4+18eadca +example-extra-az-zr8mj Ready worker 102s v1.27.4+18eadca +... +---- + +. 
Verify that the node pool is created by running the following command:
+
[source,terminal]
----
$ oc get nodepools --namsepace clusters
----
+
.Example output
[source,terminal]
----
NAME          CLUSTER   DESIRED   CURRENT   AVAILABLE   PROGRESSING   MESSAGE
example-az1   example   2         2         2           False         All replicas are available
---- \ No newline at end of file diff --git a/modules/hcp-openstack-accessing.adoc b/modules/hcp-openstack-accessing.adoc new file mode 100644 index 000000000000..41f33d271e02 --- /dev/null +++ b/modules/hcp-openstack-accessing.adoc @@ -0,0 +1,48 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc + +:_mod-docs-content-type: PROCEDURE
[id="hcp-openstack-accessing_{context}"]
= Accessing the hosted cluster

You can access hosted clusters on {rh-openstack-first} by either getting the `kubeconfig` file and `kubeadmin` credential directly from resources, or by using the `hcp` command-line interface to generate a `kubeconfig` file.

.Prerequisites

To access the hosted cluster by getting the `kubeconfig` file and credentials directly from resources, you must be familiar with the access secrets for hosted clusters. The _hosted cluster (hosting)_ namespace contains hosted cluster resources and the access secrets. The _hosted control plane_ namespace is where the hosted control plane runs.

The secret name formats are as follows:

** `kubeconfig` secret: `<hosted_cluster_namespace>-<hosted_cluster_name>-admin-kubeconfig`. For example, `clusters-hypershift-demo-admin-kubeconfig`.
** `kubeadmin` password secret: `<hosted_cluster_namespace>-<hosted_cluster_name>-kubeadmin-password`. For example, `clusters-hypershift-demo-kubeadmin-password`.

The `kubeconfig` secret contains a Base64-encoded `kubeconfig` field, which you can decode and save into a file to use with the following command:

[source,terminal]
----
$ oc --kubeconfig <hosted_cluster_name>.kubeconfig get nodes
----

The `kubeadmin` password secret is also Base64-encoded. You can decode it and use the password to log in to the API server or console of the hosted cluster.

.Procedure

* To access the hosted cluster by using the `hcp` CLI to generate the `kubeconfig` file, take the following steps:

. Generate the `kubeconfig` file by entering the following command:
+
[source,terminal]
----
$ hcp create kubeconfig --namespace <hosted_cluster_namespace> \
  --name <hosted_cluster_name> > <hosted_cluster_name>.kubeconfig
----

. After you save the `kubeconfig` file, you can access the hosted cluster by entering the following example command:
+
[source,terminal]
----
$ oc --kubeconfig <hosted_cluster_name>.kubeconfig get nodes
---- \ No newline at end of file diff --git a/modules/hcp-openstack-autoscale.adoc b/modules/hcp-openstack-autoscale.adoc new file mode 100644 index 000000000000..e777cd6d7df8 --- /dev/null +++ b/modules/hcp-openstack-autoscale.adoc @@ -0,0 +1,105 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc + +:_mod-docs-content-type: PROCEDURE
[id="hcp-openstack-autoscale_{context}"]
= Enabling node auto-scaling for the hosted cluster

When you need more capacity in your hosted cluster on {rh-openstack-first}, you can enable auto-scaling to install new worker nodes.

.Procedure

. To enable auto-scaling, enter the following command:
+
[source,terminal]
----
$ oc -n <hosted_cluster_namespace> patch nodepool <hosted_cluster_name> \
  --type=json \
  -p '[{"op": "remove", "path": "/spec/replicas"},{"op":"add", "path": "/spec/autoScaling", "value": { "max": 5, "min": 2 }}]'
----
+
[NOTE]
====
In the example, the minimum number of nodes is 2, and the maximum is 5.
The maximum number of nodes that you can add might be bound by your platform. For example, on {rh-openstack}, the maximum number of nodes is bound by the quotas and flavor capacity that are available to your project.
====

. Create a workload that requires a new node.

.. Create a YAML file that contains the workload configuration by using the following example:
+
[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: reversewords
  name: reversewords
  namespace: default
spec:
  replicas: 40
  selector:
    matchLabels:
      app: reversewords
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: reversewords
    spec:
      containers:
      - image: quay.io/mavazque/reversewords:latest
        name: reversewords
        resources:
          requests:
            memory: 2Gi
status: {}
----

.. Save the file with the name `workload-config.yaml`.

.. Apply the YAML by entering the following command:
+
[source,terminal]
----
$ oc apply -f workload-config.yaml
----

. Extract the `admin-kubeconfig` secret by entering the following command:
+
[source,terminal]
----
$ oc extract -n <hosted_cluster_namespace> \
  secret/<hosted_cluster_name>-admin-kubeconfig \
  --to=./hostedcluster-secrets --confirm
----
+
.Example output
----
hostedcluster-secrets/kubeconfig
----

. You can check if new nodes are in the `Ready` status by entering the following command:
+
[source,terminal]
----
$ oc --kubeconfig ./hostedcluster-secrets get nodes
----

. To remove the node, delete the workload by entering the following command:
+
[source,terminal]
----
$ oc --kubeconfig ./hostedcluster-secrets -n <namespace> \
  delete deployment <deployment_name>
----

. Wait for several minutes to pass without requiring the additional capacity, and then confirm that the node was removed by entering the following command:
+
[source,terminal]
----
$ oc --kubeconfig ./hostedcluster-secrets get nodes
---- \ No newline at end of file diff --git a/modules/hcp-support-matrix.adoc b/modules/hcp-support-matrix.adoc index 7989deb8912d..0270d79eb9f3 100644 --- a/modules/hcp-support-matrix.adoc +++ b/modules/hcp-support-matrix.adoc @@ -142,6 +142,10 @@ In the following table, the management cluster version is the {product-title} ve |Non-bare-metal agent machines (Technology Preview) |4.16 - 4.18 |4.16 - 4.18 + +|{rh-openstack-first} (Technology Preview) +|4.19 +|4.19 |=== [id="hcp-matrix-updates_{context}"] diff --git a/modules/hosted-clusters-openstack-additional-ports.adoc b/modules/hosted-clusters-openstack-additional-ports.adoc new file mode 100644 index 000000000000..6967372aeadb --- /dev/null +++ b/modules/hosted-clusters-openstack-additional-ports.adoc @@ -0,0 +1,89 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hypershift-openstack.adoc + +:_mod-docs-content-type: PROCEDURE
[id="hosted-clusters-openstack-additional-ports_{context}"]
= Configuring additional ports for node pools

You can configure additional ports for node pools to support advanced networking scenarios, such as SR-IOV or multiple networks.

== Use cases for additional ports for node pools

* **SR-IOV (Single Root I/O Virtualization)**: Enables a physical network device to appear as multiple virtual functions (VFs). By attaching additional ports to node pools, workloads can use SR-IOV interfaces to achieve low-latency, high-performance networking.

* **DPDK (Data Plane Development Kit)**: Provides fast packet processing in user space, bypassing the kernel.
Node pools with additional ports can expose interfaces for workloads that use DPDK to improve network performance. + +* **Manila RWX volumes on NFS**: Supports `ReadWriteMany` (RWX) volumes over NFS, allowing multiple nodes to access shared storage. Attaching additional ports to node pools enables workloads to reach the NFS network used by Manila. + +* **Multus CNI**: Enables pods to connect to multiple network interfaces. Node pools with additional ports support use cases that require secondary network interfaces, including dual-stack connectivity and traffic separation. + + +== Options for additional ports for node pools + +The --openstack-node-additional-port flag can be used to attach additional ports to nodes in a HostedCluster on OpenStack. The flag takes a list of parameters separated by commas. The parameter can be used multiple times to attach multiple additional ports to the nodes. + +The parameters are: + +|=== +|Parameter|Description|Required|Default + +|`network-id` +|The ID of the network to attach to the node. +|Yes +|N/A + +|`vnic-type` +|The VNIC type to use for the port. If not specified, Neutron uses the default type `normal`. +|No +|N/A + +|`disable-port-security` +|Whether to disable port security for the port. If not specified, Neutron enables port security unless it is explicitly disabled at the network level. +|No +|N/A + +|`address-pairs` +|A list of IP address pairs to assign to the port. The format is `ip_address=mac_address`. Multiple pairs can be provided, separated by a hyphen (`-`). The `mac_address` portion is optional. +|No +|N/A +|=== + +== Creating additional ports for node pools + +You can configure additional ports for node pools for hosted clusters that run on {rh-openstack-first}. + +.Prerequisites + +* You created a hosted cluster. +* You have access to the management cluster. +* The `hcp` CLI is installed. +* Additional networks are created in {rh-openstack}. +* The project that is used by the hosted cluster must have access to the additional networks. +* You reviewed the options that are listed in "Options for additional ports for node pools". + +.Procedure + +* Create a hosted cluster with additional ports attached to it by running the `hcp create nodepool openstack` command with the `--openstack-node-additional-port` options. For example: ++ +[source,terminal] +---- +$ hcp create nodepool openstack \ + --cluster-name \ + --name \ + --replicas \ + --openstack-node-flavor \ + --openstack-node-additional-port "network-id=,vnic-type=direct,disable-port-security=true" \ + --openstack-node-additional-port "network-id=,address-pairs:192.168.0.1-192.168.0.2" +---- ++ +-- +where: + +``:: Specifies the name of the hosted cluster. +``:: Specifies the name of the node pool. +``:: Specifies the desired number of replicas. +``:: Specifies the {rh-openstack} flavor to use. +``:: Specifies a SR-IOV network ID. +``:: Specifies a load balancer network ID. 
+-- \ No newline at end of file diff --git a/modules/hosted-clusters-openstack-create-floating-ip.adoc b/modules/hosted-clusters-openstack-create-floating-ip.adoc new file mode 100644 index 000000000000..5c9c05627434 --- /dev/null +++ b/modules/hosted-clusters-openstack-create-floating-ip.adoc @@ -0,0 +1,34 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hypershift-openstack.adoc + +:_mod-docs-content-type: PROCEDURE +[id="hosted-clusters-openstack-create-floating-ip_{context}"] += Creating a floating IP for ingress + +If you want to make ingress available in a hosted cluster without manual intervention, you can create a floating IP address for it in advance. + +.Prerequisites + +* You have access to the {rh-openstack-first} cloud. + +.Procedure + +* Create a floating IP address by running the following command: ++ +[source,terminal] +---- +$ openstack floating ip create +---- ++ +-- +where: + +``:: Specifies the ID of the external network. +-- + +[NOTE] +==== +If you specify a floating IP address by using the `--openstack-ingress-floating-ip` flag without creating it in advance, the `cloud-provider-openstack` component attempts to create it automatically. This process only succeeds if the + Neutron API policy permits creating a floating IP address with a specific IP address. +==== \ No newline at end of file diff --git a/modules/hosted-clusters-openstack-destroy.adoc b/modules/hosted-clusters-openstack-destroy.adoc new file mode 100644 index 000000000000..ffa42287223a --- /dev/null +++ b/modules/hosted-clusters-openstack-destroy.adoc @@ -0,0 +1,28 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hypershift-openstack.adoc + +:_mod-docs-content-type: PROCEDURE +[id="hosted-clusters-openstack-destroy_{context}"] += Destroying a hosted cluster by using the CLI + +You can destroy a hosted cluster and its associated resources on {rh-openstack-first} by using the `hcp` CLI tool. + +.Prerequisites + +* You installed the hosted control planes CLI, `hcp`. + +.Procedure + +* To destroy the cluster and its associated resources, run the following command: ++ +[source,terminal] +---- +$ hcp destroy cluster openstack --name= +---- ++ +where: ++ +``:: is the name of the hosted cluster. + +After the process completes, all your cluster and all resources that are associated with it are destroyed. \ No newline at end of file diff --git a/modules/hosted-clusters-openstack-performance.adoc b/modules/hosted-clusters-openstack-performance.adoc new file mode 100644 index 000000000000..718ff89c119d --- /dev/null +++ b/modules/hosted-clusters-openstack-performance.adoc @@ -0,0 +1,189 @@ +:_mod-docs-content-type: PROCEDURE +[id="hosted-clusters-openstack-performance"] += Tuning performance for hosted cluster nodes +:context: hostedcluster-openstack-performance + +You can tune hosted cluster node performance on {rh-openstack-first} for high-performance workloads, such as +cloud-native network functions (CNFs). Performance tuning includes configuring {rh-openstack} resources, creating a performance profile, deploying a tuned `NodePool` resource, and enabling SR-IOV device support. + +CNFs are designed to run in cloud-native environments. They can provide network services such as routing, +firewalling, and load balancing. You can configure the node pool to use high-performance computing and networking devices to run CNFs. 
+ +.Prerequisites + +* You have {rh-openstack} flavor that has the necessary resources to run your workload, including dedicated CPU, memory, and host aggregate information. +* You have an {rh-openstack} network that is attached to SR-IOV or DPDK-capable NICs. The network must be available to the project used by hosted clusters. + +.Procedure + +. Create a performance profile that meets your requirements in a file called `perfprofile.yaml`. For example: ++ +.Example performance profile in a config map +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: perfprof-1 + namespace: clusters +data: + tuning: | + apiVersion: v1 + kind: ConfigMap + metadata: + name: cnf-performanceprofile + namespace: "${HYPERSHIFT_NAMESPACE}" + data: + tuning: | + apiVersion: performance.openshift.io/v2 + kind: PerformanceProfile + metadata: + name: cnf-performanceprofile + spec: + additionalKernelArgs: + - nmi_watchdog=0 + - audit=0 + - mce=off + - processor.max_cstate=1 + - idle=poll + - intel_idle.max_cstate=0 + - amd_iommu=on + cpu: + isolated: "${CPU_ISOLATED}" + reserved: "${CPU_RESERVED}" + hugepages: + defaultHugepagesSize: "1G" + pages: + - count: ${HUGEPAGES} + node: 0 + size: 1G + nodeSelector: + node-role.kubernetes.io/worker: '' + realTimeKernel: + enabled: false + globallyDisableIrqLoadBalancing: true +---- ++ +IMPORTANT: If you do not already have environment variables set for the HyperShift Operator namespace, isolated and +reserved +CPUs, and huge pages count, create them prior to applying the performance profile. + +. Apply the performance profile configuration by running the following command: ++ +[source,terminal] +---- +$ oc apply -f perfprof.yaml +---- + +. If you do not already have a `CLUSTER_NAME` environment variable set for the name of your cluster, define it. + +. Set a node pool name environment variable by running the following command: ++ +[source,terminal] +---- +$ export NODEPOOL_NAME=$CLUSTER_NAME-cnf +---- + +. Set a flavor environment variable by running the following command: ++ +[source,terminal] +---- +$ export FLAVOR="m1.xlarge.nfv" +---- + +. Create a node pool that uses the performance profile by running the following command: ++ +[source,terminal] +---- +$ hcp create nodepool openstack \ + --cluster-name $CLUSTER_NAME \ + --name $NODEPOOL_NAME \ + --node-count 0 \ + --openstack-node-flavor $FLAVOR +---- + +. Patch the node pool to reference the `PerformanceProfile` resource by running the following command: ++ +[source,terminal] +---- +$ oc patch nodepool -n ${HYPERSHIFT_NAMESPACE} ${CLUSTER_NAME} \ + -p '{"spec":{"tuningConfig":[{"name":"cnf-performanceprofile"}]}}' --type=merge +---- + +. Scale the node pool by running the following command: + +[source,terminal] +---- +$ oc scale nodepool/$CLUSTER_NAME --namespace ${HYPERSHIFT_NAMESPACE} --replicas=1 +---- + +. Wait for the nodes to be ready: + +.. Wait for the nodes to be ready by running the following command: ++ +[source,terminal] +---- +$ oc wait --for=condition=UpdatingConfig=True nodepool \ +-n ${HYPERSHIFT_NAMESPACE} ${CLUSTER_NAME} \ +--timeout=5m +---- + +.. Wait for the configuration update to finish by running the following command: ++ +[source,terminal] +---- +$ oc wait --for=condition=UpdatingConfig=False nodepool \ + -n ${HYPERSHIFT_NAMESPACE} ${CLUSTER_NAME} \ + --timeout=30m +---- + +.. 
Wait until all nodes are healthy by running the following command: ++ +[source,terminal] +---- +$ oc wait --for=condition=AllNodesHealthy nodepool \ + -n ${HYPERSHIFT_NAMESPACE} ${CLUSTER_NAME} \ + --timeout=5m +---- + +NOTE: You can make an SSH connection into the nodes or use the `oc debug` command to verify performance configurations. + +[id="hosted-clusters-openstack-performance-enabling"] +== Enabling the SR-IOV Network Operator in a hosted cluster + +You can enable the SR-IOV Network Operator to manage SR-IOV-capable devices on nodes deployed by the `NodePool` resource. The operator runs in the hosted cluster and requires labeled worker nodes. + +.Procedure + +. Generate a `kubeconfig` file for the hosted cluster by running the following command: ++ +[source,terminal] +---- +$ hcp create kubeconfig --name $CLUSTER_NAME > $CLUSTER_NAME-kubeconfig +---- + +. Create a `kubeconfig` resource environment variable by running the following command: ++ +[source,terminal] +---- +$ export KUBECONFIG=$CLUSTER_NAME-kubeconfig +---- + +. Label each worker node to indicate SR-IOV capability by running the following command: ++ +[source,terminal] +---- +$ oc label node feature.node.kubernetes.io/network-sriov.capable=true +---- ++ +-- +where: + +``:: Specifies the name of a worker node in the hosted cluster. +-- +// TODO: Since this is a module, I need to link in add'ls. Yeeaahh. +. Install the SR-IOV Network Operator in the hosted cluster by following the instructions in the OpenShift documentation: ++ +link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html-single/networking/index#installing-sr-iov-operator_installing-sriov-operator[Installing the SR-IOV Network Operator] + +. After installation, configure SR-IOV workloads in the hosted cluster by using the same process as for a standalone OpenShift Container Platform cluster. \ No newline at end of file diff --git a/modules/hosted-clusters-openstack-prepare-etcd.adoc b/modules/hosted-clusters-openstack-prepare-etcd.adoc new file mode 100644 index 000000000000..fec53c46457d --- /dev/null +++ b/modules/hosted-clusters-openstack-prepare-etcd.adoc @@ -0,0 +1,152 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hypershift-openstack.adoc + +:_mod-docs-content-type: PROCEDURE +[id="hosted-clusters-openstack-prepare-etcd_{context}"] += Preparing the management cluster for etcd local storage + +In a Hosted Control Plane (HCP) deployment on {rh-openstack-first}, you can improve etcd performance by using local ephemeral storage that is provisioned with the TopoLVM CSI driver instead of relying on the default Cinder-based Persistent Volume Claims (PVCs). + +.Prerequisites + +* You have access to a management cluster with HyperShift installed. +* You can create and manage {rh-openstack} flavors and machine sets. +* You have the `oc` and `openstack` CLI tools installed and configured. +* You are familiar with TopoLVM and Logical Volume Manager (LVM) storage concepts. +* You installed the LVM Storage Operator on the management cluster. For more information, see "Installing LVM Storage + by using the CLI" in the Storage section of the {product-title} documentation. + +.Procedure + +. Create a Nova flavor with an additional ephemeral disk by using the `openstack` CLI. 
For example: ++ +[source,terminal] +---- +$ openstack flavor create \ + --id auto \ + --ram 8192 \ + --disk 0 \ + --ephemeral 100 \ + --vcpus 4 \ + --public \ + hcp-etcd-ephemeral +---- ++ +[NOTE] +==== +Nova automatically attaches the ephemeral disk to the instance and formats it as `vfat` when a server is created with + that flavor. +==== +// Yes, that is the title. +. Create a compute machine set that uses the new flavor. For more information, see "Creating a compute machine set +on OpenStack" in the {product-title} documentation. + +. Scale the machine set to meet your requirements. If clusters are deployed for high availability, a minimum of 3 workers must be deployed so the pods can be distributed accordingly. + +. Label the new worker nodes to identify them for etcd use. For example: ++ +[source,terminal] +---- +$ oc label node hypershift-capable=true +---- ++ +This label is arbitrary; you can update it later. + +. In a file called `lvmcluster.yaml`, create the following `LVMCluster` custom resource to the local storage +configuration for etcd: ++ +[source,yaml] +---- +apiVersion: lvm.topolvm.io/v1alpha1 +kind: LVMCluster +metadata: + name: etcd-hcp + namespace: openshift-storage +spec: + storage: + deviceClasses: + - name: etcd-class + default: true + nodeSelector: + nodeSelectorTerms: + - matchExpressions: + - key: hypershift-capable + operator: In + values: + - "true" + deviceSelector: + forceWipeDevicesAndDestroyAllData: true + paths: + - /dev/vdb +---- ++ +In this example resource: ++ +* The ephemeral disk location is `/dev/vdb`, which is the case in most situations. Verify that this location is true in your case, and note that symlinks are not supported. +* The parameter `forceWipeDevicesAndDestroyAllData` is set to a `True` value because the default Nova ephemeral disk +comes formatted in VFAT. + +. Apply the `LVMCluster` resource by running the following command: ++ +[source,terminal] +---- +oc apply -f lvmcluster.yaml +---- + +. Verify the `LVMCluster` resource by running the following command: ++ +[source,terminal] +---- +$ oc get lvmcluster -A +---- ++ +.Example output +[source,terminal] +---- +NAMESPACE NAME STATUS +openshift-storage etcd-hcp Ready +---- + +. Verify the `StorageClass` resource by running the following command: ++ +[source,terminal] +---- +$ oc get storageclass +---- ++ +.Example output +[source,terminal] +---- +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +lvms-etcd-class topolvm.io Delete WaitForFirstConsumer true 23m +standard-csi (default) cinder.csi.openstack.org Delete WaitForFirstConsumer true 56m +---- + +. Deploy a hosted cluster that uses LVM-backed etcd storage class by using the `hcp` CLI. For example: ++ +[source,terminal] +---- +$ hcp create cluster openstack \ + --name my-hcp-cluster \ + --base-domain example.com \ + --pull-secret /path/to/pull-secret.json \ + --release-image quay.io/openshift-release-dev/ocp-release:4.19.0-x86_64 \ + --node-pool-replicas 3 \ + --etcd-storage-class lvms-etcd-class +---- + +. Validate the etcd persistent volume claim (PVC) by running the following command: ++ +[source,terminal] +---- +$ oc get pvc -A +---- + +. 
Inside the {hcp} etcd pod, confirm the mount path and device by running the following command:
+
[source,terminal]
----
$ df -h /var/lib
----

diff --git a/modules/hosted-clusters-openstack-prerequisites.adoc b/modules/hosted-clusters-openstack-prerequisites.adoc new file mode 100644 index 000000000000..642aac1ff30a --- /dev/null +++ b/modules/hosted-clusters-openstack-prerequisites.adoc @@ -0,0 +1,22 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hypershift-openstack.adoc + +:_mod-docs-content-type: CONCEPT
[id="hosted-clusters-openstack-prerequisites_{context}"]
= Prerequisites for OpenStack

Before you create a hosted cluster on {rh-openstack-first} or {rhoso-first}, ensure that you meet the following requirements:

* You have administrative access to a management {product-title} cluster version 4.17 or later. This cluster can run on bare metal, {rh-openstack}, or a supported public cloud.
* The HyperShift Operator is installed on the management cluster as specified in "Preparing to deploy hosted control planes".
* The management cluster is configured with OVN-Kubernetes as the default pod network CNI.
* The OpenShift CLI (`oc`) and the hosted control planes CLI (`hcp`) are installed.
* A load-balancer backend, for example, Octavia, is installed on the management {product-title} cluster. The load balancer is required for the `kube-api` service to be created for each hosted cluster.
** When ingress is configured with an Octavia load balancer, the {rh-openstack} Octavia service is running in the cloud that hosts the guest cluster.
* A valid link:https://console.redhat.com/openshift/install/platform-agnostic/user-provisioned[pull secret] file is present for the `quay.io/openshift-release-dev` repository.
* The default external network for the management cluster is reachable from the guest cluster. The `kube-apiserver` load-balancer type service is created on this network.
* If you use a pre-defined floating IP address for ingress, you created a DNS record for the following wildcard domain that points to it: `*.apps.<hosted_cluster_name>.<base_domain>`, where:
** `<hosted_cluster_name>` is the name of your hosted cluster.
** `<base_domain>` is the parent DNS domain under which your cluster’s applications live. \ No newline at end of file diff --git a/modules/hosted-clusters-openstack-upload-rhcos.adoc b/modules/hosted-clusters-openstack-upload-rhcos.adoc new file mode 100644 index 000000000000..9ee96e5da23e --- /dev/null +++ b/modules/hosted-clusters-openstack-upload-rhcos.adoc @@ -0,0 +1,29 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hcp-deploy-hcp-deploy-openstack + +:_mod-docs-content-type: PROCEDURE
[id="hosted-clusters-openstack-upload-rhcos_{context}"]
= Uploading the RHCOS image to OpenStack

If you want to specify which {op-system} image to use when you deploy node pools in a {hcp} deployment on {rh-openstack-first}, upload the image to the {rh-openstack} cloud. If you do not upload the image, the OpenStack Resource Controller (ORC) downloads an image from the {product-title} mirror and deletes it when the hosted cluster is deleted.

.Prerequisites

* You downloaded the {op-system} image from the {product-title} mirror.
* You have access to your {rh-openstack} cloud.

.Procedure

* Upload an {op-system} image to {rh-openstack} by running the following command:
+
[source,terminal]
----
$ openstack image create --disk-format qcow2 --file <rhcos_image_file_name> rhcos
----
+
--
where:

`<rhcos_image_file_name>`:: Specifies the file name of the {op-system} image.
+-- \ No newline at end of file From 8d98282198e00f16bdeba07d6112dedb886d2b24 Mon Sep 17 00:00:00 2001 From: Max Bridges Date: Sun, 25 May 2025 23:03:16 -0400 Subject: [PATCH 2/6] typos --- modules/hcp-manage-openstack-az.adoc | 4 ++-- ...hosted-clusters-openstack-performance.adoc | 20 +++++++++---------- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/modules/hcp-manage-openstack-az.adoc b/modules/hcp-manage-openstack-az.adoc index cf8fd4608984..750d74913145 100644 --- a/modules/hcp-manage-openstack-az.adoc +++ b/modules/hcp-manage-openstack-az.adoc @@ -18,8 +18,7 @@ NOTE: Availability zones do not necessarily correspond to fault domains and do n .Procedure -. Set environment variables that are appropriate for your needs. For example, if you want to create two additional -machines in the `az1` availability zone, you could enter: +. Set environment variables that are appropriate for your needs. For example, if you want to create two additional machines in the `az1` availability zone, you could enter: + [source,terminal] ---- @@ -53,6 +52,7 @@ where: ---- $ oc get nodepools --namespace clusters ---- ++ .Example output [source,terminal] ---- diff --git a/modules/hosted-clusters-openstack-performance.adoc b/modules/hosted-clusters-openstack-performance.adoc index 718ff89c119d..57e0db46fac4 100644 --- a/modules/hosted-clusters-openstack-performance.adoc +++ b/modules/hosted-clusters-openstack-performance.adoc @@ -1,13 +1,15 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hypershift-openstack.adoc + :_mod-docs-content-type: PROCEDURE [id="hosted-clusters-openstack-performance"] = Tuning performance for hosted cluster nodes :context: hostedcluster-openstack-performance -You can tune hosted cluster node performance on {rh-openstack-first} for high-performance workloads, such as -cloud-native network functions (CNFs). Performance tuning includes configuring {rh-openstack} resources, creating a performance profile, deploying a tuned `NodePool` resource, and enabling SR-IOV device support. +You can tune hosted cluster node performance on {rh-openstack-first} for high-performance workloads, such as cloud-native network functions (CNFs). Performance tuning includes configuring {rh-openstack} resources, creating a performance profile, deploying a tuned `NodePool` resource, and enabling SR-IOV device support. -CNFs are designed to run in cloud-native environments. They can provide network services such as routing, -firewalling, and load balancing. You can configure the node pool to use high-performance computing and networking devices to run CNFs. +CNFs are designed to run in cloud-native environments. They can provide network services such as routing, firewalling, and load balancing. You can configure the node pool to use high-performance computing and networking devices to run CNFs. .Prerequisites @@ -64,9 +66,7 @@ data: globallyDisableIrqLoadBalancing: true ---- + -IMPORTANT: If you do not already have environment variables set for the HyperShift Operator namespace, isolated and -reserved -CPUs, and huge pages count, create them prior to applying the performance profile. +IMPORTANT: If you do not already have environment variables set for the HyperShift Operator namespace, isolated and reserved CPUs, and huge pages count, create them prior to applying the performance profile. . Apply the performance profile configuration by running the following command: + @@ -181,9 +181,7 @@ where: ``:: Specifies the name of a worker node in the hosted cluster. 
-- -// TODO: Since this is a module, I need to link in add'ls. Yeeaahh. -. Install the SR-IOV Network Operator in the hosted cluster by following the instructions in the OpenShift documentation: -+ -link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html-single/networking/index#installing-sr-iov-operator_installing-sriov-operator[Installing the SR-IOV Network Operator] + +. Install the SR-IOV Network Operator in the hosted cluster by following the instructions in the OpenShift documentation: "Installing the SR-IOV Network Operator". . After installation, configure SR-IOV workloads in the hosted cluster by using the same process as for a standalone OpenShift Container Platform cluster. \ No newline at end of file From 78f0a5e46409c595969d52715640484f40f3c538 Mon Sep 17 00:00:00 2001 From: Max Bridges Date: Sun, 25 May 2025 23:19:11 -0400 Subject: [PATCH 3/6] typo --- modules/hcp-manage-openstack-az.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/hcp-manage-openstack-az.adoc b/modules/hcp-manage-openstack-az.adoc index 750d74913145..7343ef8be6a1 100644 --- a/modules/hcp-manage-openstack-az.adoc +++ b/modules/hcp-manage-openstack-az.adoc @@ -82,7 +82,7 @@ example-extra-az-zr8mj Ready worker 102s v1.27.4+18eadca + [source,terminal] ---- -$ oc get nodepools --namsepace clusters +$ oc get nodepools --namespace clusters ---- + .Example output From fee1ad9310d64dc24cfd8a277ae16d5f6a9a58f2 Mon Sep 17 00:00:00 2001 From: Max Bridges Date: Sun, 25 May 2025 23:30:28 -0400 Subject: [PATCH 4/6] better be a fix! --- modules/hcp-manage-openstack-az.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/hcp-manage-openstack-az.adoc b/modules/hcp-manage-openstack-az.adoc index 7343ef8be6a1..e8e12e278b0e 100644 --- a/modules/hcp-manage-openstack-az.adoc +++ b/modules/hcp-manage-openstack-az.adoc @@ -37,7 +37,7 @@ $ hcp create nodepool openstack \ --name $NODEPOOL_NAME \ --replicas $WORKER_COUNT \ --openstack-node-flavor $FLAVOR \ - --openstack-node-availability-zone $AZ \ + --openstack-node-availability-zone $AZ ---- + -- From 663759396e6aca102458af84e96ec47f6dcce72f Mon Sep 17 00:00:00 2001 From: Max Bridges Date: Sun, 25 May 2025 23:44:18 -0400 Subject: [PATCH 5/6] modularizing more ugh --- .../hcp-manage/hcp-manage-openstack.adoc | 16 +++- ...d-clusters-openstack-additional-ports.adoc | 89 ------------------- ...d-clusters-openstack-addl-ports-cases.adoc | 17 ++++ ...lusters-openstack-addl-ports-creating.adoc | 44 +++++++++ ...clusters-openstack-addl-ports-options.adoc | 35 ++++++++ 5 files changed, 111 insertions(+), 90 deletions(-) delete mode 100644 modules/hosted-clusters-openstack-additional-ports.adoc create mode 100644 modules/hosted-clusters-openstack-addl-ports-cases.adoc create mode 100644 modules/hosted-clusters-openstack-addl-ports-creating.adoc create mode 100644 modules/hosted-clusters-openstack-addl-ports-options.adoc diff --git a/hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc b/hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc index cd882807f342..69eb25dd97b2 100644 --- a/hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc +++ b/hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc @@ -15,6 +15,20 @@ include::modules/hcp-openstack-autoscale.adoc[leveloffset=+1] include::modules/hcp-manage-openstack-az.adoc[leveloffset=+1] -include::modules/hosted-clusters-openstack-additional-ports.adoc[leveloffset=+1] +[id="hosted-clusters-openstack-additional-ports"] +== Configuring 
additional ports for node pools + +You can configure additional ports for node pools to support advanced networking scenarios, such as SR-IOV or multiple networks. + +include::modules/hosted-clusters-openstack-addl-ports-cases.adoc[leveloffset=+2] + +include::modules/hosted-clusters-openstack-addl-ports-options.adoc[leveloffset=+2] + +include::modules/hosted-clusters-openstack-addl-ports-creating.adoc[leveloffset=+2] + +[role="_additional-resources"] +.Additional resources + +* xref:../../networking/networking_operators/metallb-operator/about-metallb.adoc#about-metallb_about-metallb[About MetalLB and the MetalLB Operator] include::modules/hosted-clusters-openstack-performance.adoc[leveloffset=+1] \ No newline at end of file diff --git a/modules/hosted-clusters-openstack-additional-ports.adoc b/modules/hosted-clusters-openstack-additional-ports.adoc deleted file mode 100644 index 6967372aeadb..000000000000 --- a/modules/hosted-clusters-openstack-additional-ports.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * hosted_control_planes/hypershift-openstack.adoc - -:_mod-docs-content-type: PROCEDURE -[id="hosted-clusters-openstack-additional-ports_{context}"] -= Configuring additional ports for node pools - -You can configure additional ports for node pools to support advanced networking scenarios, such as SR-IOV or multiple networks. - -== Use cases for additional ports for node pools - -* **SR-IOV (Single Root I/O Virtualization)**: Enables a physical network device to appear as multiple virtual functions (VFs). By attaching additional ports to node pools, workloads can use SR-IOV interfaces to achieve low-latency, high-performance networking. - -* **DPDK (Data Plane Development Kit)**: Provides fast packet processing in user space, bypassing the kernel. Node pools with additional ports can expose interfaces for workloads that use DPDK to improve network performance. - -* **Manila RWX volumes on NFS**: Supports `ReadWriteMany` (RWX) volumes over NFS, allowing multiple nodes to access shared storage. Attaching additional ports to node pools enables workloads to reach the NFS network used by Manila. - -* **Multus CNI**: Enables pods to connect to multiple network interfaces. Node pools with additional ports support use cases that require secondary network interfaces, including dual-stack connectivity and traffic separation. - - -== Options for additional ports for node pools - -The --openstack-node-additional-port flag can be used to attach additional ports to nodes in a HostedCluster on OpenStack. The flag takes a list of parameters separated by commas. The parameter can be used multiple times to attach multiple additional ports to the nodes. - -The parameters are: - -|=== -|Parameter|Description|Required|Default - -|`network-id` -|The ID of the network to attach to the node. -|Yes -|N/A - -|`vnic-type` -|The VNIC type to use for the port. If not specified, Neutron uses the default type `normal`. -|No -|N/A - -|`disable-port-security` -|Whether to disable port security for the port. If not specified, Neutron enables port security unless it is explicitly disabled at the network level. -|No -|N/A - -|`address-pairs` -|A list of IP address pairs to assign to the port. The format is `ip_address=mac_address`. Multiple pairs can be provided, separated by a hyphen (`-`). The `mac_address` portion is optional. 
-|No -|N/A -|=== - -== Creating additional ports for node pools - -You can configure additional ports for node pools for hosted clusters that run on {rh-openstack-first}. - -.Prerequisites - -* You created a hosted cluster. -* You have access to the management cluster. -* The `hcp` CLI is installed. -* Additional networks are created in {rh-openstack}. -* The project that is used by the hosted cluster must have access to the additional networks. -* You reviewed the options that are listed in "Options for additional ports for node pools". - -.Procedure - -* Create a hosted cluster with additional ports attached to it by running the `hcp create nodepool openstack` command with the `--openstack-node-additional-port` options. For example: -+ -[source,terminal] ----- -$ hcp create nodepool openstack \ - --cluster-name \ - --name \ - --replicas \ - --openstack-node-flavor \ - --openstack-node-additional-port "network-id=,vnic-type=direct,disable-port-security=true" \ - --openstack-node-additional-port "network-id=,address-pairs:192.168.0.1-192.168.0.2" ----- -+ --- -where: - -``:: Specifies the name of the hosted cluster. -``:: Specifies the name of the node pool. -``:: Specifies the desired number of replicas. -``:: Specifies the {rh-openstack} flavor to use. -``:: Specifies a SR-IOV network ID. -``:: Specifies a load balancer network ID. --- \ No newline at end of file diff --git a/modules/hosted-clusters-openstack-addl-ports-cases.adoc b/modules/hosted-clusters-openstack-addl-ports-cases.adoc new file mode 100644 index 000000000000..4abe72e0109e --- /dev/null +++ b/modules/hosted-clusters-openstack-addl-ports-cases.adoc @@ -0,0 +1,17 @@ +// Module included in the following assemblies: +// +// * hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc + +:_mod-docs-content-type: CONCEPT +[id="hosted-clusters-openstack-addl-ports-cases_{context}"] +== Use cases for additional ports for node pools + +Common reasons to configure additional ports for node pools are: + +* **SR-IOV (Single Root I/O Virtualization)**: Enables a physical network device to appear as multiple virtual functions (VFs). By attaching additional ports to node pools, workloads can use SR-IOV interfaces to achieve low-latency, high-performance networking. + +* **DPDK (Data Plane Development Kit)**: Provides fast packet processing in user space, bypassing the kernel. Node pools with additional ports can expose interfaces for workloads that use DPDK to improve network performance. + +* **Manila RWX volumes on NFS**: Supports `ReadWriteMany` (RWX) volumes over NFS, allowing multiple nodes to access shared storage. Attaching additional ports to node pools enables workloads to reach the NFS network used by Manila. + +* **Multus CNI**: Enables pods to connect to multiple network interfaces. Node pools with additional ports support use cases that require secondary network interfaces, including dual-stack connectivity and traffic separation. 
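+
+The following sketch is an illustration only of how one of these use cases maps to the `--openstack-node-additional-port` flag when you create a node pool. The SR-IOV network ID placeholder is an assumption; the full procedure and parameters are described in "Creating additional ports for node pools" and "Options for additional ports for node pools":
+
+[source,terminal]
+----
+$ hcp create nodepool openstack \
+  --cluster-name <hosted_cluster_name> \
+  --name <nodepool_name> \
+  --replicas <replica_count> \
+  --openstack-node-flavor <flavor> \
+  --openstack-node-additional-port "network-id=<sriov_network_id>,vnic-type=direct"
+----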
\ No newline at end of file
diff --git a/modules/hosted-clusters-openstack-addl-ports-creating.adoc b/modules/hosted-clusters-openstack-addl-ports-creating.adoc new file mode 100644 index 000000000000..7977dde067b9 --- /dev/null +++ b/modules/hosted-clusters-openstack-addl-ports-creating.adoc @@ -0,0 +1,44 @@
+// Module included in the following assemblies:
+//
+// * hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="hosted-clusters-openstack-addl-ports_{context}"]
+== Creating additional ports for node pools
+
+You can configure additional ports for node pools for hosted clusters that run on {rh-openstack-first}.
+
+.Prerequisites
+
+* You created a hosted cluster.
+* You have access to the management cluster.
+* The `hcp` CLI is installed.
+* Additional networks are created in {rh-openstack}.
+* The project that is used by the hosted cluster has access to the additional networks.
+* You reviewed the options that are listed in "Options for additional ports for node pools".
+
+.Procedure
+
+* Create a node pool with additional ports attached to it by running the `hcp create nodepool openstack` command with one or more `--openstack-node-additional-port` options. For example:
++
+[source,terminal]
+----
+$ hcp create nodepool openstack \
+  --cluster-name <hosted_cluster_name> \
+  --name <nodepool_name> \
+  --replicas <replica_count> \
+  --openstack-node-flavor <flavor> \
+  --openstack-node-additional-port "network-id=<sriov_network_id>,vnic-type=direct,disable-port-security=true" \
+  --openstack-node-additional-port "network-id=<lb_network_id>,address-pairs:192.168.0.1-192.168.0.2"
+----
++
+--
+where:
+
+`<hosted_cluster_name>`:: Specifies the name of the hosted cluster.
+`<nodepool_name>`:: Specifies the name of the node pool.
+`<replica_count>`:: Specifies the desired number of replicas.
+`<flavor>`:: Specifies the {rh-openstack} flavor to use.
+`<sriov_network_id>`:: Specifies an SR-IOV network ID.
+`<lb_network_id>`:: Specifies a load balancer network ID.
+-- \ No newline at end of file diff --git a/modules/hosted-clusters-openstack-addl-ports-options.adoc b/modules/hosted-clusters-openstack-addl-ports-options.adoc new file mode 100644 index 000000000000..84e86ac38beb --- /dev/null +++ b/modules/hosted-clusters-openstack-addl-ports-options.adoc @@ -0,0 +1,35 @@
+// Module included in the following assemblies:
+//
+// * hosted_control_planes/hcp-manage/hcp-manage-openstack.adoc
+
+:_mod-docs-content-type: REFERENCE
+[id="hosted-clusters-openstack-addl-ports-options_{context}"]
+== Options for additional ports for node pools
+
+You can use the `--openstack-node-additional-port` flag to attach additional ports to nodes in a hosted cluster on OpenStack. The flag takes a list of comma-separated parameters. You can repeat the flag to attach multiple additional ports to the nodes.
+
+The parameters are:
+
+|===
+|Parameter|Description|Required|Default
+
+|`network-id`
+|The ID of the network to attach to the node.
+|Yes
+|N/A
+
+|`vnic-type`
+|The VNIC type to use for the port. If not specified, Neutron uses the default type `normal`.
+|No
+|N/A
+
+|`disable-port-security`
+|Whether to disable port security for the port. If not specified, Neutron enables port security unless it is explicitly disabled at the network level.
+|No
+|N/A
+
+|`address-pairs`
+|A list of IP address pairs to assign to the port. The format is `ip_address=mac_address`. Multiple pairs can be provided, separated by a hyphen (`-`). The `mac_address` portion is optional.
+|No +|N/A +|=== \ No newline at end of file From caa2c394bac6b30408c39b7d68ef3b8f3a9a543a Mon Sep 17 00:00:00 2001 From: Max Bridges Date: Sun, 25 May 2025 23:47:34 -0400 Subject: [PATCH 6/6] level fix --- modules/hosted-clusters-openstack-addl-ports-cases.adoc | 2 +- modules/hosted-clusters-openstack-addl-ports-creating.adoc | 2 +- modules/hosted-clusters-openstack-addl-ports-options.adoc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/hosted-clusters-openstack-addl-ports-cases.adoc b/modules/hosted-clusters-openstack-addl-ports-cases.adoc index 4abe72e0109e..1994694578b4 100644 --- a/modules/hosted-clusters-openstack-addl-ports-cases.adoc +++ b/modules/hosted-clusters-openstack-addl-ports-cases.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: CONCEPT [id="hosted-clusters-openstack-addl-ports-cases_{context}"] -== Use cases for additional ports for node pools += Use cases for additional ports for node pools Common reasons to configure additional ports for node pools are: diff --git a/modules/hosted-clusters-openstack-addl-ports-creating.adoc b/modules/hosted-clusters-openstack-addl-ports-creating.adoc index 7977dde067b9..09a2d681ca9b 100644 --- a/modules/hosted-clusters-openstack-addl-ports-creating.adoc +++ b/modules/hosted-clusters-openstack-addl-ports-creating.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="hosted-clusters-openstack-addl-ports_{context}"] -== Creating additional ports for node pools += Creating additional ports for node pools You can configure additional ports for node pools for hosted clusters that run on {rh-openstack-first}. diff --git a/modules/hosted-clusters-openstack-addl-ports-options.adoc b/modules/hosted-clusters-openstack-addl-ports-options.adoc index 84e86ac38beb..a03703295aa9 100644 --- a/modules/hosted-clusters-openstack-addl-ports-options.adoc +++ b/modules/hosted-clusters-openstack-addl-ports-options.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: REFERENCE [id="hosted-clusters-openstack-addl-ports-options_{context}"] -== Options for additional ports for node pools += Options for additional ports for node pools You can use the `--openstack-node-additional-port` flag to attach additional ports to nodes in a hosted cluster on OpenStack. The flag takes a list of comma-separated parameters. Parameters can be used multiple times to attach multiple additional ports to the nodes.