diff --git a/cmd/liqoctl/cmd/peer.go b/cmd/liqoctl/cmd/peer.go index 5f0582d61d..a637e9d04a 100644 --- a/cmd/liqoctl/cmd/peer.go +++ b/cmd/liqoctl/cmd/peer.go @@ -52,6 +52,7 @@ Examples: $ {{ .Executable }} peer --remote-kubeconfig $ {{ .Executable }} peer --remote-kubeconfig --gw-server-service-type NodePort $ {{ .Executable }} peer --remote-kubeconfig --cpu 2 --memory 4Gi --pods 10 + $ {{ .Executable }} peer --remote-kubeconfig --cpu 2 --memory 4Gi --pods 10 --resource nvidia.com/gpu=2 $ {{ .Executable }} peer --remote-kubeconfig --create-resource-slice false $ {{ .Executable }} peer --remote-kubeconfig --create-virtual-node false ` @@ -129,6 +130,8 @@ func newPeerCommand(ctx context.Context, f *factory.Factory) *cobra.Command { cmd.Flags().StringVar(&options.CPU, "cpu", "", "The amount of CPU requested for the VirtualNode") cmd.Flags().StringVar(&options.Memory, "memory", "", "The amount of memory requested for the VirtualNode") cmd.Flags().StringVar(&options.Pods, "pods", "", "The amount of pods requested for the VirtualNode") + cmd.Flags().StringToStringVar( + &options.OtherResources, "resource", nil, "Other resources requested for the VirtualNode (e.g., '--resource=nvidia.com/gpu=2')") return cmd } diff --git a/docs/advanced/peering/offloading-in-depth.md b/docs/advanced/peering/offloading-in-depth.md index 698edd82a5..66f5b69010 100644 --- a/docs/advanced/peering/offloading-in-depth.md +++ b/docs/advanced/peering/offloading-in-depth.md @@ -61,6 +61,7 @@ You can specify the resources you want to acquire by adding: * `--cpu` to specify the amount of CPU. * `--memory` to specify the amount of memory. * `--pods` to specify the number of pods. +* `--resource` other resources can be specified with this flag, which can even be repeated multiple times. 
(e.g., `--resource=nvidia.com/gpu=2 --resource=my.custom/resource=2Gi`) To add other resources like `ephemeral-storage`, `gpu`, or any other custom resources, you can use the `-o yaml` flag for the `liqoctl create resourceslice` command and edit the `ResourceSlice` spec manifest before applying it. diff --git a/docs/usage/liqoctl/liqoctl_create.md b/docs/usage/liqoctl/liqoctl_create.md index c0f5277e96..87d98409c7 100644 --- a/docs/usage/liqoctl/liqoctl_create.md +++ b/docs/usage/liqoctl/liqoctl_create.md @@ -500,6 +500,8 @@ liqoctl create resourceslice [flags] ```bash $ liqoctl create resourceslice my-slice --remote-cluster-id remote-cluster-id \ --cpu 4 --memory 8Gi --pods 30 + $ liqoctl create resourceslice my-slice --remote-cluster-id remote-cluster-id \ + --cpu 4 --memory 8Gi --pods 30 --resource nvidia.com/gpu=2 ``` @@ -532,6 +534,10 @@ liqoctl create resourceslice [flags] >The cluster ID of the remote cluster +`--resource` _stringToString_: + +>Other resources requested in the resource slice (e.g., 'resource=nvidia.com/gpu=2') + ### Global options @@ -656,6 +662,10 @@ liqoctl create virtualnode [flags] >The cluster ID of the remote cluster +`--resource` _stringToString_: + +>Other resources available in the virtual node (e.g., 'resource=nvidia.com/gpu=2') + `--resource-slice-name` _string_: >The name of the resourceslice to be used to create the virtual node. 
Mutually exclusive with --kubeconfig-secret-name diff --git a/docs/usage/liqoctl/liqoctl_peer.md b/docs/usage/liqoctl/liqoctl_peer.md index 0be54e140e..0e9f06141e 100644 --- a/docs/usage/liqoctl/liqoctl_peer.md +++ b/docs/usage/liqoctl/liqoctl_peer.md @@ -37,6 +37,7 @@ liqoctl peer [flags] $ liqoctl peer --remote-kubeconfig $ liqoctl peer --remote-kubeconfig --gw-server-service-type NodePort $ liqoctl peer --remote-kubeconfig --cpu 2 --memory 4Gi --pods 10 + $ liqoctl peer --remote-kubeconfig --cpu 2 --memory 4Gi --pods 10 --resource nvidia.com/gpu=2 $ liqoctl peer --remote-kubeconfig --create-resource-slice false $ liqoctl peer --remote-kubeconfig --create-virtual-node false ``` @@ -146,6 +147,10 @@ liqoctl peer [flags] >The name of the kubeconfig user to use (in the remote cluster) +`--resource` _stringToString_: + +>Other resources requested for the VirtualNode (e.g., '--resource=nvidia.com/gpu=2') + `--resource-slice-class` _string_: >The class of the ResourceSlice **(default "default")** diff --git a/docs/usage/peer.md b/docs/usage/peer.md index efbd9c2a23..4dbbdb305a 100644 --- a/docs/usage/peer.md +++ b/docs/usage/peer.md @@ -36,8 +36,8 @@ To perform a peering without having access to both clusters, you need to manuall The peering command enables all 3 liqo modules and performs the following steps: 1. **enables networking**. -Exchanges network configurations and creates the two **gateways** (one acting as _server_ and located in the provider cluster, another acting as _client_ in the consumer cluster) to let the two clusters communicate over a secure tunnel. -The location of the client/server gateway can be customized when creating the peering using the `--gw-server-service-location` flag in `liqoctl`. +Exchanges network configurations and creates the two **gateways** (one acting as _server_ and located in the provider cluster, another acting as _client_ in the consumer cluster) to let the two clusters communicate over a secure tunnel. 
+The location of the client/server gateway can be customized when creating the peering using the `--gw-server-service-location` flag in `liqoctl`. 2. **enables authentication**. Authenticates the consumer with the provider. In this step, the consumer obtains an `Identity` (*kubeconfig*) to replicate resources to the provider cluster. @@ -237,6 +237,18 @@ liqoctl peer \ --memory=2Gi ``` +Other non-standard resources can be defined via the `--resource` flag: + +```bash +liqoctl peer \ + --kubeconfig=$CONSUMER_KUBECONFIG_PATH \ + --remote-kubeconfig=$PROVIDER_KUBECONFIG_PATH \ + --cpu=2 \ + --memory=2Gi \ + --resource=nvidia.com/gpu=2 \ + --resource=my.custom/resource=2Gi +``` + ```{warning} To make sure the consumer cluster does not exceed the quota of shared resources, the offloaded pods need to be created with the resources `limits` set. diff --git a/pkg/liqoctl/peer/handler.go b/pkg/liqoctl/peer/handler.go index b90c9f05ff..5cd4e6d7f6 100644 --- a/pkg/liqoctl/peer/handler.go +++ b/pkg/liqoctl/peer/handler.go @@ -61,6 +61,7 @@ type Options struct { CPU string Memory string Pods string + OtherResources map[string]string } // NewOptions returns a new Options struct. 
@@ -195,9 +196,10 @@ func ensureOffloading(ctx context.Context, o *Options) error { Class: o.ResourceSliceClass, DisableVirtualNodeCreation: !o.CreateVirtualNode, - CPU: o.CPU, - Memory: o.Memory, - Pods: o.Pods, + CPU: o.CPU, + Memory: o.Memory, + Pods: o.Pods, + OtherResources: o.OtherResources, } if err := rsOptions.HandleCreate(ctx); err != nil { diff --git a/pkg/liqoctl/rest/resourceslice/create.go b/pkg/liqoctl/rest/resourceslice/create.go index 017605fca7..c49d65908b 100644 --- a/pkg/liqoctl/rest/resourceslice/create.go +++ b/pkg/liqoctl/rest/resourceslice/create.go @@ -44,7 +44,9 @@ The ResourceSlice resource is used to represent a slice of resources that can be Examples: $ {{ .Executable }} create resourceslice my-slice --remote-cluster-id remote-cluster-id \ - --cpu 4 --memory 8Gi --pods 30` + --cpu 4 --memory 8Gi --pods 30 + $ {{ .Executable }} create resourceslice my-slice --remote-cluster-id remote-cluster-id \ + --cpu 4 --memory 8Gi --pods 30 --resource nvidia.com/gpu=2` // Create implements the create command. func (o *Options) Create(ctx context.Context, options *rest.CreateOptions) *cobra.Command { @@ -80,6 +82,8 @@ func (o *Options) Create(ctx context.Context, options *rest.CreateOptions) *cobr cmd.Flags().StringVar(&o.CPU, "cpu", "", "The amount of CPU requested in the resource slice") cmd.Flags().StringVar(&o.Memory, "memory", "", "The amount of memory requested in the resource slice") cmd.Flags().StringVar(&o.Pods, "pods", "", "The amount of pods requested in the resource slice") + cmd.Flags().StringToStringVar( + &o.OtherResources, "resource", nil, "Other resources requested in the resource slice (e.g., 'resource=nvidia.com/gpu=2')") cmd.Flags().BoolVar(&o.DisableVirtualNodeCreation, "no-virtual-node", false, "Prevent the automatic creation of a VirtualNode for the ResourceSlice. 
Default: false") @@ -111,12 +115,8 @@ func (o *Options) HandleCreate(ctx context.Context) error { resourceSlice := forge.ResourceSlice(opts.Name, namespace) _, err = resource.CreateOrUpdate(ctx, opts.CRClient, resourceSlice, func() error { return forge.MutateResourceSlice(resourceSlice, o.RemoteClusterID.GetClusterID(), &forge.ResourceSliceOptions{ - Class: authv1beta1.ResourceSliceClass(o.Class), - Resources: map[corev1.ResourceName]string{ - corev1.ResourceCPU: o.CPU, - corev1.ResourceMemory: o.Memory, - corev1.ResourcePods: o.Pods, - }, + Class: authv1beta1.ResourceSliceClass(o.Class), + Resources: o.buildResourceMap(), }, !o.DisableVirtualNodeCreation) }) if err != nil { @@ -157,6 +157,21 @@ func (o *Options) getTenantNamespace(ctx context.Context) (string, error) { } } +func (o *Options) buildResourceMap() map[corev1.ResourceName]string { + resources := map[corev1.ResourceName]string{ + corev1.ResourceCPU: o.CPU, + corev1.ResourceMemory: o.Memory, + corev1.ResourcePods: o.Pods, + } + + // Add other resources to the resources map. + for name, quantity := range o.OtherResources { + resources[corev1.ResourceName(name)] = quantity + } + + return resources +} + // output implements the logic to output the generated ResourceSlice resource. 
func (o *Options) output(ctx context.Context) error { opts := o.CreateOptions @@ -177,12 +192,8 @@ func (o *Options) output(ctx context.Context) error { resourceSlice := forge.ResourceSlice(opts.Name, namespace) err = forge.MutateResourceSlice(resourceSlice, o.RemoteClusterID.GetClusterID(), &forge.ResourceSliceOptions{ - Class: authv1beta1.ResourceSliceClass(o.Class), - Resources: map[corev1.ResourceName]string{ - corev1.ResourceCPU: o.CPU, - corev1.ResourceMemory: o.Memory, - corev1.ResourcePods: o.Pods, - }, + Class: authv1beta1.ResourceSliceClass(o.Class), + Resources: o.buildResourceMap(), }, !o.DisableVirtualNodeCreation) if err != nil { return err diff --git a/pkg/liqoctl/rest/resourceslice/types.go b/pkg/liqoctl/rest/resourceslice/types.go index 46505859c1..ac0ff54ca1 100644 --- a/pkg/liqoctl/rest/resourceslice/types.go +++ b/pkg/liqoctl/rest/resourceslice/types.go @@ -29,9 +29,10 @@ type Options struct { Class string DisableVirtualNodeCreation bool - CPU string - Memory string - Pods string + CPU string + Memory string + Pods string + OtherResources map[string]string } var _ rest.API = &Options{} diff --git a/pkg/liqoctl/rest/virtualnode/create.go b/pkg/liqoctl/rest/virtualnode/create.go index 6e030176d6..8d0db9caf7 100644 --- a/pkg/liqoctl/rest/virtualnode/create.go +++ b/pkg/liqoctl/rest/virtualnode/create.go @@ -94,6 +94,8 @@ func (o *Options) Create(ctx context.Context, options *rest.CreateOptions) *cobr cmd.Flags().StringVar(&o.cpu, "cpu", "2", "The amount of CPU available in the virtual node") cmd.Flags().StringVar(&o.memory, "memory", "4Gi", "The amount of memory available in the virtual node") cmd.Flags().StringVar(&o.pods, "pods", "110", "The amount of pods available in the virtual node") + cmd.Flags().StringToStringVar(&o.otherResources, "resource", nil, + "Other resources available in the virtual node (e.g., 'resource=nvidia.com/gpu=2')") cmd.Flags().StringSliceVar(&o.storageClasses, "storage-classes", []string{}, "The storage classes offered by 
the remote cluster. The first one will be used as default") cmd.Flags().StringSliceVar(&o.ingressClasses, "ingress-classes", @@ -225,6 +227,20 @@ func (o *Options) forgeVirtualNodeOptions(vkOptionsTemplateRef *corev1.ObjectRef return nil, fmt.Errorf("unable to parse pod quantity: %w", err) } + resourceMap := corev1.ResourceList{ + corev1.ResourceCPU: cpuQnt, + corev1.ResourceMemory: memoryQnt, + corev1.ResourcePods: podsQnt, + } + + for resourceName, resourceValue := range o.otherResources { + parsedQuantity, err := k8sresource.ParseQuantity(resourceValue) + if err != nil { + return nil, fmt.Errorf("unable to parse quantity for resource %q: %w", resourceName, err) + } + resourceMap[corev1.ResourceName(resourceName)] = parsedQuantity + } + storageClasses := make([]liqov1beta1.StorageType, len(o.storageClasses)) for i, storageClass := range o.storageClasses { sc := liqov1beta1.StorageType{ @@ -261,17 +277,12 @@ func (o *Options) forgeVirtualNodeOptions(vkOptionsTemplateRef *corev1.ObjectRef return &forge.VirtualNodeOptions{ KubeconfigSecretRef: corev1.LocalObjectReference{Name: o.kubeconfigSecretName}, VkOptionsTemplateRef: vkOptionsTemplateRef, - - ResourceList: corev1.ResourceList{ - corev1.ResourceCPU: cpuQnt, - corev1.ResourceMemory: memoryQnt, - corev1.ResourcePods: podsQnt, - }, - StorageClasses: storageClasses, - IngressClasses: ingressClasses, - LoadBalancerClasses: loadBalancerClasses, - NodeLabels: o.labels, - NodeSelector: o.nodeSelector, + ResourceList: resourceMap, + StorageClasses: storageClasses, + IngressClasses: ingressClasses, + LoadBalancerClasses: loadBalancerClasses, + NodeLabels: o.labels, + NodeSelector: o.nodeSelector, }, nil } diff --git a/pkg/liqoctl/rest/virtualnode/types.go b/pkg/liqoctl/rest/virtualnode/types.go index 3f709a41e3..939ff05c8e 100644 --- a/pkg/liqoctl/rest/virtualnode/types.go +++ b/pkg/liqoctl/rest/virtualnode/types.go @@ -33,9 +33,10 @@ type Options struct { resourceSliceName string vkOptionsTemplate string - cpu string - 
memory string - pods string + cpu string + memory string + pods string + otherResources map[string]string storageClasses []string ingressClasses []string