3 changes: 3 additions & 0 deletions cmd/liqoctl/cmd/peer.go
@@ -52,6 +52,7 @@ Examples:
$ {{ .Executable }} peer --remote-kubeconfig <provider>
$ {{ .Executable }} peer --remote-kubeconfig <provider> --gw-server-service-type NodePort
$ {{ .Executable }} peer --remote-kubeconfig <provider> --cpu 2 --memory 4Gi --pods 10
$ {{ .Executable }} peer --remote-kubeconfig <provider> --cpu 2 --memory 4Gi --pods 10 --resource nvidia.com/gpu=2
$ {{ .Executable }} peer --remote-kubeconfig <provider> --create-resource-slice false
$ {{ .Executable }} peer --remote-kubeconfig <provider> --create-virtual-node false
`
@@ -129,6 +130,8 @@ func newPeerCommand(ctx context.Context, f *factory.Factory) *cobra.Command {
cmd.Flags().StringVar(&options.CPU, "cpu", "", "The amount of CPU requested for the VirtualNode")
cmd.Flags().StringVar(&options.Memory, "memory", "", "The amount of memory requested for the VirtualNode")
cmd.Flags().StringVar(&options.Pods, "pods", "", "The amount of pods requested for the VirtualNode")
cmd.Flags().StringToStringVar(
&options.OtherResources, "resource", nil, "Other resources requested for the VirtualNode (e.g., '--resource=nvidia.com/gpu=2')")

return cmd
}
1 change: 1 addition & 0 deletions docs/advanced/peering/offloading-in-depth.md
@@ -61,6 +61,7 @@ You can specify the resources you want to acquire by adding:
* `--cpu` to specify the amount of CPU.
* `--memory` to specify the amount of memory.
* `--pods` to specify the number of pods.
* `--resource` to specify any other custom resource; the flag can be repeated multiple times, as shown in the example below (e.g., `--resource=nvidia.com/gpu=2 --resource=my.custom/resource=2Gi`).

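For instance, a minimal sketch combining these flags (the GPU and custom resource names below are illustrative):

```bash
liqoctl create resourceslice my-slice --remote-cluster-id remote-cluster-id \
  --cpu 4 --memory 8Gi --pods 30 \
  --resource nvidia.com/gpu=2 --resource my.custom/resource=2Gi
```
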
To add other resources like `ephemeral-storage`, `gpu`, or any other custom resources, you can use the `-o yaml` flag for the `liqoctl create resourceslice` command and edit the `ResourceSlice` spec manifest before applying it.
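A possible sketch of that manual workflow (the file name is illustrative):

```bash
# Print the generated ResourceSlice manifest instead of applying it.
liqoctl create resourceslice my-slice --remote-cluster-id remote-cluster-id -o yaml > slice.yaml
# Edit slice.yaml to add the extra resources (e.g., ephemeral-storage), then apply it.
kubectl apply -f slice.yaml
```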

10 changes: 10 additions & 0 deletions docs/usage/liqoctl/liqoctl_create.md
@@ -500,6 +500,8 @@ liqoctl create resourceslice [flags]
```bash
$ liqoctl create resourceslice my-slice --remote-cluster-id remote-cluster-id \
--cpu 4 --memory 8Gi --pods 30
$ liqoctl create resourceslice my-slice --remote-cluster-id remote-cluster-id \
--cpu 4 --memory 8Gi --pods 30 --resource nvidia.com/gpu=2
```


@@ -532,6 +534,10 @@ liqoctl create resourceslice [flags]

>The cluster ID of the remote cluster

`--resource` _stringToString_:

>Other resources requested in the resource slice (e.g., '--resource=nvidia.com/gpu=2')


### Global options

@@ -656,6 +662,10 @@ liqoctl create virtualnode [flags]

>The cluster ID of the remote cluster

`--resource` _stringToString_:

>Other resources available in the virtual node (e.g., '--resource=nvidia.com/gpu=2')

`--resource-slice-name` _string_:

>The name of the resourceslice to be used to create the virtual node. Mutually exclusive with --kubeconfig-secret-name
5 changes: 5 additions & 0 deletions docs/usage/liqoctl/liqoctl_peer.md
@@ -37,6 +37,7 @@ liqoctl peer [flags]
$ liqoctl peer --remote-kubeconfig <provider>
$ liqoctl peer --remote-kubeconfig <provider> --gw-server-service-type NodePort
$ liqoctl peer --remote-kubeconfig <provider> --cpu 2 --memory 4Gi --pods 10
$ liqoctl peer --remote-kubeconfig <provider> --cpu 2 --memory 4Gi --pods 10 --resource nvidia.com/gpu=2
$ liqoctl peer --remote-kubeconfig <provider> --create-resource-slice false
$ liqoctl peer --remote-kubeconfig <provider> --create-virtual-node false
```
@@ -146,6 +147,10 @@ liqoctl peer [flags]

>The name of the kubeconfig user to use (in the remote cluster)

`--resource` _stringToString_:

>Other resources requested for the VirtualNode (e.g., '--resource=nvidia.com/gpu=2')

`--resource-slice-class` _string_:

>The class of the ResourceSlice **(default "default")**
16 changes: 14 additions & 2 deletions docs/usage/peer.md
@@ -36,8 +36,8 @@ To perform a peering without having access to both clusters, you need to manually
The peering command enables all 3 liqo modules and performs the following steps:

1. **enables networking**.
Exchanges network configurations and creates the two **gateways** (one acting as _server_ and located in the provider cluster, another acting as _client_ in the consumer cluster) to let the two clusters communicate over a secure tunnel.
The location of the client/server gateway can be customized when creating the peering using the `--gw-server-service-location` flag in `liqoctl`.
2. **enables authentication**.
Authenticates the consumer with the provider.
In this step, the consumer obtains an `Identity` (*kubeconfig*) to replicate resources to the provider cluster.
@@ -237,6 +237,18 @@ liqoctl peer \
--memory=2Gi
```

Other non-standard resources can be defined via the `--resource` flag:

```bash
liqoctl peer \
--kubeconfig=$CONSUMER_KUBECONFIG_PATH \
--remote-kubeconfig=$PROVIDER_KUBECONFIG_PATH \
--cpu=2 \
--memory=2Gi \
--resource=nvidia.com/gpu=2 \
--resource=custom=2Gi
```
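
Each `--resource` value must be a valid Kubernetes quantity (e.g., `2` or `2Gi`); values that fail to parse are rejected with an error when the virtual node is forged.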

```{warning}
To make sure the consumer cluster does not exceed the quota of shared resources, the offloaded pods need to be created with their resource `limits` set.

8 changes: 5 additions & 3 deletions pkg/liqoctl/peer/handler.go
@@ -61,6 +61,7 @@ type Options struct {
CPU string
Memory string
Pods string
OtherResources map[string]string
}

// NewOptions returns a new Options struct.
@@ -195,9 +196,10 @@ func ensureOffloading(ctx context.Context, o *Options) error {
Class: o.ResourceSliceClass,
DisableVirtualNodeCreation: !o.CreateVirtualNode,

CPU: o.CPU,
Memory: o.Memory,
Pods: o.Pods,
CPU: o.CPU,
Memory: o.Memory,
Pods: o.Pods,
OtherResources: o.OtherResources,
}

if err := rsOptions.HandleCreate(ctx); err != nil {
37 changes: 24 additions & 13 deletions pkg/liqoctl/rest/resourceslice/create.go
@@ -44,7 +44,9 @@ The ResourceSlice resource is used to represent a slice of resources that can be

Examples:
$ {{ .Executable }} create resourceslice my-slice --remote-cluster-id remote-cluster-id \
--cpu 4 --memory 8Gi --pods 30`
--cpu 4 --memory 8Gi --pods 30
$ {{ .Executable }} create resourceslice my-slice --remote-cluster-id remote-cluster-id \
--cpu 4 --memory 8Gi --pods 30 --resource nvidia.com/gpu=2`

// Create implements the create command.
func (o *Options) Create(ctx context.Context, options *rest.CreateOptions) *cobra.Command {
@@ -80,6 +82,8 @@ func (o *Options) Create(ctx context.Context, options *rest.CreateOptions) *cobra.Command {
cmd.Flags().StringVar(&o.CPU, "cpu", "", "The amount of CPU requested in the resource slice")
cmd.Flags().StringVar(&o.Memory, "memory", "", "The amount of memory requested in the resource slice")
cmd.Flags().StringVar(&o.Pods, "pods", "", "The amount of pods requested in the resource slice")
cmd.Flags().StringToStringVar(
&o.OtherResources, "resource", nil, "Other resources requested in the resource slice (e.g., '--resource=nvidia.com/gpu=2')")
cmd.Flags().BoolVar(&o.DisableVirtualNodeCreation, "no-virtual-node", false,
"Prevent the automatic creation of a VirtualNode for the ResourceSlice. Default: false")

@@ -111,12 +115,8 @@ func (o *Options) HandleCreate(ctx context.Context) error {
resourceSlice := forge.ResourceSlice(opts.Name, namespace)
_, err = resource.CreateOrUpdate(ctx, opts.CRClient, resourceSlice, func() error {
return forge.MutateResourceSlice(resourceSlice, o.RemoteClusterID.GetClusterID(), &forge.ResourceSliceOptions{
Class: authv1beta1.ResourceSliceClass(o.Class),
Resources: map[corev1.ResourceName]string{
corev1.ResourceCPU: o.CPU,
corev1.ResourceMemory: o.Memory,
corev1.ResourcePods: o.Pods,
},
Class: authv1beta1.ResourceSliceClass(o.Class),
Resources: o.buildResourceMap(),
}, !o.DisableVirtualNodeCreation)
})
if err != nil {
@@ -157,6 +157,21 @@ func (o *Options) getTenantNamespace(ctx context.Context) (string, error) {
}
}

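// buildResourceMap merges the standard CPU, memory, and pods requests with any
// custom resources provided through the --resource flag.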
func (o *Options) buildResourceMap() map[corev1.ResourceName]string {
resources := map[corev1.ResourceName]string{
corev1.ResourceCPU: o.CPU,
corev1.ResourceMemory: o.Memory,
corev1.ResourcePods: o.Pods,
}

// Add other resources to the resources map.
for name, quantity := range o.OtherResources {
resources[corev1.ResourceName(name)] = quantity
}

return resources
}

// output implements the logic to output the generated ResourceSlice resource.
func (o *Options) output(ctx context.Context) error {
opts := o.CreateOptions
@@ -177,12 +192,8 @@ func (o *Options) output(ctx context.Context) error {

resourceSlice := forge.ResourceSlice(opts.Name, namespace)
err = forge.MutateResourceSlice(resourceSlice, o.RemoteClusterID.GetClusterID(), &forge.ResourceSliceOptions{
Class: authv1beta1.ResourceSliceClass(o.Class),
Resources: map[corev1.ResourceName]string{
corev1.ResourceCPU: o.CPU,
corev1.ResourceMemory: o.Memory,
corev1.ResourcePods: o.Pods,
},
Class: authv1beta1.ResourceSliceClass(o.Class),
Resources: o.buildResourceMap(),
}, !o.DisableVirtualNodeCreation)
if err != nil {
return err
7 changes: 4 additions & 3 deletions pkg/liqoctl/rest/resourceslice/types.go
@@ -29,9 +29,10 @@ type Options struct {
Class string
DisableVirtualNodeCreation bool

CPU string
Memory string
Pods string
CPU string
Memory string
Pods string
OtherResources map[string]string
}

var _ rest.API = &Options{}
33 changes: 22 additions & 11 deletions pkg/liqoctl/rest/virtualnode/create.go
@@ -94,6 +94,8 @@ func (o *Options) Create(ctx context.Context, options *rest.CreateOptions) *cobra.Command {
cmd.Flags().StringVar(&o.cpu, "cpu", "2", "The amount of CPU available in the virtual node")
cmd.Flags().StringVar(&o.memory, "memory", "4Gi", "The amount of memory available in the virtual node")
cmd.Flags().StringVar(&o.pods, "pods", "110", "The amount of pods available in the virtual node")
cmd.Flags().StringToStringVar(&o.otherResources, "resource", nil,
"Other resources available in the virtual node (e.g., 'resource=nvidia.com/gpu=2')")
cmd.Flags().StringSliceVar(&o.storageClasses, "storage-classes",
[]string{}, "The storage classes offered by the remote cluster. The first one will be used as default")
cmd.Flags().StringSliceVar(&o.ingressClasses, "ingress-classes",
@@ -225,6 +227,20 @@ func (o *Options) forgeVirtualNodeOptions(vkOptionsTemplateRef *corev1.ObjectRef
return nil, fmt.Errorf("unable to parse pod quantity: %w", err)
}

resourceMap := corev1.ResourceList{
corev1.ResourceCPU: cpuQnt,
corev1.ResourceMemory: memoryQnt,
corev1.ResourcePods: podsQnt,
}

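// Merge the user-provided custom resources (from --resource), rejecting
// values that are not valid Kubernetes quantities.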
for resourceName, resourceValue := range o.otherResources {
parsedQuantity, err := k8sresource.ParseQuantity(resourceValue)
if err != nil {
return nil, fmt.Errorf("unable to parse quantity for resource %q: %w", resourceName, err)
}
resourceMap[corev1.ResourceName(resourceName)] = parsedQuantity
}

storageClasses := make([]liqov1beta1.StorageType, len(o.storageClasses))
for i, storageClass := range o.storageClasses {
sc := liqov1beta1.StorageType{
Expand Down Expand Up @@ -261,17 +277,12 @@ func (o *Options) forgeVirtualNodeOptions(vkOptionsTemplateRef *corev1.ObjectRef
return &forge.VirtualNodeOptions{
KubeconfigSecretRef: corev1.LocalObjectReference{Name: o.kubeconfigSecretName},
VkOptionsTemplateRef: vkOptionsTemplateRef,

ResourceList: corev1.ResourceList{
corev1.ResourceCPU: cpuQnt,
corev1.ResourceMemory: memoryQnt,
corev1.ResourcePods: podsQnt,
},
StorageClasses: storageClasses,
IngressClasses: ingressClasses,
LoadBalancerClasses: loadBalancerClasses,
NodeLabels: o.labels,
NodeSelector: o.nodeSelector,
ResourceList: resourceMap,
StorageClasses: storageClasses,
IngressClasses: ingressClasses,
LoadBalancerClasses: loadBalancerClasses,
NodeLabels: o.labels,
NodeSelector: o.nodeSelector,
}, nil
}

7 changes: 4 additions & 3 deletions pkg/liqoctl/rest/virtualnode/types.go
@@ -33,9 +33,10 @@ type Options struct {
resourceSliceName string
vkOptionsTemplate string

cpu string
memory string
pods string
cpu string
memory string
pods string
otherResources map[string]string

storageClasses []string
ingressClasses []string