diff --git a/api/v1/storagepool.go b/api/v1/storagepool.go index 47c1d8b2..4768d997 100644 --- a/api/v1/storagepool.go +++ b/api/v1/storagepool.go @@ -9,8 +9,10 @@ import ( "strings" lclient "github.com/LINBIT/golinstor/client" + "golang.org/x/exp/slices" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) type LinstorStoragePool struct { @@ -104,14 +106,90 @@ func (p *LinstorStoragePool) PoolName() string { return "" } +func (p *LinstorStoragePool) PvCreateArguments() []string { + switch { + case p.LvmPool != nil: + return p.LvmPool.PhysicalVolumeCreateArguments + case p.LvmThinPool != nil: + return p.LvmThinPool.PhysicalVolumeCreateArguments + default: + return nil + } +} + +func (p *LinstorStoragePool) VgCreateArguments() []string { + switch { + case p.LvmPool != nil: + return p.LvmPool.VolumeGroupCreateArguments + case p.LvmThinPool != nil: + return p.LvmThinPool.VolumeGroupCreateArguments + default: + return nil + } +} + +func (p *LinstorStoragePool) LvCreateArguments() []string { + switch { + case p.LvmThinPool != nil: + return p.LvmThinPool.LogicalVolumeCreateArguments + default: + return nil + } +} + +func (p *LinstorStoragePool) ZpoolCreateArguments() []string { + switch { + case p.ZfsPool != nil: + if len(p.ZfsPool.ZpoolCreateArguments) == 0 { + return []string{"-o", "failmode=continue"} + } + + return p.ZfsPool.ZpoolCreateArguments + case p.ZfsThinPool != nil: + if len(p.ZfsThinPool.ZpoolCreateArguments) == 0 { + return []string{"-o", "failmode=continue"} + } + + return p.ZfsThinPool.ZpoolCreateArguments + default: + return nil + } +} + type LinstorStoragePoolLvm struct { + // VolumeGroup is the name of the Volume Group (VG) to use. VolumeGroup string `json:"volumeGroup,omitempty"` + + // PhysicalVolumeCreateArguments are arguments to pass to "pvcreate". + // This has no effect on an existing VG, it only applies the VG initially gets created. 
+ // +kubebuilder:validation:Optional + PhysicalVolumeCreateArguments []string `json:"physicalVolumeCreateArguments,omitempty"` + + // VolumeGroupCreateArguments are arguments to pass to "vgcreate". + // This has no effect on an existing VG, it only applies the VG initially gets created. + // +kubebuilder:validation:Optional + VolumeGroupCreateArguments []string `json:"volumeGroupCreateArguments,omitempty"` } type LinstorStoragePoolLvmThin struct { VolumeGroup string `json:"volumeGroup,omitempty"` // ThinPool is the name of the thinpool LV (without VG prefix). ThinPool string `json:"thinPool,omitempty"` + + // PhysicalVolumeCreateArguments are arguments to pass to "pvcreate". + // This has no effect on an existing VG, it only applies the VG initially gets created. + // +kubebuilder:validation:Optional + PhysicalVolumeCreateArguments []string `json:"physicalVolumeCreateArguments,omitempty"` + + // VolumeGroupCreateArguments are arguments to pass to "vgcreate". + // This has no effect on an existing VG, it only applies the VG initially gets created. + // +kubebuilder:validation:Optional + VolumeGroupCreateArguments []string `json:"volumeGroupCreateArguments,omitempty"` + + // LogicalVolumeCreateArguments are arguments to pass to "lvcreate". + // This has no effect on an existing thinpool, it only applies the thinpool initially gets created. + // +kubebuilder:validation:Optional + LogicalVolumeCreateArguments []string `json:"logicalVolumeCreateArguments,omitempty"` } type LinstorStoragePoolFile struct { @@ -122,6 +200,15 @@ type LinstorStoragePoolFile struct { type LinstorStoragePoolZfs struct { // ZPool is the name of the ZFS zpool. ZPool string `json:"zPool,omitempty"` + + // ZpoolCreateArguments are arguments to pass to "zpool create". + // This has no effect on an existing zpool, it only applies the zpool initially gets created. 
+ // + // If not set, "-o failmode=continue" is automatically added to ensure a failed zpool does not stop replication + // to and from other nodes in the cluster. + // + // +kubebuilder:validation:Optional + ZpoolCreateArguments []string `json:"zpoolCreateArguments,omitempty"` } type LinstorStoragePoolSource struct { @@ -139,8 +226,9 @@ func (l *LinstorStoragePoolFile) DirectoryOrDefault(name string) string { return l.Directory } -func (l *LinstorStoragePoolZfs) Validate(oldSP *LinstorStoragePool, fieldPrefix *field.Path, name string, thin bool) field.ErrorList { +func (l *LinstorStoragePoolZfs) Validate(oldSP *LinstorStoragePool, fieldPrefix *field.Path, name string, thin bool) (admission.Warnings, field.ErrorList) { var result field.ErrorList + var warnings admission.Warnings if oldSP != nil { if thin && oldSP.ZfsThinPool == nil { @@ -150,7 +238,15 @@ func (l *LinstorStoragePoolZfs) Validate(oldSP *LinstorStoragePool, fieldPrefix } } - return result + if oldSP != nil && thin && oldSP.ZfsThinPool != nil && !slices.Equal(l.ZpoolCreateArguments, oldSP.ZfsThinPool.ZpoolCreateArguments) { + warnings = append(warnings, fmt.Sprintf("%s: Update will only apply to new nodes", fieldPrefix.Child("zpoolCreateArguments"))) + } + + if oldSP != nil && !thin && oldSP.ZfsPool != nil && !slices.Equal(l.ZpoolCreateArguments, oldSP.ZfsPool.ZpoolCreateArguments) { + warnings = append(warnings, fmt.Sprintf("%s: Update will only apply to new nodes", fieldPrefix.Child("zpoolCreateArguments"))) + } + + return warnings, result } func (s *LinstorStoragePoolSource) Validate(oldSP *LinstorStoragePool, knownDevices sets.Set[string], fieldPrefix *field.Path) field.ErrorList { diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 534a0ade..956649e4 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -903,12 +903,12 @@ func (in *LinstorStoragePool) DeepCopyInto(out *LinstorStoragePool) { if in.LvmPool != nil { in, out := 
&in.LvmPool, &out.LvmPool *out = new(LinstorStoragePoolLvm) - **out = **in + (*in).DeepCopyInto(*out) } if in.LvmThinPool != nil { in, out := &in.LvmThinPool, &out.LvmThinPool *out = new(LinstorStoragePoolLvmThin) - **out = **in + (*in).DeepCopyInto(*out) } if in.FilePool != nil { in, out := &in.FilePool, &out.FilePool @@ -923,12 +923,12 @@ func (in *LinstorStoragePool) DeepCopyInto(out *LinstorStoragePool) { if in.ZfsPool != nil { in, out := &in.ZfsPool, &out.ZfsPool *out = new(LinstorStoragePoolZfs) - **out = **in + (*in).DeepCopyInto(*out) } if in.ZfsThinPool != nil { in, out := &in.ZfsThinPool, &out.ZfsThinPool *out = new(LinstorStoragePoolZfs) - **out = **in + (*in).DeepCopyInto(*out) } if in.Source != nil { in, out := &in.Source, &out.Source @@ -965,6 +965,16 @@ func (in *LinstorStoragePoolFile) DeepCopy() *LinstorStoragePoolFile { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LinstorStoragePoolLvm) DeepCopyInto(out *LinstorStoragePoolLvm) { *out = *in + if in.PhysicalVolumeCreateArguments != nil { + in, out := &in.PhysicalVolumeCreateArguments, &out.PhysicalVolumeCreateArguments + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.VolumeGroupCreateArguments != nil { + in, out := &in.VolumeGroupCreateArguments, &out.VolumeGroupCreateArguments + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinstorStoragePoolLvm. @@ -980,6 +990,21 @@ func (in *LinstorStoragePoolLvm) DeepCopy() *LinstorStoragePoolLvm { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LinstorStoragePoolLvmThin) DeepCopyInto(out *LinstorStoragePoolLvmThin) { *out = *in + if in.PhysicalVolumeCreateArguments != nil { + in, out := &in.PhysicalVolumeCreateArguments, &out.PhysicalVolumeCreateArguments + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.VolumeGroupCreateArguments != nil { + in, out := &in.VolumeGroupCreateArguments, &out.VolumeGroupCreateArguments + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LogicalVolumeCreateArguments != nil { + in, out := &in.LogicalVolumeCreateArguments, &out.LogicalVolumeCreateArguments + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinstorStoragePoolLvmThin. @@ -1015,6 +1040,11 @@ func (in *LinstorStoragePoolSource) DeepCopy() *LinstorStoragePoolSource { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LinstorStoragePoolZfs) DeepCopyInto(out *LinstorStoragePoolZfs) { *out = *in + if in.ZpoolCreateArguments != nil { + in, out := &in.ZpoolCreateArguments, &out.ZpoolCreateArguments + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinstorStoragePoolZfs. diff --git a/charts/piraeus/templates/crds.yaml b/charts/piraeus/templates/crds.yaml index 8af98224..d226f641 100644 --- a/charts/piraeus/templates/crds.yaml +++ b/charts/piraeus/templates/crds.yaml @@ -1348,18 +1348,55 @@ spec: lvmPool: description: Configures a LVM Volume Group as storage pool. properties: + physicalVolumeCreateArguments: + description: |- + PhysicalVolumeCreateArguments are arguments to pass to "pvcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array volumeGroup: + description: VolumeGroup is the name of the Volume Group + (VG) to use. 
type: string + volumeGroupCreateArguments: + description: |- + VolumeGroupCreateArguments are arguments to pass to "vgcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array type: object lvmThinPool: description: Configures a LVM Thin Pool as storage pool. properties: + logicalVolumeCreateArguments: + description: |- + LogicalVolumeCreateArguments are arguments to pass to "lvcreate". + This has no effect on an existing thinpool, it only applies the thinpool initially gets created. + items: + type: string + type: array + physicalVolumeCreateArguments: + description: |- + PhysicalVolumeCreateArguments are arguments to pass to "pvcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array thinPool: description: ThinPool is the name of the thinpool LV (without VG prefix). type: string volumeGroup: type: string + volumeGroupCreateArguments: + description: |- + VolumeGroupCreateArguments are arguments to pass to "vgcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array type: object name: description: Name of the storage pool in linstor. @@ -1448,6 +1485,16 @@ spec: zPool: description: ZPool is the name of the ZFS zpool. type: string + zpoolCreateArguments: + description: |- + ZpoolCreateArguments are arguments to pass to "zpool create". + This has no effect on an existing zpool, it only applies the zpool initially gets created. + + If not set, "-o failmode=continue" is automatically added to ensure a failed zpool does not stop replication + to and from other nodes in the cluster. + items: + type: string + type: array type: object zfsThinPool: description: Configures a ZFS system based storage pool, allocating @@ -1456,6 +1503,16 @@ spec: zPool: description: ZPool is the name of the ZFS zpool. 
type: string + zpoolCreateArguments: + description: |- + ZpoolCreateArguments are arguments to pass to "zpool create". + This has no effect on an existing zpool, it only applies the zpool initially gets created. + + If not set, "-o failmode=continue" is automatically added to ensure a failed zpool does not stop replication + to and from other nodes in the cluster. + items: + type: string + type: array type: object required: - name @@ -1952,18 +2009,55 @@ spec: lvmPool: description: Configures a LVM Volume Group as storage pool. properties: + physicalVolumeCreateArguments: + description: |- + PhysicalVolumeCreateArguments are arguments to pass to "pvcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array volumeGroup: + description: VolumeGroup is the name of the Volume Group + (VG) to use. type: string + volumeGroupCreateArguments: + description: |- + VolumeGroupCreateArguments are arguments to pass to "vgcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array type: object lvmThinPool: description: Configures a LVM Thin Pool as storage pool. properties: + logicalVolumeCreateArguments: + description: |- + LogicalVolumeCreateArguments are arguments to pass to "lvcreate". + This has no effect on an existing thinpool, it only applies the thinpool initially gets created. + items: + type: string + type: array + physicalVolumeCreateArguments: + description: |- + PhysicalVolumeCreateArguments are arguments to pass to "pvcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array thinPool: description: ThinPool is the name of the thinpool LV (without VG prefix). type: string volumeGroup: type: string + volumeGroupCreateArguments: + description: |- + VolumeGroupCreateArguments are arguments to pass to "vgcreate". 
+ This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array type: object name: description: Name of the storage pool in linstor. @@ -2052,6 +2146,16 @@ spec: zPool: description: ZPool is the name of the ZFS zpool. type: string + zpoolCreateArguments: + description: |- + ZpoolCreateArguments are arguments to pass to "zpool create". + This has no effect on an existing zpool, it only applies the zpool initially gets created. + + If not set, "-o failmode=continue" is automatically added to ensure a failed zpool does not stop replication + to and from other nodes in the cluster. + items: + type: string + type: array type: object zfsThinPool: description: Configures a ZFS system based storage pool, allocating @@ -2060,6 +2164,16 @@ spec: zPool: description: ZPool is the name of the ZFS zpool. type: string + zpoolCreateArguments: + description: |- + ZpoolCreateArguments are arguments to pass to "zpool create". + This has no effect on an existing zpool, it only applies the zpool initially gets created. + + If not set, "-o failmode=continue" is automatically added to ensure a failed zpool does not stop replication + to and from other nodes in the cluster. + items: + type: string + type: array type: object required: - name diff --git a/config/crd/bases/piraeus.io_linstorsatelliteconfigurations.yaml b/config/crd/bases/piraeus.io_linstorsatelliteconfigurations.yaml index 2aba91c7..b50429d9 100644 --- a/config/crd/bases/piraeus.io_linstorsatelliteconfigurations.yaml +++ b/config/crd/bases/piraeus.io_linstorsatelliteconfigurations.yaml @@ -439,18 +439,55 @@ spec: lvmPool: description: Configures a LVM Volume Group as storage pool. properties: + physicalVolumeCreateArguments: + description: |- + PhysicalVolumeCreateArguments are arguments to pass to "pvcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. 
+ items: + type: string + type: array volumeGroup: + description: VolumeGroup is the name of the Volume Group + (VG) to use. type: string + volumeGroupCreateArguments: + description: |- + VolumeGroupCreateArguments are arguments to pass to "vgcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array type: object lvmThinPool: description: Configures a LVM Thin Pool as storage pool. properties: + logicalVolumeCreateArguments: + description: |- + LogicalVolumeCreateArguments are arguments to pass to "lvcreate". + This has no effect on an existing thinpool, it only applies the thinpool initially gets created. + items: + type: string + type: array + physicalVolumeCreateArguments: + description: |- + PhysicalVolumeCreateArguments are arguments to pass to "pvcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array thinPool: description: ThinPool is the name of the thinpool LV (without VG prefix). type: string volumeGroup: type: string + volumeGroupCreateArguments: + description: |- + VolumeGroupCreateArguments are arguments to pass to "vgcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array type: object name: description: Name of the storage pool in linstor. @@ -539,6 +576,16 @@ spec: zPool: description: ZPool is the name of the ZFS zpool. type: string + zpoolCreateArguments: + description: |- + ZpoolCreateArguments are arguments to pass to "zpool create". + This has no effect on an existing zpool, it only applies the zpool initially gets created. + + If not set, "-o failmode=continue" is automatically added to ensure a failed zpool does not stop replication + to and from other nodes in the cluster. 
+ items: + type: string + type: array type: object zfsThinPool: description: Configures a ZFS system based storage pool, allocating @@ -547,6 +594,16 @@ spec: zPool: description: ZPool is the name of the ZFS zpool. type: string + zpoolCreateArguments: + description: |- + ZpoolCreateArguments are arguments to pass to "zpool create". + This has no effect on an existing zpool, it only applies the zpool initially gets created. + + If not set, "-o failmode=continue" is automatically added to ensure a failed zpool does not stop replication + to and from other nodes in the cluster. + items: + type: string + type: array type: object required: - name diff --git a/config/crd/bases/piraeus.io_linstorsatellites.yaml b/config/crd/bases/piraeus.io_linstorsatellites.yaml index 5c10a9ec..fbb6a6a4 100644 --- a/config/crd/bases/piraeus.io_linstorsatellites.yaml +++ b/config/crd/bases/piraeus.io_linstorsatellites.yaml @@ -408,18 +408,55 @@ spec: lvmPool: description: Configures a LVM Volume Group as storage pool. properties: + physicalVolumeCreateArguments: + description: |- + PhysicalVolumeCreateArguments are arguments to pass to "pvcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array volumeGroup: + description: VolumeGroup is the name of the Volume Group + (VG) to use. type: string + volumeGroupCreateArguments: + description: |- + VolumeGroupCreateArguments are arguments to pass to "vgcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array type: object lvmThinPool: description: Configures a LVM Thin Pool as storage pool. properties: + logicalVolumeCreateArguments: + description: |- + LogicalVolumeCreateArguments are arguments to pass to "lvcreate". + This has no effect on an existing thinpool, it only applies the thinpool initially gets created. 
+ items: + type: string + type: array + physicalVolumeCreateArguments: + description: |- + PhysicalVolumeCreateArguments are arguments to pass to "pvcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array thinPool: description: ThinPool is the name of the thinpool LV (without VG prefix). type: string volumeGroup: type: string + volumeGroupCreateArguments: + description: |- + VolumeGroupCreateArguments are arguments to pass to "vgcreate". + This has no effect on an existing VG, it only applies the VG initially gets created. + items: + type: string + type: array type: object name: description: Name of the storage pool in linstor. @@ -508,6 +545,16 @@ spec: zPool: description: ZPool is the name of the ZFS zpool. type: string + zpoolCreateArguments: + description: |- + ZpoolCreateArguments are arguments to pass to "zpool create". + This has no effect on an existing zpool, it only applies the zpool initially gets created. + + If not set, "-o failmode=continue" is automatically added to ensure a failed zpool does not stop replication + to and from other nodes in the cluster. + items: + type: string + type: array type: object zfsThinPool: description: Configures a ZFS system based storage pool, allocating @@ -516,6 +563,16 @@ spec: zPool: description: ZPool is the name of the ZFS zpool. type: string + zpoolCreateArguments: + description: |- + ZpoolCreateArguments are arguments to pass to "zpool create". + This has no effect on an existing zpool, it only applies the zpool initially gets created. + + If not set, "-o failmode=continue" is automatically added to ensure a failed zpool does not stop replication + to and from other nodes in the cluster. 
+ items: + type: string + type: array type: object required: - name diff --git a/docs/reference/linstorsatelliteconfiguration.md b/docs/reference/linstorsatelliteconfiguration.md index ec93d1ff..934a2915 100644 --- a/docs/reference/linstorsatelliteconfiguration.md +++ b/docs/reference/linstorsatelliteconfiguration.md @@ -130,19 +130,42 @@ Configures LINSTOR Storage Pools. Every Storage Pool needs at least a `name`, and a type. Types are specified by setting a (potentially empty) value on the matching key. Available types are: -* `lvmPool`: Configures a [LVM Volume Group](https://linbit.com/drbd-user-guide/drbd-guide-9_0-en/#s-lvm-primer) as storage - pool. Defaults to using the storage pool name as the VG name. Can be overridden by setting `volumeGroup`. -* `lvmThinPool`: Configures a [LVM Thin Pool](https://man7.org/linux/man-pages/man7/lvmthin.7.html) as storage pool. - Defaults to using the storage pool name as name for the thin pool volume and the storage pool name prefixed by - `linstor_` as the VG name. Can be overridden by setting `thinPool` and `volumeGroup`. -* `filePool`: Configures a file system based storage pool. Configures a host directory as location for the volume files. - Defaults to using the `/var/lib/linstor-pools/` directory. -* `fileThinPool`: Configures a file system based storage pool. Behaves the same as `filePool`, except the files will - be thinly allocated on file systems that support sparse files. -* `zfsPool`: Configure a [ZFS ZPool](https://wiki.ubuntu.com/ZFS/ZPool) as storage pool. Defaults to using the storage - pool name as name for the zpool. Can be overriden by setting `zPool`. -* `zfsThinPool`: Configure a [ZFS ZPool](https://wiki.ubuntu.com/ZFS/ZPool) as storage pool. Behaves the same as - `zfsPool`, except the contained zVol will be created using sparse reservation. +* `lvmPool`: Configures a [LVM Volume Group](https://linbit.com/drbd-user-guide/drbd-guide-9_0-en/#s-lvm-primer) as storage pool. 
+ + Arguments: + * `volumeGroup`: set the name of the Volume Group to use. Defaults to using the storage pool name. + * `physicalVolumeCreateArguments`: additional arguments to pass to the `pvcreate` command. Only applies when the + pool is first set up from a `source`. + * `volumeGroupCreateArguments`: additional arguments to pass to the `vgcreate` command. Only applies when the + pool is first set up from a `source`. +* `lvmThinPool`: Configures a [LVM Thin Pool](https://man7.org/linux/man-pages/man7/lvmthin.7.html) as storage pool. + + Arguments: + * `volumeGroup`: set the name of the Volume Group to use. Defaults to using the storage pool name prefixed with + `linstor_`. + * `thinPool`: set the name of the Thinpool Logical Volumes to use. Defaults to using the storage pool name. + * `physicalVolumeCreateArguments`: additional arguments to pass to the `pvcreate` command. Only applies when the + pool is first set up from a `source`. + * `volumeGroupCreateArguments`: additional arguments to pass to the `vgcreate` command. Only applies when the + pool is first set up from a `source`. + * `logicalVolumeCreateArguments`: additional arguments to pass to the `lvcreate` command. Only applies when the + pool is first set up from a `source`. +* `filePool`: Configures a file system based storage pool. Configures a host directory as location for the volume + files. + + Arguments: + * `directory`: Host directory used to store volume data. +* `fileThinPool`: Configures a file system based storage pool. Behaves the same as `filePool`, except the files will + be thinly allocated on file systems that support sparse files. Supports the same arguments as `filePool`. +* `zfsPool`: Configure a [ZFS ZPool](https://wiki.ubuntu.com/ZFS/ZPool) as storage pool. Defaults to using the storage + pool name as name for the zpool. Can be overridden by setting `zPool`. + + Arguments: + * `zPool`: set the name of the zpool on the host. Defaults to the storage pool name. 
+  * `zpoolCreateArguments`: additional arguments to pass to the `zpool create` command. Only applies when the pool
+    is first set up from a `source`.
+* `zfsThinPool`: Configure a [ZFS ZPool](https://wiki.ubuntu.com/ZFS/ZPool) as storage pool. Behaves the same as `zfsPool`, except the contained
+  zVol will be created using sparse reservation. Supports the same arguments as `zfsPool`.
 
 Optionally, you can configure LINSTOR to automatically create the backing pools. `source.hostDevices` takes a list of
 raw block devices, which LINSTOR will prepare as the chosen backing pool.
@@ -159,6 +182,9 @@ This example configures these LINSTOR Storage Pools on all satellites:
 * A LVM Pool named `vg2-from-raw-devices`. It will use the VG `vg2`, which will be created on demand from the raw
   devices `/dev/sdb` and `/dev/sdc` if it does not exist already. In addition, it sets the `StorDriver/LvcreateOptions`
   property to `-i 2`, which causes every created LV to be striped across 2 PVs.
+* A LVM Thin Pool named `thin-raid`. It will use the thinpool `linstor_thin-raid/thin-raid`, which will be created on
+  `/dev/nvme0n2` and `/dev/nvme0n3`. The backing VG will have a physical extent size of 512KiB and the Thinpool LV
+  striped across both devices.
 * A File System Pool named `fs1`. It will use the `/var/lib/linstor-pools/fs1` directory on the host, creating the
   directory if necessary.
 * A File System Pool named `fs2`, using sparse files. It will use the custom `/mnt/data` directory on the host.
@@ -189,6 +215,14 @@ spec: properties: - name: StorDriver/LvcreateOptions value: '-i 2' + - name: thin-raid + lvmThinPool: + volumeGroupCreateArguments: ["-s", "512KiB"] + logicalVolumeCreateArguments: ["-i", "2"] + source: + hostDevices: + - /dev/nvme0n2 + - /dev/nvme0n3 - name: fs1 filePool: {} - name: fs2 diff --git a/internal/controller/linstorsatellite_controller.go b/internal/controller/linstorsatellite_controller.go index a8903931..1ad3b87e 100644 --- a/internal/controller/linstorsatellite_controller.go +++ b/internal/controller/linstorsatellite_controller.go @@ -561,9 +561,13 @@ func (r *LinstorSatelliteReconciler) reconcileStoragePools(ctx context.Context, if existingPool == nil && pool.Source != nil && len(pool.Source.HostDevices) > 0 { err := lc.Nodes.CreateDevicePool(ctx, lsatellite.Name, lapi.PhysicalStorageCreate{ - ProviderKind: pool.ProviderKind(), - PoolName: pool.PoolName(), - DevicePaths: pool.Source.HostDevices, + ProviderKind: pool.ProviderKind(), + PoolName: pool.PoolName(), + PvCreateArguments: pool.PvCreateArguments(), + VgCreateArguments: pool.VgCreateArguments(), + LvCreateArguments: pool.LvCreateArguments(), + ZpoolCreateArguments: pool.ZpoolCreateArguments(), + DevicePaths: pool.Source.HostDevices, WithStoragePool: lapi.PhysicalStorageStoragePoolCreate{ Name: pool.Name, Props: linstorhelper.UpdateLastApplyProperty(expectedProperties), diff --git a/internal/webhook/v1/linstorsatellite_webhook.go b/internal/webhook/v1/linstorsatellite_webhook.go index f63f1442..f9e6e6ba 100644 --- a/internal/webhook/v1/linstorsatellite_webhook.go +++ b/internal/webhook/v1/linstorsatellite_webhook.go @@ -81,10 +81,8 @@ func (r *LinstorSatelliteCustomValidator) validate(new, old *piraeusiov1.Linstor oldSPs = old.Spec.StoragePools } - var warnings admission.Warnings - - errs := ValidateExternalController(new.Spec.ClusterRef.ExternalController, field.NewPath("spec", "clusterRef", "externalController")) - errs = append(errs, 
ValidateStoragePools(new.Spec.StoragePools, oldSPs, field.NewPath("spec", "storagePools"))...) + warnings, errs := ValidateStoragePools(new.Spec.StoragePools, oldSPs, field.NewPath("spec", "storagePools")) + errs = append(errs, ValidateExternalController(new.Spec.ClusterRef.ExternalController, field.NewPath("spec", "clusterRef", "externalController"))...) errs = append(errs, ValidateNodeProperties(new.Spec.Properties, field.NewPath("spec", "properties"))...) for i := range new.Spec.Patches { path := field.NewPath("spec", "patches", strconv.Itoa(i)) diff --git a/internal/webhook/v1/linstorsatelliteconfiguration_webhook.go b/internal/webhook/v1/linstorsatelliteconfiguration_webhook.go index f28613b5..cb4df001 100644 --- a/internal/webhook/v1/linstorsatelliteconfiguration_webhook.go +++ b/internal/webhook/v1/linstorsatelliteconfiguration_webhook.go @@ -80,9 +80,7 @@ func (r *LinstorSatelliteConfigurationCustomValidator) validate(obj, old *piraeu oldSPs = old.Spec.StoragePools } - var warnings admission.Warnings - - errs := ValidateStoragePools(obj.Spec.StoragePools, oldSPs, field.NewPath("spec", "storagePools")) + warnings, errs := ValidateStoragePools(obj.Spec.StoragePools, oldSPs, field.NewPath("spec", "storagePools")) errs = append(errs, ValidateNodeSelector(obj.Spec.NodeSelector, field.NewPath("spec", "nodeSelector"))...) errs = append(errs, ValidateNodeProperties(obj.Spec.Properties, field.NewPath("spec", "properties"))...) errs = append(errs, ValidatePodTemplate(obj.Spec.PodTemplate, field.NewPath("spec", "podTemplate"))...) 
diff --git a/internal/webhook/v1/linstorsatelliteconfiguration_webhook_test.go b/internal/webhook/v1/linstorsatelliteconfiguration_webhook_test.go index b47d9ff8..a32ca04c 100644 --- a/internal/webhook/v1/linstorsatelliteconfiguration_webhook_test.go +++ b/internal/webhook/v1/linstorsatelliteconfiguration_webhook_test.go @@ -185,6 +185,63 @@ var _ = Describe("LinstorSatelliteConfiguration webhook", func() { Expect(warningHandler[0].text).To(ContainSubstring("consider targeting the DaemonSet 'linstor-satellite'")) }) + It("should warn on updating storage pool creation arguments", func(ctx context.Context) { + warningHandler.Clear() + satelliteConfig := &piraeusv1.LinstorSatelliteConfiguration{ + TypeMeta: typeMeta, + ObjectMeta: metav1.ObjectMeta{Name: "storage-pool-args"}, + Spec: piraeusv1.LinstorSatelliteConfigurationSpec{ + StoragePools: []piraeusv1.LinstorStoragePool{ + { + Name: "lvm", + LvmPool: &piraeusv1.LinstorStoragePoolLvm{ + PhysicalVolumeCreateArguments: []string{"--some", "pv-arg"}, + VolumeGroupCreateArguments: []string{"--some", "vg-arg"}, + }, + }, + { + Name: "lvm-thin", + LvmThinPool: &piraeusv1.LinstorStoragePoolLvmThin{ + PhysicalVolumeCreateArguments: []string{"--some", "pv-arg"}, + VolumeGroupCreateArguments: []string{"--some", "vg-arg"}, + LogicalVolumeCreateArguments: []string{"--some", "lv-arg"}, + }, + }, + { + Name: "zfs", + ZfsPool: &piraeusv1.LinstorStoragePoolZfs{ + ZpoolCreateArguments: []string{"--some", "zpool-arg"}, + }, + }, + { + Name: "zfs-thin", + ZfsThinPool: &piraeusv1.LinstorStoragePoolZfs{ + ZpoolCreateArguments: []string{"--some", "zpool-thin-arg"}, + }, + }, + }, + }, + } + err := k8sClient.Patch(ctx, satelliteConfig, client.Apply, client.FieldOwner("test"), client.ForceOwnership) + Expect(err).NotTo(HaveOccurred()) + Expect(warningHandler).To(HaveLen(7)) + Expect(warningHandler).To(HaveEach(ContainSubstring("Arguments without a pool source have no effect"))) + warningHandler.Clear() + + 
satelliteConfig.Spec.StoragePools[0].LvmPool.PhysicalVolumeCreateArguments = append(satelliteConfig.Spec.StoragePools[0].LvmPool.PhysicalVolumeCreateArguments, "extra-pv") + satelliteConfig.Spec.StoragePools[0].LvmPool.VolumeGroupCreateArguments = append(satelliteConfig.Spec.StoragePools[0].LvmPool.VolumeGroupCreateArguments, "extra-vg") + satelliteConfig.Spec.StoragePools[1].LvmThinPool.PhysicalVolumeCreateArguments = append(satelliteConfig.Spec.StoragePools[1].LvmThinPool.PhysicalVolumeCreateArguments, "extra-pv") + satelliteConfig.Spec.StoragePools[1].LvmThinPool.VolumeGroupCreateArguments = append(satelliteConfig.Spec.StoragePools[1].LvmThinPool.VolumeGroupCreateArguments, "extra-vg") + satelliteConfig.Spec.StoragePools[1].LvmThinPool.LogicalVolumeCreateArguments = append(satelliteConfig.Spec.StoragePools[1].LvmThinPool.LogicalVolumeCreateArguments, "extra-lv") + satelliteConfig.Spec.StoragePools[2].ZfsPool.ZpoolCreateArguments = append(satelliteConfig.Spec.StoragePools[2].ZfsPool.ZpoolCreateArguments, "extra-zfs") + satelliteConfig.Spec.StoragePools[3].ZfsThinPool.ZpoolCreateArguments = append(satelliteConfig.Spec.StoragePools[3].ZfsThinPool.ZpoolCreateArguments, "extra-zfs") + + err = k8sClient.Update(ctx, satelliteConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(warningHandler).To(HaveLen(14)) + Expect(warningHandler).To(HaveEach(Or(ContainSubstring("Update will only apply to new nodes"), ContainSubstring("Arguments without a pool source have no effect")))) + }) + It("should reject invalid property sources", func(ctx context.Context) { satelliteConfig := &piraeusv1.LinstorSatelliteConfiguration{ TypeMeta: typeMeta, diff --git a/internal/webhook/v1/storagepool.go b/internal/webhook/v1/storagepool.go index 63ef2ee4..f1157deb 100644 --- a/internal/webhook/v1/storagepool.go +++ b/internal/webhook/v1/storagepool.go @@ -6,8 +6,10 @@ import ( "regexp" "strconv" + "golang.org/x/exp/slices" "k8s.io/apimachinery/pkg/util/sets" 
"k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" piraeusv1 "github.com/piraeusdatastore/piraeus-operator/v2/api/v1" ) @@ -17,8 +19,9 @@ var ( VGRegexp = regexp.MustCompile("^[A-Za-z0-9.+_-]+$") ) -func ValidateStoragePools(curSPs, oldSPs []piraeusv1.LinstorStoragePool, fieldPrefix *field.Path) field.ErrorList { +func ValidateStoragePools(curSPs, oldSPs []piraeusv1.LinstorStoragePool, fieldPrefix *field.Path) (admission.Warnings, field.ErrorList) { var result field.ErrorList + var warnings admission.Warnings devNames := sets.New[string]() @@ -43,12 +46,16 @@ func ValidateStoragePools(curSPs, oldSPs []piraeusv1.LinstorStoragePool, fieldPr numPoolTypes := 0 if curSP.LvmThinPool != nil { result = append(result, validateStoragePoolType(&numPoolTypes, fieldPrefix.Child(strconv.Itoa(i), "lvmThinPool"))...) - result = append(result, validateLinstorStoragePoolLvmThin(curSP.LvmThinPool, oldSP, fieldPrefix.Child(strconv.Itoa(i), "lvmThinPool"))...) + warns, errs := validateLinstorStoragePoolLvmThin(curSP.LvmThinPool, oldSP, curSP.Source, fieldPrefix.Child(strconv.Itoa(i), "lvmThinPool")) + result = append(result, errs...) + warnings = append(warnings, warns...) } if curSP.LvmPool != nil { result = append(result, validateStoragePoolType(&numPoolTypes, fieldPrefix.Child(strconv.Itoa(i), "lvmPool"))...) - result = append(result, ValidateLinstorStoragePoolLvm(curSP.LvmPool, oldSP, fieldPrefix.Child(strconv.Itoa(i), "lvmPool"))...) + warns, errs := validateLinstorStoragePoolLvm(curSP.LvmPool, oldSP, curSP.Source, fieldPrefix.Child(strconv.Itoa(i), "lvmPool")) + result = append(result, errs...) + warnings = append(warnings, warns...) } if curSP.FilePool != nil { @@ -65,12 +72,18 @@ func ValidateStoragePools(curSPs, oldSPs []piraeusv1.LinstorStoragePool, fieldPr if curSP.ZfsPool != nil { result = append(result, validateStoragePoolType(&numPoolTypes, fieldPrefix.Child(strconv.Itoa(i), "zfsPool"))...) 
- result = append(result, curSP.ZfsPool.Validate(oldSP, fieldPrefix.Child(strconv.Itoa(i)), "zfsPool", false)...) + warns, errs := curSP.ZfsPool.Validate(oldSP, fieldPrefix.Child(strconv.Itoa(i)), "zfsPool", false) + result = append(result, errs...) + warnings = append(warnings, warns...) + warnings = append(warnings, validateCreateArgsRequireSource(curSP.Source, curSP.ZfsPool.ZpoolCreateArguments, fieldPrefix.Child(strconv.Itoa(i), "zfsPool", "zpoolCreateArguments"))...) } if curSP.ZfsThinPool != nil { result = append(result, validateStoragePoolType(&numPoolTypes, fieldPrefix.Child(strconv.Itoa(i), "zfsThinPool"))...) - result = append(result, curSP.ZfsThinPool.Validate(oldSP, fieldPrefix.Child(strconv.Itoa(i)), "zfsThinPool", true)...) + warns, errs := curSP.ZfsThinPool.Validate(oldSP, fieldPrefix.Child(strconv.Itoa(i)), "zfsThinPool", true) + result = append(result, errs...) + warnings = append(warnings, warns...) + warnings = append(warnings, validateCreateArgsRequireSource(curSP.Source, curSP.ZfsThinPool.ZpoolCreateArguments, fieldPrefix.Child(strconv.Itoa(i), "zfsThinPool", "zpoolCreateArguments"))...) 
} if numPoolTypes == 0 { @@ -85,7 +98,7 @@ func ValidateStoragePools(curSPs, oldSPs []piraeusv1.LinstorStoragePool, fieldPr ) } - return result + return warnings, result } func validateNoSource(src *piraeusv1.LinstorStoragePoolSource, p *field.Path, name string) field.ErrorList { @@ -98,6 +111,14 @@ func validateNoSource(src *piraeusv1.LinstorStoragePoolSource, p *field.Path, na return nil } +func validateCreateArgsRequireSource(src *piraeusv1.LinstorStoragePoolSource, args []string, p *field.Path) admission.Warnings { + if len(args) != 0 && src == nil { + return admission.Warnings{fmt.Sprintf("%s: Arguments without a pool source have no effect", p)} + } + + return nil +} + func validateStoragePoolType(numPools *int, p *field.Path) field.ErrorList { *numPools++ if *numPools > 1 { @@ -109,18 +130,19 @@ func validateStoragePoolType(numPools *int, p *field.Path) field.ErrorList { return nil } -func validateLinstorStoragePoolLvmThin(newSP *piraeusv1.LinstorStoragePoolLvmThin, oldSP *piraeusv1.LinstorStoragePool, fieldPrefix *field.Path) field.ErrorList { - var result field.ErrorList +func validateLinstorStoragePoolLvmThin(newSP *piraeusv1.LinstorStoragePoolLvmThin, oldSP *piraeusv1.LinstorStoragePool, src *piraeusv1.LinstorStoragePoolSource, fieldPrefix *field.Path) (admission.Warnings, field.ErrorList) { + var errs field.ErrorList + var warnings admission.Warnings if oldSP != nil && oldSP.LvmThinPool == nil { - result = append(result, field.Forbidden( + errs = append(errs, field.Forbidden( fieldPrefix, "Cannot change storage pool type", )) } if newSP.VolumeGroup != "" && !VGRegexp.MatchString(newSP.VolumeGroup) { - result = append(result, field.Invalid( + errs = append(errs, field.Invalid( fieldPrefix.Child("volumeGroup"), newSP.VolumeGroup, "Not a valid VG name", @@ -128,14 +150,14 @@ func validateLinstorStoragePoolLvmThin(newSP *piraeusv1.LinstorStoragePoolLvmThi } if oldSP != nil && oldSP.LvmThinPool != nil && newSP.VolumeGroup != oldSP.LvmThinPool.VolumeGroup { - 
result = append(result, field.Forbidden( + errs = append(errs, field.Forbidden( fieldPrefix.Child("volumeGroup"), "Cannot change VG name", )) } if newSP.ThinPool != "" && !VGRegexp.MatchString(newSP.ThinPool) { - result = append(result, field.Invalid( + errs = append(errs, field.Invalid( fieldPrefix.Child("thinPool"), newSP.ThinPool, "Not a valid thinpool LV name", @@ -143,27 +165,44 @@ func validateLinstorStoragePoolLvmThin(newSP *piraeusv1.LinstorStoragePoolLvmThi } if oldSP != nil && oldSP.LvmThinPool != nil && newSP.ThinPool != oldSP.LvmThinPool.ThinPool { - result = append(result, field.Forbidden( + errs = append(errs, field.Forbidden( fieldPrefix.Child("thinPool"), "Cannot change thinpool LV name", )) } - return result + warnings = append(warnings, validateCreateArgsRequireSource(src, newSP.PhysicalVolumeCreateArguments, fieldPrefix.Child("physicalVolumeCreateArguments"))...) + warnings = append(warnings, validateCreateArgsRequireSource(src, newSP.VolumeGroupCreateArguments, fieldPrefix.Child("volumeGroupCreateArguments"))...) + warnings = append(warnings, validateCreateArgsRequireSource(src, newSP.LogicalVolumeCreateArguments, fieldPrefix.Child("logicalVolumeCreateArguments"))...) 
+ + if oldSP != nil && oldSP.LvmThinPool != nil && !slices.Equal(newSP.PhysicalVolumeCreateArguments, oldSP.LvmThinPool.PhysicalVolumeCreateArguments) { + warnings = append(warnings, fmt.Sprintf("%s: Update will only apply to new nodes", fieldPrefix.Child("physicalVolumeCreateArguments"))) + } + + if oldSP != nil && oldSP.LvmThinPool != nil && !slices.Equal(newSP.VolumeGroupCreateArguments, oldSP.LvmThinPool.VolumeGroupCreateArguments) { + warnings = append(warnings, fmt.Sprintf("%s: Update will only apply to new nodes", fieldPrefix.Child("volumeGroupCreateArguments"))) + } + + if oldSP != nil && oldSP.LvmThinPool != nil && !slices.Equal(newSP.LogicalVolumeCreateArguments, oldSP.LvmThinPool.LogicalVolumeCreateArguments) { + warnings = append(warnings, fmt.Sprintf("%s: Update will only apply to new nodes", fieldPrefix.Child("logicalVolumeCreateArguments"))) + } + + return warnings, errs } -func ValidateLinstorStoragePoolLvm(newSP *piraeusv1.LinstorStoragePoolLvm, oldSP *piraeusv1.LinstorStoragePool, fieldPrefix *field.Path) field.ErrorList { - var result field.ErrorList +func validateLinstorStoragePoolLvm(newSP *piraeusv1.LinstorStoragePoolLvm, oldSP *piraeusv1.LinstorStoragePool, src *piraeusv1.LinstorStoragePoolSource, fieldPrefix *field.Path) (admission.Warnings, field.ErrorList) { + var errs field.ErrorList + var warnings admission.Warnings if oldSP != nil && oldSP.LvmPool == nil { - result = append(result, field.Forbidden( + errs = append(errs, field.Forbidden( fieldPrefix, "Cannot change storage pool type", )) } if newSP.VolumeGroup != "" && !VGRegexp.MatchString(newSP.VolumeGroup) { - result = append(result, field.Invalid( + errs = append(errs, field.Invalid( fieldPrefix.Child("volumeGroup"), newSP.VolumeGroup, "Not a valid VG name", @@ -171,13 +210,24 @@ func ValidateLinstorStoragePoolLvm(newSP *piraeusv1.LinstorStoragePoolLvm, oldSP } if oldSP != nil && oldSP.LvmPool != nil && newSP.VolumeGroup != oldSP.LvmPool.VolumeGroup { - result = append(result, 
field.Forbidden( + errs = append(errs, field.Forbidden( fieldPrefix.Child("volumeGroup"), "Cannot change VG name", )) } - return result + warnings = append(warnings, validateCreateArgsRequireSource(src, newSP.PhysicalVolumeCreateArguments, fieldPrefix.Child("physicalVolumeCreateArguments"))...) + warnings = append(warnings, validateCreateArgsRequireSource(src, newSP.VolumeGroupCreateArguments, fieldPrefix.Child("volumeGroupCreateArguments"))...) + + if oldSP != nil && oldSP.LvmPool != nil && !slices.Equal(newSP.PhysicalVolumeCreateArguments, oldSP.LvmPool.PhysicalVolumeCreateArguments) { + warnings = append(warnings, fmt.Sprintf("%s: Update will only apply to new nodes", fieldPrefix.Child("physicalVolumeCreateArguments"))) + } + + if oldSP != nil && oldSP.LvmPool != nil && !slices.Equal(newSP.VolumeGroupCreateArguments, oldSP.LvmPool.VolumeGroupCreateArguments) { + warnings = append(warnings, fmt.Sprintf("%s: Update will only apply to new nodes", fieldPrefix.Child("volumeGroupCreateArguments"))) + } + + return warnings, errs } func ValidateLinstorStoragePoolFile(newSP *piraeusv1.LinstorStoragePoolFile, oldSP *piraeusv1.LinstorStoragePool, fieldPrefix *field.Path, name string, thin bool) field.ErrorList { diff --git a/internal/webhook/v1/webhook_suite_test.go b/internal/webhook/v1/webhook_suite_test.go index a4a6be64..ed2c5772 100644 --- a/internal/webhook/v1/webhook_suite_test.go +++ b/internal/webhook/v1/webhook_suite_test.go @@ -159,6 +159,10 @@ type Warning struct { text string } +func (w Warning) String() string { + return fmt.Sprintf("%d(%s): %s", w.code, w.agent, w.text) +} + type WarningHandler []Warning func (w *WarningHandler) HandleWarningHeader(code int, agent string, text string) {