diff --git a/api/operator/v1/vlagent_types.go b/api/operator/v1/vlagent_types.go
index 65a465258..9c626f85e 100644
--- a/api/operator/v1/vlagent_types.go
+++ b/api/operator/v1/vlagent_types.go
@@ -55,10 +55,10 @@ type VLAgentSpec struct {
// PodDisruptionBudget created by operator
// +optional
PodDisruptionBudget *vmv1beta1.EmbeddedPodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty"`
- // StatefulStorage configures storage for StatefulSet
+ // Storage configures storage for StatefulSet
// +optional
Storage *vmv1beta1.StorageSpec `json:"storage,omitempty"`
- // StatefulRollingUpdateStrategy allows configuration for strategyType
+ // RollingUpdateStrategy allows configuration for strategyType
// set it to RollingUpdate for disabling operator statefulSet rollingUpdate
// +optional
RollingUpdateStrategy appsv1.StatefulSetUpdateStrategyType `json:"rollingUpdateStrategy,omitempty"`
diff --git a/config/crd/overlay/crd.yaml b/config/crd/overlay/crd.yaml
index 6dbed5b78..fedc4408e 100644
--- a/config/crd/overlay/crd.yaml
+++ b/config/crd/overlay/crd.yaml
@@ -1267,7 +1267,7 @@ spec:
type: integer
rollingUpdateStrategy:
description: |-
- StatefulRollingUpdateStrategy allows configuration for strategyType
+ RollingUpdateStrategy allows configuration for strategyType
set it to RollingUpdate for disabling operator statefulSet rollingUpdate
type: string
runtimeClassName:
@@ -1357,7 +1357,7 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
storage:
- description: StatefulStorage configures storage for StatefulSet
+ description: Storage configures storage for StatefulSet
properties:
disableMountSubPath:
description: |-
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index bfe4b09c5..50dbb23cf 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -24,6 +24,7 @@ SECURITY: upgrade Go builder from Go1.25.4 to Go1.25.5. See [the list of issues
* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): remove orphaned ServiceAccount and RBAC resources. See [#1665](https://github.com/VictoriaMetrics/operator/issues/1665).
* BUGFIX: [vmanomaly](https://docs.victoriametrics.com/operator/resources/vmanomaly/): properly handle configuration which is missing `reader.queries` in either `configRawYaml` or `configSecret`. Previously, it would lead to panic.
* BUGFIX: [vmanomaly](https://docs.victoriametrics.com/operator/resources/vmanomaly/): fix configuration parsing when running in [UI mode](https://docs.victoriametrics.com/anomaly-detection/ui/). Previously, configuration required to use `preset: ui:version` instead of `preset: ui`.
+* BUGFIX: [vmsingle](https://docs.victoriametrics.com/operator/resources/vmsingle/), [vlsingle](https://docs.victoriametrics.com/operator/resources/vlsingle/), [vtsingle](https://docs.victoriametrics.com/operator/resources/vtsingle/) and [vmalertmanager](https://docs.victoriametrics.com/operator/resources/vmalertmanager/): do not mount an emptyDir if the storage data volume is already present in the volumes list. Previously, it was impossible to mount an external PVC without overriding the default storageDataPath via `spec.extraArgs` and without an unneeded emptyDir listed among pod volumes. Related issue: [#1477](https://github.com/VictoriaMetrics/operator/issues/1477).
## [v0.66.0](https://github.com/VictoriaMetrics/operator/releases/tag/v0.66.0)
diff --git a/docs/api.md b/docs/api.md
index 8e2044542..fc48a3104 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -253,7 +253,7 @@ Appears in: [VLAgent](#vlagent)
| replicaCount#
_integer_ | _(Optional)_
ReplicaCount is the expected size of the Application. |
| resources#
_[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)_ | _(Optional)_
Resources container resource request and limits, https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
if not defined default resources from operator config will be used |
| revisionHistoryLimitCount#
_integer_ | _(Optional)_
The number of old ReplicaSets to retain to allow rollback in deployment or
maximum number of revisions that will be maintained in the Deployment revision history.
Has no effect at StatefulSets
Defaults to 10. |
-| rollingUpdateStrategy#
_[StatefulSetUpdateStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetupdatestrategytype-v1-apps)_ | _(Optional)_
StatefulRollingUpdateStrategy allows configuration for strategyType
set it to RollingUpdate for disabling operator statefulSet rollingUpdate |
+| rollingUpdateStrategy#
_[StatefulSetUpdateStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetupdatestrategytype-v1-apps)_ | _(Optional)_
RollingUpdateStrategy allows configuration for strategyType
set it to RollingUpdate for disabling operator statefulSet rollingUpdate |
| runtimeClassName#
_string_ | _(Optional)_
RuntimeClassName - defines runtime class for kubernetes pod.
https://kubernetes.io/docs/concepts/containers/runtime-class/ |
| schedulerName#
_string_ | _(Optional)_
SchedulerName - defines kubernetes scheduler name |
| secrets#
_string array_ | _(Optional)_
Secrets is a list of Secrets in the same namespace as the Application
object, which shall be mounted into the Application container
at /etc/vm/secrets/SECRET_NAME folder |
@@ -261,7 +261,7 @@ Appears in: [VLAgent](#vlagent)
| serviceAccountName#
_string_ | _(Optional)_
ServiceAccountName is the name of the ServiceAccount to use to run the pods |
| serviceScrapeSpec#
_[VMServiceScrapeSpec](#vmservicescrapespec)_ | _(Optional)_
ServiceScrapeSpec that will be added to vlagent VMServiceScrape spec |
| serviceSpec#
_[AdditionalServiceSpec](#additionalservicespec)_ | _(Optional)_
ServiceSpec that will be added to vlagent service spec |
-| storage#
_[StorageSpec](#storagespec)_ | _(Optional)_
StatefulStorage configures storage for StatefulSet |
+| storage#
_[StorageSpec](#storagespec)_ | _(Optional)_
Storage configures storage for StatefulSet |
| syslogSpec#
_[SyslogServerSpec](#syslogserverspec)_ | _(Optional)_
SyslogSpec defines syslog listener configuration |
| terminationGracePeriodSeconds#
_integer_ | _(Optional)_
TerminationGracePeriodSeconds period for container graceful termination |
| tolerations#
_[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#toleration-v1-core) array_ | _(Optional)_
Tolerations If specified, the pod's tolerations. |
diff --git a/internal/controller/operator/factory/build/backup.go b/internal/controller/operator/factory/build/backup.go
index 35ba458a7..ec876a976 100644
--- a/internal/controller/operator/factory/build/backup.go
+++ b/internal/controller/operator/factory/build/backup.go
@@ -21,7 +21,8 @@ func VMBackupManager(
ctx context.Context,
cr *vmv1beta1.VMBackup,
port string,
- storagePath, dataVolumeName string,
+ storagePath string,
+ mounts []corev1.VolumeMount,
extraArgs map[string]string,
isCluster bool,
license *vmv1beta1.License,
@@ -85,16 +86,7 @@ func VMBackupManager(
var ports []corev1.ContainerPort
ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(cr.Port).IntVal})
-
- mounts := []corev1.VolumeMount{
- {
- Name: dataVolumeName,
- MountPath: storagePath,
- ReadOnly: false,
- },
- }
mounts = append(mounts, cr.VolumeMounts...)
-
if cr.CredentialsSecret != nil {
mounts = append(mounts, corev1.VolumeMount{
Name: k8stools.SanitizeVolumeName("secret-" + cr.CredentialsSecret.Name),
@@ -171,9 +163,9 @@ func VMBackupManager(
// VMRestore conditionally creates vmrestore container
func VMRestore(
cr *vmv1beta1.VMBackup,
- storagePath, dataVolumeName string,
+ storagePath string,
+ mounts []corev1.VolumeMount,
) (*corev1.Container, error) {
-
args := []string{
fmt.Sprintf("-storageDataPath=%s", storagePath),
"-eula",
@@ -197,16 +189,7 @@ func VMRestore(
var ports []corev1.ContainerPort
ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(cr.Port).IntVal})
-
- mounts := []corev1.VolumeMount{
- {
- Name: dataVolumeName,
- MountPath: storagePath,
- ReadOnly: false,
- },
- }
mounts = append(mounts, cr.VolumeMounts...)
-
if cr.CredentialsSecret != nil {
mounts = append(mounts, corev1.VolumeMount{
Name: k8stools.SanitizeVolumeName("secret-" + cr.CredentialsSecret.Name),
diff --git a/internal/controller/operator/factory/build/container.go b/internal/controller/operator/factory/build/container.go
index edc7e4da5..fed739b5d 100644
--- a/internal/controller/operator/factory/build/container.go
+++ b/internal/controller/operator/factory/build/container.go
@@ -2,6 +2,7 @@ package build
import (
"fmt"
+ "path/filepath"
"strings"
corev1 "k8s.io/api/core/v1"
@@ -15,6 +16,7 @@ import (
)
const probeTimeoutSeconds int32 = 5
+const DataVolumeName = "data"
type probeCRD interface {
Probe() *vmv1beta1.EmbeddedProbes
@@ -535,3 +537,49 @@ func AddSyslogTLSConfigToVolumes(dstVolumes []corev1.Volume, dstMounts []corev1.
}
return dstVolumes, dstMounts
}
+
+// StorageVolumeMountsTo ensures that a volume named dataVolumeName is defined and
+// mounted at storagePath. When the user has not supplied such a volume or mount, a
+// default one backed by pvcSrc (or an emptyDir if pvcSrc is nil) is prepended.
+func StorageVolumeMountsTo(volumes []corev1.Volume, mounts []corev1.VolumeMount, pvcSrc *corev1.PersistentVolumeClaimVolumeSource, storagePath, dataVolumeName string) ([]corev1.Volume, []corev1.VolumeMount, error) {
+	foundMount := false
+	for _, volumeMount := range mounts {
+		rel, err := filepath.Rel(volumeMount.MountPath, storagePath)
+		if err == nil && !strings.HasPrefix(rel, "..") {
+			if volumeMount.Name == dataVolumeName {
+				foundMount = true
+				break
+			}
+			return nil, nil, fmt.Errorf(
+				"unexpected volume=%q mounted to path=%q, which is reserved for volume=%q, path=%q",
+				volumeMount.Name, volumeMount.MountPath, dataVolumeName, storagePath)
+		}
+		if volumeMount.Name == dataVolumeName {
+			return nil, nil, fmt.Errorf(
+				"unexpected volume=%q mounted to path=%q, expected path=%q",
+				volumeMount.Name, volumeMount.MountPath, storagePath)
+		}
+	}
+	if !foundMount {
+		mounts = append([]corev1.VolumeMount{{
+			Name:      dataVolumeName,
+			MountPath: storagePath,
+		}}, mounts...)
+	}
+	for _, volume := range volumes {
+		if volume.Name == dataVolumeName {
+			return volumes, mounts, nil
+		}
+	}
+	var source corev1.VolumeSource
+	if pvcSrc != nil {
+		source.PersistentVolumeClaim = pvcSrc
+	} else {
+		source.EmptyDir = &corev1.EmptyDirVolumeSource{}
+	}
+	volumes = append([]corev1.Volume{{
+		Name:         dataVolumeName,
+		VolumeSource: source,
+	}}, volumes...)
+	return volumes, mounts, nil
+}
diff --git a/internal/controller/operator/factory/build/container_test.go b/internal/controller/operator/factory/build/container_test.go
index 9f26d8c12..8b0b80d46 100644
--- a/internal/controller/operator/factory/build/container_test.go
+++ b/internal/controller/operator/factory/build/container_test.go
@@ -45,7 +45,6 @@ func Test_buildProbe(t *testing.T) {
cr testBuildProbeCR
validate func(corev1.Container) error
}
-
f := func(o opts) {
t.Helper()
got := Probe(o.container, o.cr)
@@ -349,5 +348,214 @@ func TestAddSyslogArgsTo(t *testing.T) {
"-syslog.compressMethod.udp=zstd",
}
f(&spec, expected)
+}
+
+func TestStorageVolumeMountsTo(t *testing.T) {
+ type opts struct {
+ pvcSrc *corev1.PersistentVolumeClaimVolumeSource
+ storagePath string
+ volumes []corev1.Volume
+ expectedVolumes []corev1.Volume
+ mounts []corev1.VolumeMount
+ expectedMounts []corev1.VolumeMount
+ wantErr bool
+ }
+ f := func(o opts) {
+ t.Helper()
+ gotVolumes, gotMounts, err := StorageVolumeMountsTo(o.volumes, o.mounts, o.pvcSrc, o.storagePath, DataVolumeName)
+ assert.Equal(t, o.expectedMounts, gotMounts)
+ assert.Equal(t, o.expectedVolumes, gotVolumes)
+ if o.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ }
+ }
+
+ // no PVC spec and no volumes and mounts
+ f(opts{
+ storagePath: "/test",
+ expectedVolumes: []corev1.Volume{{
+ Name: DataVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ EmptyDir: &corev1.EmptyDirVolumeSource{},
+ },
+ }},
+ expectedMounts: []corev1.VolumeMount{{
+ Name: DataVolumeName,
+ MountPath: "/test",
+ }},
+ })
+
+ // with PVC spec and no volumes and mounts
+ f(opts{
+ storagePath: "/test",
+ pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ expectedVolumes: []corev1.Volume{{
+ Name: DataVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ },
+ }},
+ expectedMounts: []corev1.VolumeMount{{
+ Name: DataVolumeName,
+ MountPath: "/test",
+ }},
+ })
+
+ // with PVC spec and matching data volume
+ f(opts{
+ volumes: []corev1.Volume{{
+ Name: DataVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{
+ VolumeID: "aws-volume",
+ },
+ },
+ }},
+ storagePath: "/test",
+ pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ expectedVolumes: []corev1.Volume{
+ {
+ Name: DataVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{
+ VolumeID: "aws-volume",
+ },
+ },
+ },
+ },
+ expectedMounts: []corev1.VolumeMount{{
+ Name: DataVolumeName,
+ MountPath: "/test",
+ }},
+ })
+ // with PVC spec and not matching data volume
+ f(opts{
+ volumes: []corev1.Volume{{
+ Name: "extra",
+ VolumeSource: corev1.VolumeSource{
+ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{
+ VolumeID: "aws-volume",
+ },
+ },
+ }},
+ storagePath: "/test",
+ pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ expectedVolumes: []corev1.Volume{
+ {
+ Name: DataVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ },
+ },
+ {
+ Name: "extra",
+ VolumeSource: corev1.VolumeSource{
+ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{
+ VolumeID: "aws-volume",
+ },
+ },
+ },
+ },
+ expectedMounts: []corev1.VolumeMount{{
+ Name: DataVolumeName,
+ MountPath: "/test",
+ }},
+ })
+
+ // with PVC spec and existing data volume mount
+ f(opts{
+ volumes: []corev1.Volume{{
+ Name: "extra",
+ VolumeSource: corev1.VolumeSource{
+ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{
+ VolumeID: "aws-volume",
+ },
+ },
+ }},
+ mounts: []corev1.VolumeMount{{
+ Name: DataVolumeName,
+ MountPath: "/other-path",
+ }},
+ wantErr: true,
+ storagePath: "/test",
+ pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ })
+
+ // with PVC spec and intersecting data volume mount
+ f(opts{
+ volumes: []corev1.Volume{{
+ Name: "extra",
+ VolumeSource: corev1.VolumeSource{
+ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{
+ VolumeID: "aws-volume",
+ },
+ },
+ }},
+ mounts: []corev1.VolumeMount{{
+ Name: DataVolumeName,
+ MountPath: "/test",
+ }},
+ storagePath: "/test/data",
+ pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ expectedVolumes: []corev1.Volume{
+ {
+ Name: DataVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ },
+ },
+ {
+ Name: "extra",
+ VolumeSource: corev1.VolumeSource{
+ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{
+ VolumeID: "aws-volume",
+ },
+ },
+ },
+ },
+ expectedMounts: []corev1.VolumeMount{{
+ Name: DataVolumeName,
+ MountPath: "/test",
+ }},
+ })
+
+ // with PVC spec and intersecting volume mount and absent volume
+ f(opts{
+ volumes: []corev1.Volume{{
+ Name: "test",
+ VolumeSource: corev1.VolumeSource{
+ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{
+ VolumeID: "aws-volume",
+ },
+ },
+ }},
+ mounts: []corev1.VolumeMount{{
+ Name: "test",
+ MountPath: "/test",
+ }},
+ storagePath: "/test/data",
+ pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ wantErr: true,
+ })
}
diff --git a/internal/controller/operator/factory/vlsingle/vlogs.go b/internal/controller/operator/factory/vlsingle/vlogs.go
index da5c887b5..60fb440e0 100644
--- a/internal/controller/operator/factory/vlsingle/vlogs.go
+++ b/internal/controller/operator/factory/vlsingle/vlogs.go
@@ -147,10 +147,7 @@ func makeVLogsPodSpec(r *vmv1beta1.VLogs) (*corev1.PodTemplateSpec, error) {
fmt.Sprintf("-retentionPeriod=%s", r.Spec.RetentionPeriod),
}
- // if customStorageDataPath is not empty, do not add pvc.
- shouldAddPVC := r.Spec.StorageDataPath == ""
-
- storagePath := vlsingleDataDir
+ storagePath := dataDataDir
if r.Spec.StorageDataPath != "" {
storagePath = r.Spec.StorageDataPath
}
@@ -180,37 +177,18 @@ func makeVLogsPodSpec(r *vmv1beta1.VLogs) (*corev1.PodTemplateSpec, error) {
var ports []corev1.ContainerPort
ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(r.Spec.Port).IntVal})
- volumes := []corev1.Volume{}
-
- storageSpec := r.Spec.Storage
- if storageSpec == nil {
- volumes = append(volumes, corev1.Volume{
- Name: vlsingleDataVolumeName,
- VolumeSource: corev1.VolumeSource{
- EmptyDir: &corev1.EmptyDirVolumeSource{},
- },
- })
- } else if shouldAddPVC {
- volumes = append(volumes, corev1.Volume{
- Name: vlsingleDataVolumeName,
- VolumeSource: corev1.VolumeSource{
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: r.PrefixedName(),
- },
- },
- })
+ var pvcSrc *corev1.PersistentVolumeClaimVolumeSource
+ if r.Spec.Storage != nil {
+ pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: r.PrefixedName(),
+ }
}
- volumes = append(volumes, r.Spec.Volumes...)
- vmMounts := []corev1.VolumeMount{
- {
- Name: vlsingleDataVolumeName,
- MountPath: storagePath,
- },
+ volumes, vmMounts, err := build.StorageVolumeMountsTo(r.Spec.Volumes, r.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName)
+ if err != nil {
+ return nil, err
}
- vmMounts = append(vmMounts, r.Spec.VolumeMounts...)
-
for _, s := range r.Spec.Secrets {
volumes = append(volumes, corev1.Volume{
Name: k8stools.SanitizeVolumeName("secret-" + s),
diff --git a/internal/controller/operator/factory/vlsingle/vlsingle.go b/internal/controller/operator/factory/vlsingle/vlsingle.go
index 7620d2f8c..847c1b617 100644
--- a/internal/controller/operator/factory/vlsingle/vlsingle.go
+++ b/internal/controller/operator/factory/vlsingle/vlsingle.go
@@ -24,8 +24,7 @@ import (
)
const (
- vlsingleDataDir = "/victoria-logs-data"
- vlsingleDataVolumeName = "data"
+ dataDataDir = "/victoria-logs-data"
tlsServerConfigMountPath = "/etc/vm/tls-server-secrets"
)
@@ -69,7 +68,7 @@ func CreateOrUpdate(ctx context.Context, rclient client.Client, cr *vmv1.VLSingl
return err
}
}
- if cr.Spec.Storage != nil && cr.Spec.StorageDataPath == "" {
+ if cr.Spec.Storage != nil {
if err := createOrUpdatePVC(ctx, rclient, cr, prevCR); err != nil {
return err
}
@@ -156,10 +155,7 @@ func makePodSpec(r *vmv1.VLSingle) (*corev1.PodTemplateSpec, error) {
args = append(args, fmt.Sprintf("-retention.maxDiskSpaceUsageBytes=%s", r.Spec.RetentionMaxDiskSpaceUsageBytes))
}
- // if customStorageDataPath is not empty, do not add pvc.
- shouldAddPVC := r.Spec.StorageDataPath == ""
-
- storagePath := vlsingleDataDir
+ storagePath := dataDataDir
if r.Spec.StorageDataPath != "" {
storagePath = r.Spec.StorageDataPath
}
@@ -193,37 +189,18 @@ func makePodSpec(r *vmv1.VLSingle) (*corev1.PodTemplateSpec, error) {
var ports []corev1.ContainerPort
ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(r.Spec.Port).IntVal})
- volumes := []corev1.Volume{}
-
- storageSpec := r.Spec.Storage
- if storageSpec == nil {
- volumes = append(volumes, corev1.Volume{
- Name: vlsingleDataVolumeName,
- VolumeSource: corev1.VolumeSource{
- EmptyDir: &corev1.EmptyDirVolumeSource{},
- },
- })
- } else if shouldAddPVC {
- volumes = append(volumes, corev1.Volume{
- Name: vlsingleDataVolumeName,
- VolumeSource: corev1.VolumeSource{
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: r.PrefixedName(),
- },
- },
- })
+ var pvcSrc *corev1.PersistentVolumeClaimVolumeSource
+ if r.Spec.Storage != nil {
+ pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: r.PrefixedName(),
+ }
}
- volumes = append(volumes, r.Spec.Volumes...)
- vmMounts := []corev1.VolumeMount{
- {
- Name: vlsingleDataVolumeName,
- MountPath: storagePath,
- },
+ volumes, vmMounts, err := build.StorageVolumeMountsTo(r.Spec.Volumes, r.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName)
+ if err != nil {
+ return nil, err
}
- vmMounts = append(vmMounts, r.Spec.VolumeMounts...)
-
for _, s := range r.Spec.Secrets {
volumes = append(volumes, corev1.Volume{
Name: k8stools.SanitizeVolumeName("secret-" + s),
diff --git a/internal/controller/operator/factory/vmcluster/vmcluster.go b/internal/controller/operator/factory/vmcluster/vmcluster.go
index 3fd18618b..5a3432779 100644
--- a/internal/controller/operator/factory/vmcluster/vmcluster.go
+++ b/internal/controller/operator/factory/vmcluster/vmcluster.go
@@ -988,6 +988,7 @@ func makePodSpecForVMStorage(ctx context.Context, cr *vmv1beta1.VMCluster) (*cor
args = append(args, fmt.Sprintf("-storageDataPath=%s", cr.Spec.VMStorage.StorageDataPath))
vmMounts = append(vmMounts, cr.Spec.VMStorage.VolumeMounts...)
+ commonMounts := vmMounts
for _, s := range cr.Spec.VMStorage.Secrets {
volumes = append(volumes, corev1.Volume{
@@ -1048,7 +1049,7 @@ func makePodSpecForVMStorage(ctx context.Context, cr *vmv1beta1.VMCluster) (*cor
var initContainers []corev1.Container
if cr.Spec.VMStorage.VMBackup != nil {
- vmBackupManagerContainer, err := build.VMBackupManager(ctx, cr.Spec.VMStorage.VMBackup, cr.Spec.VMStorage.Port, cr.Spec.VMStorage.StorageDataPath, cr.Spec.VMStorage.GetStorageVolumeName(), cr.Spec.VMStorage.ExtraArgs, true, cr.Spec.License)
+ vmBackupManagerContainer, err := build.VMBackupManager(ctx, cr.Spec.VMStorage.VMBackup, cr.Spec.VMStorage.Port, cr.Spec.VMStorage.StorageDataPath, commonMounts, cr.Spec.VMStorage.ExtraArgs, true, cr.Spec.License)
if err != nil {
return nil, err
}
@@ -1058,7 +1059,7 @@ func makePodSpecForVMStorage(ctx context.Context, cr *vmv1beta1.VMCluster) (*cor
if cr.Spec.VMStorage.VMBackup.Restore != nil &&
cr.Spec.VMStorage.VMBackup.Restore.OnStart != nil &&
cr.Spec.VMStorage.VMBackup.Restore.OnStart.Enabled {
- vmRestore, err := build.VMRestore(cr.Spec.VMStorage.VMBackup, cr.Spec.VMStorage.StorageDataPath, cr.Spec.VMStorage.GetStorageVolumeName())
+ vmRestore, err := build.VMRestore(cr.Spec.VMStorage.VMBackup, cr.Spec.VMStorage.StorageDataPath, commonMounts)
if err != nil {
return nil, err
}
diff --git a/internal/controller/operator/factory/vmsingle/vmsingle.go b/internal/controller/operator/factory/vmsingle/vmsingle.go
index 3c700d513..9b12582dd 100644
--- a/internal/controller/operator/factory/vmsingle/vmsingle.go
+++ b/internal/controller/operator/factory/vmsingle/vmsingle.go
@@ -24,8 +24,7 @@ import (
)
const (
- vmSingleDataDir = "/victoria-metrics-data"
- vmDataVolumeName = "data"
+ dataDataDir = "/victoria-metrics-data"
streamAggrSecretKey = "config.yaml"
)
@@ -83,7 +82,7 @@ func CreateOrUpdate(ctx context.Context, cr *vmv1beta1.VMSingle, rclient client.
}
}
- if cr.Spec.Storage != nil && cr.Spec.StorageDataPath == "" {
+ if cr.Spec.Storage != nil {
if err := createStorage(ctx, rclient, cr, prevCR); err != nil {
return fmt.Errorf("cannot create storage: %w", err)
}
@@ -154,12 +153,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS
args = append(args, fmt.Sprintf("-retentionPeriod=%s", cr.Spec.RetentionPeriod))
}
- // if customStorageDataPath is not empty, do not add volumes
- // and volumeMounts
- // it's user responsibility to provide correct values
- mustAddVolumeMounts := cr.Spec.StorageDataPath == ""
-
- storagePath := vmSingleDataDir
+ storagePath := dataDataDir
if cr.Spec.StorageDataPath != "" {
storagePath = cr.Spec.StorageDataPath
}
@@ -188,10 +182,18 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS
ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(cr.Spec.Port).IntVal})
ports = build.AppendInsertPorts(ports, cr.Spec.InsertPorts)
- var volumes []corev1.Volume
- var vmMounts []corev1.VolumeMount
+ var pvcSrc *corev1.PersistentVolumeClaimVolumeSource
+ if cr.Spec.Storage != nil {
+ pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: cr.PrefixedName(),
+ }
+ }
- volumes, vmMounts = addVolumeMountsTo(volumes, vmMounts, cr, mustAddVolumeMounts, storagePath)
+ volumes, vmMounts, err := build.StorageVolumeMountsTo(cr.Spec.Volumes, cr.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName)
+ if err != nil {
+ return nil, err
+ }
+ commonMounts := vmMounts
if cr.Spec.VMBackup != nil && cr.Spec.VMBackup.CredentialsSecret != nil {
volumes = append(volumes, corev1.Volume{
@@ -204,9 +206,6 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS
})
}
- volumes = append(volumes, cr.Spec.Volumes...)
- vmMounts = append(vmMounts, cr.Spec.VolumeMounts...)
-
for _, s := range cr.Spec.Secrets {
volumes = append(volumes, corev1.Volume{
Name: k8stools.SanitizeVolumeName("secret-" + s),
@@ -275,7 +274,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS
var initContainers []corev1.Container
if cr.Spec.VMBackup != nil {
- vmBackupManagerContainer, err := build.VMBackupManager(ctx, cr.Spec.VMBackup, cr.Spec.Port, storagePath, vmDataVolumeName, cr.Spec.ExtraArgs, false, cr.Spec.License)
+ vmBackupManagerContainer, err := build.VMBackupManager(ctx, cr.Spec.VMBackup, cr.Spec.Port, storagePath, commonMounts, cr.Spec.ExtraArgs, false, cr.Spec.License)
if err != nil {
return nil, err
}
@@ -285,7 +284,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS
if cr.Spec.VMBackup.Restore != nil &&
cr.Spec.VMBackup.Restore.OnStart != nil &&
cr.Spec.VMBackup.Restore.OnStart.Enabled {
- vmRestore, err := build.VMRestore(cr.Spec.VMBackup, storagePath, vmDataVolumeName)
+ vmRestore, err := build.VMRestore(cr.Spec.VMBackup, storagePath, commonMounts)
if err != nil {
return nil, err
}
@@ -455,58 +454,3 @@ func deleteOrphaned(ctx context.Context, rclient client.Client, cr *vmv1beta1.VM
}
return nil
}
-
-func addVolumeMountsTo(volumes []corev1.Volume, vmMounts []corev1.VolumeMount, cr *vmv1beta1.VMSingle, mustAddVolumeMounts bool, storagePath string) ([]corev1.Volume, []corev1.VolumeMount) {
-
- switch {
- case mustAddVolumeMounts:
- // add volume and mount point by operator directly
- vmMounts = append(vmMounts, corev1.VolumeMount{
- Name: vmDataVolumeName,
- MountPath: storagePath},
- )
-
- vlSource := corev1.VolumeSource{
- EmptyDir: &corev1.EmptyDirVolumeSource{},
- }
- if cr.Spec.Storage != nil {
- vlSource = corev1.VolumeSource{
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: cr.PrefixedName(),
- },
- }
- }
- volumes = append(volumes, corev1.Volume{
- Name: vmDataVolumeName,
- VolumeSource: vlSource})
-
- case len(cr.Spec.Volumes) > 0:
- // add missing volumeMount point for backward compatibility
- // it simplifies management of external PVCs
- var volumeNamePresent bool
- for _, volume := range cr.Spec.Volumes {
- if volume.Name == vmDataVolumeName {
- volumeNamePresent = true
- break
- }
- }
- if volumeNamePresent {
- var mustSkipVolumeAdd bool
- for _, volumeMount := range cr.Spec.VolumeMounts {
- if volumeMount.Name == vmDataVolumeName {
- mustSkipVolumeAdd = true
- break
- }
- }
- if !mustSkipVolumeAdd {
- vmMounts = append(vmMounts, corev1.VolumeMount{
- Name: vmDataVolumeName,
- MountPath: storagePath,
- })
- }
- }
-
- }
-
- return volumes, vmMounts
-}
diff --git a/internal/controller/operator/factory/vtsingle/vtsingle.go b/internal/controller/operator/factory/vtsingle/vtsingle.go
index 66dcfd90c..7cd8a70bc 100644
--- a/internal/controller/operator/factory/vtsingle/vtsingle.go
+++ b/internal/controller/operator/factory/vtsingle/vtsingle.go
@@ -24,8 +24,7 @@ import (
)
const (
- vtsingleDataDir = "/victoria-traces-data"
- vtsingleDataVolumeName = "data"
+ dataDataDir = "/victoria-traces-data"
tlsServerConfigMountPath = "/etc/vm/tls-server-secrets"
)
@@ -69,7 +68,7 @@ func CreateOrUpdate(ctx context.Context, rclient client.Client, cr *vmv1.VTSingl
return err
}
}
- if cr.Spec.Storage != nil && cr.Spec.StorageDataPath == "" {
+ if cr.Spec.Storage != nil {
if err := createOrUpdatePVC(ctx, rclient, cr, prevCR); err != nil {
return err
}
@@ -156,10 +155,7 @@ func makePodSpec(r *vmv1.VTSingle) (*corev1.PodTemplateSpec, error) {
args = append(args, fmt.Sprintf("-retention.maxDiskSpaceUsageBytes=%s", r.Spec.RetentionMaxDiskSpaceUsageBytes))
}
- // if customStorageDataPath is not empty, do not add pvc.
- shouldAddPVC := r.Spec.StorageDataPath == ""
-
- storagePath := vtsingleDataDir
+ storagePath := dataDataDir
if r.Spec.StorageDataPath != "" {
storagePath = r.Spec.StorageDataPath
}
@@ -193,37 +189,17 @@ func makePodSpec(r *vmv1.VTSingle) (*corev1.PodTemplateSpec, error) {
var ports []corev1.ContainerPort
ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(r.Spec.Port).IntVal})
- volumes := []corev1.Volume{}
-
- storageSpec := r.Spec.Storage
-
- if storageSpec == nil {
- volumes = append(volumes, corev1.Volume{
- Name: vtsingleDataVolumeName,
- VolumeSource: corev1.VolumeSource{
- EmptyDir: &corev1.EmptyDirVolumeSource{},
- },
- })
- } else if shouldAddPVC {
- volumes = append(volumes, corev1.Volume{
- Name: vtsingleDataVolumeName,
- VolumeSource: corev1.VolumeSource{
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: r.PrefixedName(),
- },
- },
- })
+ var pvcSrc *corev1.PersistentVolumeClaimVolumeSource
+ if r.Spec.Storage != nil {
+ pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: r.PrefixedName(),
+ }
}
- volumes = append(volumes, r.Spec.Volumes...)
- vmMounts := []corev1.VolumeMount{
- {
- Name: vtsingleDataVolumeName,
- MountPath: storagePath,
- },
+ volumes, vmMounts, err := build.StorageVolumeMountsTo(r.Spec.Volumes, r.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName)
+ if err != nil {
+ return nil, err
}
- vmMounts = append(vmMounts, r.Spec.VolumeMounts...)
-
for _, s := range r.Spec.Secrets {
volumes = append(volumes, corev1.Volume{
Name: k8stools.SanitizeVolumeName("secret-" + s),
diff --git a/test/e2e/vlsingle_test.go b/test/e2e/vlsingle_test.go
index bc8cf17bf..889608fa6 100644
--- a/test/e2e/vlsingle_test.go
+++ b/test/e2e/vlsingle_test.go
@@ -145,7 +145,6 @@ var _ = Describe("test vlsingle Controller", Label("vl", "single", "vlsingle"),
},
RetentionPeriod: "1",
StorageDataPath: "/custom-path/internal/dir",
- Storage: &corev1.PersistentVolumeClaimSpec{},
},
},
func(cr *vmv1.VLSingle) {
diff --git a/test/e2e/vmsingle_test.go b/test/e2e/vmsingle_test.go
index a50828cf2..6f0b6cb2c 100644
--- a/test/e2e/vmsingle_test.go
+++ b/test/e2e/vmsingle_test.go
@@ -215,7 +215,7 @@ var _ = Describe("test vmsingle Controller", Label("vm", "single"), func() {
Expect(*createdDeploy.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot).To(BeTrue())
}),
- Entry("with data emptyDir", "emptydir", false,
+ Entry("with storage", "storage", false,
&vmv1beta1.VMSingle{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -245,8 +245,34 @@ var _ = Describe("test vmsingle Controller", Label("vm", "single"), func() {
Expect(k8sClient.Get(ctx, createdChildObjects, &createdDeploy)).To(Succeed())
ts := createdDeploy.Spec.Template.Spec
Expect(ts.Containers).To(HaveLen(1))
- Expect(ts.Volumes).To(BeEmpty())
- Expect(ts.Containers[0].VolumeMounts).To(BeEmpty())
+ Expect(ts.Volumes).To(HaveLen(1))
+ Expect(ts.Containers[0].VolumeMounts).To(HaveLen(1))
+ }),
+ Entry("with empty dir", "emptydir", false,
+ &vmv1beta1.VMSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ },
+ Spec: vmv1beta1.VMSingleSpec{
+ CommonApplicationDeploymentParams: vmv1beta1.CommonApplicationDeploymentParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ CommonDefaultableParams: vmv1beta1.CommonDefaultableParams{
+ UseStrictSecurity: ptr.To(false),
+ },
+ RetentionPeriod: "1",
+ RemovePvcAfterDelete: true,
+ StorageDataPath: "/tmp/",
+ },
+ },
+ func(cr *vmv1beta1.VMSingle) {
+ createdChildObjects := types.NamespacedName{Namespace: namespace, Name: cr.PrefixedName()}
+ var createdDeploy appsv1.Deployment
+ Expect(k8sClient.Get(ctx, createdChildObjects, &createdDeploy)).To(Succeed())
+ ts := createdDeploy.Spec.Template.Spec
+ Expect(ts.Containers).To(HaveLen(1))
+ Expect(ts.Volumes).To(HaveLen(1))
+ Expect(ts.Containers[0].VolumeMounts).To(HaveLen(1))
}),
Entry("with external volume", "externalvolume", true,
&vmv1beta1.VMSingle{
@@ -289,7 +315,6 @@ var _ = Describe("test vmsingle Controller", Label("vm", "single"), func() {
RetentionPeriod: "1",
RemovePvcAfterDelete: true,
StorageDataPath: "/custom-path/internal/dir",
- Storage: &corev1.PersistentVolumeClaimSpec{},
VMBackup: &vmv1beta1.VMBackup{
Destination: "fs:///opt/backup",
VolumeMounts: []corev1.VolumeMount{{Name: "backup", MountPath: "/opt/backup"}},
@@ -305,9 +330,13 @@ var _ = Describe("test vmsingle Controller", Label("vm", "single"), func() {
Expect(ts.Volumes).To(HaveLen(4))
Expect(ts.Containers[0].VolumeMounts).To(HaveLen(3))
Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("data"))
- Expect(ts.Containers[1].VolumeMounts).To(HaveLen(3))
+ Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("unused"))
+ Expect(ts.Containers[0].VolumeMounts[2].Name).To(Equal("license"))
+ Expect(ts.Containers[1].VolumeMounts).To(HaveLen(4))
Expect(ts.Containers[1].VolumeMounts[0].Name).To(Equal("data"))
- Expect(ts.Containers[1].VolumeMounts[1].Name).To(Equal("backup"))
+ Expect(ts.Containers[1].VolumeMounts[1].Name).To(Equal("unused"))
+ Expect(ts.Containers[1].VolumeMounts[2].Name).To(Equal("backup"))
+ Expect(ts.Containers[1].VolumeMounts[3].Name).To(Equal("license"))
}),
)
diff --git a/test/e2e/vtsingle_test.go b/test/e2e/vtsingle_test.go
index 1c4ef97eb..e5756c7f6 100644
--- a/test/e2e/vtsingle_test.go
+++ b/test/e2e/vtsingle_test.go
@@ -145,7 +145,6 @@ var _ = Describe("test vtsingle Controller", Label("vt", "single", "vtsingle"),
},
RetentionPeriod: "1",
StorageDataPath: "/custom-path/internal/dir",
- Storage: &corev1.PersistentVolumeClaimSpec{},
},
},
func(cr *vmv1.VTSingle) {