Skip to content
5 changes: 3 additions & 2 deletions deploy/helm/rawfile-localpv/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ Please follow the [install guide](https://github.com/openebs/rawfile-localpv/tre
|-----|------|---------|-------------|
| auth.enabled | bool | `true` | Enables authentication for internal gRPC server |
| auth.token | string | `""` | Sets authentication token for internal gRPC server, will generate one if nothing provided |
| capacityOverride | string | `""` | Overrides total capacity of the storage for data dir storage on each host (Support size values) [e.g. `50GB` or `10MiB`] |
| controller.externalResizer.image.registry | string | `""` | Image registry for `csi-resizer` |
| controller.externalResizer.image.repository | string | `"sig-storage/csi-resizer"` | Image Repository for `csi-resizer` |
| controller.externalResizer.image.tag | string | `"v1.13.2"` | Image tag for `csi-resizer` |
Expand Down Expand Up @@ -58,6 +57,7 @@ Please follow the [install guide](https://github.com/openebs/rawfile-localpv/tre
| metrics.serviceMonitor.enabled | bool | `false` | Enables prometheus service monitor |
| metrics.serviceMonitor.interval | string | `"1m"` | Sets prometheus target interval |
| node.dataDirPath | string | `"/var/csi/rawfile"` | Data dir path for provisioner to be used by provisioner |
| node.defaultPool | string | `"default"` | Default storage pool name |
| node.driverRegistrar.image.registry | string | `""` | Image Registry for `csi-node-driver-registrar` |
| node.driverRegistrar.image.repository | string | `"sig-storage/csi-node-driver-registrar"` | Image Repository for `csi-node-driver-registrar` |
| node.driverRegistrar.image.tag | string | `"v2.13.0"` | Image Tag for `csi-node-driver-registrar` |
Expand All @@ -81,9 +81,9 @@ Please follow the [install guide](https://github.com/openebs/rawfile-localpv/tre
| node.snapshotController.image.registry | string | `""` | Image Registry for `snapshot-controller` |
| node.snapshotController.image.repository | string | `"sig-storage/snapshot-controller"` | Image Repository for `snapshot-controller` |
| node.snapshotController.image.tag | string | `"v8.2.1"` | Image Tag for `snapshot-controller` |
| node.storagePools | object | `{"default":{"capacityOverride":"","defaultFs":"ext4","path":"/var/local/openebs/rawfile/default-pool/","reservedCapacity":""}}` | Storage pools configuration |
| node.tolerations | string | `nil` | Tolerations for node component |
| provisionerName | string | `"rawfile.csi.openebs.io"` | Name of the registered CSI Driver in the cluster |
| reservedCapacity | string | `""` | Used to reserve capacity on each node for data dir storage on each host (Supports percentage and size) [e.g. `25%` or `50GB` or `10MiB`] |
| snapshotClasses[0].deletionPolicy | string | `"Delete"` | Sets deletion policy for snapshots created using this class (Delete or Retain) |
| snapshotClasses[0].enabled | bool | `true` | Enable or disable SnapshotClass |
| snapshotClasses[0].isDefault | bool | `false` | Make the snapshot class as default |
Expand All @@ -98,5 +98,6 @@ Please follow the [install guide](https://github.com/openebs/rawfile-localpv/tre
| storageClasses[0].mountOptions | list | `[]` | Sets mount options for filesystem volumes |
| storageClasses[0].name | string | `"rawfile-localpv"` | Name of the StorageClass |
| storageClasses[0].reclaimPolicy | string | `"Delete"` | Sets default reclaimPolicy for StorageClass volumes |
| storageClasses[0].storagePool | string | `"default"` | Sets storage pool used for volumes |
| storageClasses[0].thinProvision | string | `""` | Enables thin provisioning of volumes |
| storageClasses[0].volumeBindingMode | string | `"WaitForFirstConsumer"` | Sets volumeBindingMode for StorageClass |
16 changes: 16 additions & 0 deletions deploy/helm/rawfile-localpv/templates/_helpers.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -120,3 +120,19 @@ Some helpers to handle image global information
{{- define "rawfile-localpv.metadata-dir-path" -}}
{{- tpl .Values.node.metadataDirPath . }}
{{- end }}

{{/*
Emits one hostPath volume entry per storage pool in .Values.node.storagePools,
named "pool-<poolName>". Each pool's path is rendered through tpl (so it may
contain template expressions) and is auto-created on the host if missing
(DirectoryOrCreate). Intended to be included under a pod spec's `volumes:`
list with an appropriate nindent.
*/}}
{{- define "rawfile-localpv.pool-volumes" -}}
{{- range $name, $pool := .Values.node.storagePools }}
- name: pool-{{ $name }}
  hostPath:
    path: {{ tpl $pool.path . }}
    type: DirectoryOrCreate
{{- end }}
{{- end }}

{{/*
Emits one volumeMount entry per storage pool in .Values.node.storagePools,
mounting each "pool-<poolName>" volume at the pool's own (tpl-rendered) host
path so the container sees pools at the same paths as the host. Must be kept
in sync with "rawfile-localpv.pool-volumes" (same names, same range order).
Intended to be included under a container's `volumeMounts:` list.
*/}}
{{- define "rawfile-localpv.pool-volume-mounts" -}}
{{- range $name, $pool := .Values.node.storagePools }}
- name: pool-{{ $name }}
  mountPath: {{ tpl $pool.path . }}
{{- end }}
{{- end }}
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,6 @@ spec:
hostPath:
path: {{ include "rawfile-localpv.node-kubelet-path" . }}
type: DirectoryOrCreate
- name: data-dir
hostPath:
path: {{ .Values.node.dataDirPath }}
type: DirectoryOrCreate
- name: metadata-dir
hostPath:
path: {{ include "rawfile-localpv.metadata-dir-path" . }}
Expand All @@ -43,6 +39,7 @@ spec:
hostPath:
path: /dev
type: Directory
{{- include "rawfile-localpv.pool-volumes" . | nindent 8 }}
containers:
- name: csi-driver
image: "{{ include "rawfile-localpv.node-image" . }}"
Expand Down Expand Up @@ -111,6 +108,28 @@ spec:
name: {{ include "rawfile-localpv.fullname" . }}-secrets
key: internal-signature
{{- end }}
- name: CSI_DRIVER__DEFAULT_POOL
value: {{ .Values.node.defaultPool }}
- name: CSI_DRIVER__STORAGE_POOLS
value: |
{
{{- $keys := keys .Values.node.storagePools | sortAlpha }}
{{- range $i, $name := $keys }}
"{{ $name }}": {
"path": "{{ tpl (index $.Values.node.storagePools $name).path . }}"
{{- with (index $.Values.node.storagePools $name).reservedCapacity }}
, "reserved_capacity": "{{ . | toString }}"
{{- end }}
{{- with (index $.Values.node.storagePools $name).capacityOverride }}
, "capacity_override": "{{ . | toString }}"
{{- end }}
{{- with (index $.Values.node.storagePools $name).defaultFs }}
, "default_fs": "{{ . }}"
{{- end }}
}{{ if lt (add1 $i) (len $keys) }},{{ end }}
{{- end }}
}

ports:
- name: metrics
containerPort: {{ .Values.metrics.port }}
Expand All @@ -122,12 +141,11 @@ spec:
- name: mountpoint-dir
mountPath: {{ include "rawfile-localpv.node-kubelet-path" . }}
mountPropagation: "Bidirectional"
- name: data-dir
mountPath: /data
- name: metadata-dir
mountPath: {{ include "rawfile-localpv.metadata-dir-path" . }}
- name: device
mountPath: /dev
{{- include "rawfile-localpv.pool-volume-mounts" . | nindent 12 }}
resources:
{{- include "rawfile-localpv.controller-resources" . | nindent 12 }}
- name: node-driver-registrar
Expand Down
3 changes: 3 additions & 0 deletions deploy/helm/rawfile-localpv/templates/storageclass.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,9 @@ parameters:
formatOptions: {{ ($class.formatOptions | default (list)) | join " " | quote }}
copyOnWrite: {{ $class.copyOnWrite | default "false" | toString | quote }}
freezeFs: {{ $class.freezeFs | default "false" | toString | quote }}
{{- with $class.storagePool }}
storagePool: {{ . }}
{{- end }}
---
{{- end }}
{{- end }}
17 changes: 12 additions & 5 deletions deploy/helm/rawfile-localpv/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,6 @@ logLevel: INFO
# -- Format of the logs (json, pretty)
logFormat: json

# -- Used to reserve capacity on each node for data dir storage on each host (Supports percentage and size) [e.g. `25%` or `50GB` or `10MiB`]
reservedCapacity: ""
# -- Overrides total capacity of the storage for data dir storage on each host (Support size values) [e.g. `50GB` or `10MiB`]
capacityOverride: ""

auth:
# -- Enables authentication for internal gRPC server
enabled: true
Expand Down Expand Up @@ -129,6 +124,16 @@ node:
# -- Data dir path for provisioner to be used by provisioner
dataDirPath: /var/csi/rawfile

# -- Default storage pool name
defaultPool: default
# -- Storage pools configuration
storagePools:
default:
path: /var/local/openebs/rawfile/default-pool/
reservedCapacity: ""
capacityOverride: ""
defaultFs: ext4

# -- Metadata dir path for rawfile volumes metadata and tasks store file
metadataDirPath: /var/local/openebs/rawfile/{{ .Release.Name }}/meta

Expand Down Expand Up @@ -201,6 +206,8 @@ storageClasses:
copyOnWrite: ""
# -- Enables FreezeFS on the storage class; can be used to enable snapshotting of in-use volumes when CoW is disabled/not supported (False by default)
freezeFs: ""
# -- Sets storage pool used for volumes
storagePool: default

snapshotClasses:
- # -- Name of the SnapshotClass
Expand Down
13 changes: 10 additions & 3 deletions rawfile/bd2fs.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,8 @@ def NodeGetInfo(self, request, context):
@log_grpc_request
def NodeStageVolume(self, request, context):
with VolLock(request.volume_id):
if not metadata_or(volume_id=request.volume_id).get("ready", False):
meta = metadata_or(volume_id=request.volume_id)
if not meta.get("ready", False):
raise VolumeNotReadyError(request.volume_id)
bd_stage_request = NodeStageVolumeRequest()
bd_stage_request.CopyFrom(request)
Expand Down Expand Up @@ -141,7 +142,13 @@ def NodeStageVolume(self, request, context):
format_options = format_options_str.split(" ")
default_fs = request.volume_capability.mount.fs_type
fs = get_from_device_or_fallback(
bd_publish_request.target_path, (default_fs or config.default_fs)
bd_publish_request.target_path,
(
default_fs
or config.csi_driver.storage_pools[
meta["storage_pool"]
].default_fs
),
)
fs.mountpoint = f"{request.staging_target_path}/mount"
fs.format_and_mount(
Expand Down Expand Up @@ -300,7 +307,7 @@ def CreateSnapshot(self, request: csi_pb2.CreateSnapshotRequest, context):
copy_on_write = (
copy_on_write_param
if copy_on_write_param is not None
else consts.COW_SUPPORTED
else consts.COW_SUPPORT_MAP.get(volume_meta["storage_pool"], False)
)

def _get_current_snapshot():
Expand Down
84 changes: 65 additions & 19 deletions rawfile/config/model.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import re
import warnings
from pydantic_settings import (
BaseSettings,
Expand All @@ -15,13 +16,41 @@
DirectoryPath,
StringConstraints,
Field,
field_validator,
model_validator,
)
from pydantic.networks import IPvAnyAddress
from typing import Annotated, Literal
from typing import Annotated, Final, Literal
import consts
from utils.logs import LoggingFormats
from datetime import timedelta
import json

# DNS-subdomain-style name pattern used to validate storage-pool names:
# total length 1-253 chars, no empty labels (".."), dot-separated labels of
# alphanumerics and hyphens that must start and end with an alphanumeric.
# NOTE(review): each label is capped at 63 chars by the {0,61} middle run —
# presumably mirroring Kubernetes/RFC 1123 naming rules; confirm intent.
NAME_REGEX: Final[re.Pattern] = re.compile(
    r"^(?=.{1,253}$)(?!.*\.\.)([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)(\.([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?))*$"
)


class StoragePool(BaseModel):
    """Configuration of a single node-local storage pool.

    A pool is a host directory under which raw-file volume images are
    created. Capacity accounting for the pool can be tuned with
    ``reserved_capacity`` and ``capacity_override``.
    """

    # Host directory backing the pool; must exist and be unique across pools
    # (uniqueness is enforced by the CSIDriverCmd model validator).
    path: DirectoryPath = Field(description="Path of the pool, Should be unique")
    # Either an absolute size (ByteSize, e.g. "10GiB") or a percentage
    # string such as "25%" of the pool's total capacity.
    reserved_capacity: (
        ByteSize
        | Annotated[
            str,
            StringConstraints(strip_whitespace=True, pattern=r"^\d+%$"),
        ]
    ) = Field(
        default=ByteSize(0),
        description="Reserves capacity of storage pool",
    )
    # When set, reported total capacity is this value instead of the
    # filesystem's real size.
    capacity_override: ByteSize | None = Field(
        default=None,
        description="Overrides total capacity of storage pool",
    )
    # Filesystem used for new volumes when the StorageClass does not
    # specify an fsType.
    default_fs: FileSystemName = Field(
        default=FileSystemName.EXT4,
        description="Default filesystem used where creating volumes and fsType is not specified in storage class parameters",
    )


class CSIDriverCmd(BaseModel):
Expand All @@ -33,6 +62,14 @@ class CSIDriverCmd(BaseModel):
) = Field(
description="Listen address for gRPC server",
)
storage_pools: dict[str, StoragePool] | None = Field(
description="List of storage pools (Map of name to configuration), required when running node plugin",
default=None,
)
default_pool: str | None = Field(
description="Name of the default storage pool, used when no storage pool is defined in storage class",
default=None,
)
internal_ip: IPvAnyAddress | None = Field(
description="Listen ip for gRPC server (used for internal communication only)",
default=None,
Expand Down Expand Up @@ -73,6 +110,15 @@ class CSIDriverCmd(BaseModel):
description="Type/Mode of the CSI plugin"
)

@field_validator("storage_pools", mode="before")
def parse_dict(cls, v):
    """Pre-validator for ``storage_pools``.

    Accepts the value either as an already-parsed mapping or as a JSON
    string (as delivered via the CSI_DRIVER__STORAGE_POOLS environment
    variable) and decodes the latter before field validation runs.

    Raises:
        ValueError: if the string is not valid JSON (original decode
            error is chained for easier debugging).
    """
    if isinstance(v, str):
        try:
            return json.loads(v)
        # json.loads on a str raises JSONDecodeError (a ValueError subclass).
        except ValueError as e:
            raise ValueError(f"Invalid JSON for storage_pools: {e}") from e
    return v

@model_validator(mode="after")
def validate_node_plugin(
self,
Expand All @@ -84,6 +130,24 @@ def validate_node_plugin(
)
if not self.metadata_dir:
raise ValueError("Metadata Dir is required when running node plugin")
if not self.storage_pools or len(self.storage_pools) == 0:
raise ValueError(
"Storage Pool list is required when running node plugin"
)

paths = []
for name, pool in self.storage_pools.items():
if len(name) > 63 or len(name) < 3:
raise ValueError(
"Name of the storage pool should be between 3 and 63 characters"
)
if not NAME_REGEX.match(name):
raise ValueError(
"Name of the storage pool should be DNS compatible"
)
if pool.path in paths:
raise ValueError("Duplicate path in storage pool is not supported")
paths.append(pool.path)
return self


Expand All @@ -110,24 +174,6 @@ def settings_customise_sources(
) -> tuple[PydanticBaseSettingsSource, ...]:
return CliSettingsSource(settings_cls, cli_parse_args=True), env_settings

reserved_capacity: (
ByteSize
| Annotated[
str,
StringConstraints(strip_whitespace=True, pattern=r"^\d+%$"),
]
) = Field(
default=ByteSize(0),
description="Reserves capacity of data dir",
)
capacity_override: ByteSize | None = Field(
default=None,
description="Overrides total capacity of data dir",
)
default_fs: FileSystemName = Field(
default=FileSystemName.EXT4,
description="Default filesystem used where creating volumes and fsType is not specified in storage class parameters",
)
namespace: str = Field(
description="K8s Namespace of the driver",
)
Expand Down
3 changes: 1 addition & 2 deletions rawfile/consts.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
PROVISIONER_VERSION = os.getenv("PROVISIONER_VERSION") or importlib.metadata.version(
"rawfile"
)
DATA_DIR = "/data"
D_PERMS = 0o700
F_PERMS = 0o600
OWNER_UMASK = 0o077
Expand All @@ -15,4 +14,4 @@
CSI_K8S_PVC_NAME_KEY = "csi.storage.k8s.io/pvc/name"
GA_ID = "Ry1UWkdQNDY2MThX"
GA_KEY = "OTFKR2RUZzlRd0duN1ktdnZ1TTR6QQ=="
COW_SUPPORTED: bool | None = None
COW_SUPPORT_MAP = {}
7 changes: 4 additions & 3 deletions rawfile/internal_svc.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,18 +57,19 @@ def ExpandRawFile(
is_attached=volume_manager.is_attached(request.volume_id),
status=internal_pb2.ExpandRawFileStatus.OK,
)

if get_capacity() < size_inc:
meta = metadata(request.volume_id)
if get_capacity(meta["storage_pool"]) < request.new_size:
return internal_pb2.ExpandRawFileResponse(
is_attached=volume_manager.is_attached(request.volume_id),
status=internal_pb2.ExpandRawFileStatus.RESOURCE_EXHAUSTED,
)
if metadata(request.volume_id).get("thin_provision", False):
if meta.get("thin_provision", False):
truncate(img_file_path, request.new_size)
else:
fallocate(img_file_path, request.new_size)
patch_metadata(
request.volume_id,
meta["storage_pool"],
{"size": request.new_size},
)
return internal_pb2.ExpandRawFileResponse(
Expand Down
10 changes: 9 additions & 1 deletion rawfile/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,14 @@
from prometheus_client.metrics_core import GaugeMetricFamily
from utils.storage_pool import get_capacity
from utils.volume_manager import manager as volume_manager
from config import config


def get_remaining_capacity():
    """Return the remaining capacity in bytes summed over every configured
    storage pool (as reported by ``get_capacity`` per pool name)."""
    # Iterating the dict directly yields pool names; sum() replaces the
    # manual accumulator loop.
    return sum(get_capacity(pool) for pool in config.csi_driver.storage_pools)


class VolumeStatsCollector(object):
Expand All @@ -28,7 +36,7 @@ def collect(self):
labels=["node", "volume"],
unit="bytes",
)
remaining_capacity.add_metric([self.node], get_capacity())
remaining_capacity.add_metric([self.node], get_remaining_capacity())
for volume_id, stats in volume_manager.get_all_volumes_stats().items():
volume_used.add_metric([self.node, volume_id], stats["used"])
volume_total.add_metric([self.node, volume_id], stats["total"])
Expand Down
Loading
Loading