Skip to content
  •  
  •  
  •  
26 changes: 22 additions & 4 deletions cmd/node-disk-manager/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import (

"github.com/ehazlett/simplelog"
ctlharvester "github.com/harvester/harvester/pkg/generated/controllers/harvesterhci.io"
k8scorev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core"
"github.com/rancher/wrangler/v3/pkg/kubeconfig"
"github.com/rancher/wrangler/v3/pkg/signals"
"github.com/rancher/wrangler/v3/pkg/start"
Expand Down Expand Up @@ -215,9 +216,27 @@ func run(opt *option.Option) error {
return fmt.Errorf("error building node-disk-manager controllers: %s", err.Error())
}

corev1, err := k8scorev1.NewFactoryFromConfig(kubeConfig)
if err != nil {
return fmt.Errorf("error building node-disk-manager controllers: %s", err.Error())
}

configmap := corev1.Core().V1().ConfigMap()

// Create ConfigMapLoader for dynamic configuration reloading
// The env variables are used as fallback when ConfigMap is not available or empty
configMapLoader := filter.NewConfigMapLoader(
configmap,
filter.DefaultConfigMapNamespace,
opt.NodeName,
opt.VendorFilter,
opt.PathFilter,
opt.LabelFilter,
opt.AutoProvisionFilter,
)

terminatedChannel := make(chan bool, 1)
excludeFilters := filter.SetExcludeFilters(opt.VendorFilter, opt.PathFilter, opt.LabelFilter)
autoProvisionFilters := filter.SetAutoProvisionFilters(opt.AutoProvisionFilter)

locker := &sync.Mutex{}
cond := sync.NewCond(locker)
upgrades := harvesters.Harvesterhci().V1beta1().Upgrade()
Expand All @@ -230,8 +249,7 @@ func run(opt *option.Option) error {
upgrades,
bds,
block,
excludeFilters,
autoProvisionFilters,
configMapLoader,
cond,
false,
&terminatedChannel,
Expand Down
16 changes: 16 additions & 0 deletions deploy/charts/harvester-node-disk-manager/templates/configmap.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# ConfigMap carrying optional node-disk-manager filter and auto-provision
# rules from chart values. Each data key is rendered only when the matching
# value is set, so an unset configuration yields a ConfigMap with no data.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "harvester-node-disk-manager.name" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "harvester-node-disk-manager.labels" . | nindent 4 }}
data:
  # Device-exclusion rules from .Values.configMap.filters.
  {{- if .Values.configMap.filters }}
  filters.yaml: |
    {{- toYaml .Values.configMap.filters | nindent 4 }}
  {{- end }}
  # Auto-provision rules from .Values.configMap.autoprovision.
  {{- if .Values.configMap.autoprovision }}
  autoprovision.yaml: |
    {{- toYaml .Values.configMap.autoprovision | nindent 4 }}
  {{- end }}
28 changes: 28 additions & 0 deletions deploy/charts/harvester-node-disk-manager/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -85,3 +85,31 @@ autoGPTGenerate:

# Enable debug logging
debug: false

# ConfigMap-based configuration
configMap:
# Filter configurations - excludes devices from being managed by NDM
# Each entry can specify hostname (use "*" or "" for global rules)
filters: []
# Example:
# - hostname: "*"
# excludeDevices:
# - "/dev/sda"
# excludeLabels:
# - "COS_*"
# excludeVendors:
# - "QEMU"
# excludePaths:
# - "/var/lib/longhorn"
# - hostname: "node-1"
# excludeDevices:
# - "/dev/sdb"

# Auto-provision configurations - automatically provisions matching devices
# provisioner defaults to "longhornv1" if not specified
autoprovision: []
# Example:
# - hostname: "*"
# devices:
# - "/dev/sdc"
# - "/dev/sdd"
3 changes: 2 additions & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ require (
github.com/stretchr/testify v1.11.1
github.com/urfave/cli/v2 v2.3.0
golang.org/x/crypto v0.42.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.34.1
k8s.io/apimachinery v0.34.1
k8s.io/client-go v12.0.0+incompatible
Expand Down Expand Up @@ -97,6 +98,7 @@ require (
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.uber.org/mock v0.5.2 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/mod v0.28.0 // indirect
Expand All @@ -113,7 +115,6 @@ require (
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
k8s.io/apiextensions-apiserver v0.34.1 // indirect
k8s.io/code-generator v0.33.5 // indirect
Expand Down
2 changes: 1 addition & 1 deletion pkg/controller/blockdevice/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ func Register(
provisionerLock: &sync.Mutex{},
}

if err := scanner.Start(); err != nil {
if err := scanner.Start(ctx); err != nil {
return err
}

Expand Down
106 changes: 91 additions & 15 deletions pkg/controller/blockdevice/scanner.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package blockdevice

import (
"context"
"fmt"
"path/filepath"
"reflect"
Expand Down Expand Up @@ -30,6 +31,7 @@ type Scanner struct {
BlockInfo block.Info
ExcludeFilters []*filter.Filter
AutoProvisionFilters []*filter.Filter
ConfigMapLoader *filter.ConfigMapLoader
Cond *sync.Cond
Shutdown bool
TerminatedChannels *chan bool
Expand All @@ -45,29 +47,29 @@ func NewScanner(
upgrades ctlharvesterv1.UpgradeController,
bds ctldiskv1.BlockDeviceController,
block block.Info,
excludeFilters, autoProvisionFilters []*filter.Filter,
configMapLoader *filter.ConfigMapLoader,
cond *sync.Cond,
shutdown bool,
ch *chan bool,
) *Scanner {
return &Scanner{
NodeName: nodeName,
Namespace: namespace,
Blockdevices: bds,
UpgradeClient: upgrades,
BlockInfo: block,
ExcludeFilters: excludeFilters,
AutoProvisionFilters: autoProvisionFilters,
Cond: cond,
Shutdown: shutdown,
TerminatedChannels: ch,
NodeName: nodeName,
Namespace: namespace,
Blockdevices: bds,
UpgradeClient: upgrades,
BlockInfo: block,
ConfigMapLoader: configMapLoader,
Cond: cond,
Shutdown: shutdown,
TerminatedChannels: ch,
}
}

func (s *Scanner) Start() error {
if err := s.scanBlockDevicesOnNode(); err != nil {
func (s *Scanner) Start(ctx context.Context) error {
if err := s.scanBlockDevicesOnNode(ctx); err != nil {
return err
}

go func() {
for {
s.Cond.L.Lock()
Expand All @@ -83,7 +85,7 @@ func (s *Scanner) Start() error {
}

logrus.Infof("scanner waked up, do scan...")
if err := s.scanBlockDevicesOnNode(); err != nil {
if err := s.scanBlockDevicesOnNode(ctx); err != nil {
logrus.Errorf("Failed to rescan block devices on node %s: %v", s.NodeName, err)
}
s.Cond.L.Unlock()
Expand Down Expand Up @@ -200,10 +202,84 @@ func (s *Scanner) deactivateBlockDevices(oldBds map[string]*diskv1.BlockDevice)
return nil
}

// reloadConfigMapFilters reloads filter and auto-provision configurations from ConfigMap
// Falls back to environment variables if ConfigMap is not available or empty
func (s *Scanner) loadConfigMapFilters(ctx context.Context) {
deviceFilter, vendorFilter, pathFilter, labelFilter, err := s.ConfigMapLoader.LoadFiltersFromConfigMap(ctx)
if err != nil {
logrus.Warnf("Failed to reload filters from ConfigMap: %v, using environment variable fallback", err)
deviceFilter, vendorFilter, pathFilter, labelFilter = s.ConfigMapLoader.GetEnvFilters()
} else if deviceFilter == "" && vendorFilter == "" && pathFilter == "" && labelFilter == "" {
// ConfigMap exists but is empty, use env var fallback
logrus.Info("ConfigMap filter data is empty, using environment variable fallback")
deviceFilter, vendorFilter, pathFilter, labelFilter = s.ConfigMapLoader.GetEnvFilters()
} else {
// Use ConfigMap values (they take precedence)
logrus.Info("Using filter configuration from ConfigMap")
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should we need this logger?
The configmap loader should have a logger to prove the configmap was loaded, right?

}

// Update filters
s.ExcludeFilters = filter.SetExcludeFilters(deviceFilter, vendorFilter, pathFilter, labelFilter)

autoProvisionFilter, err := s.ConfigMapLoader.LoadAutoProvisionFromConfigMap(ctx)
if err != nil {
logrus.Warnf("Failed to reload auto-provision from ConfigMap: %v, using environment variable fallback", err)
autoProvisionFilter = s.ConfigMapLoader.GetEnvAutoProvisionFilter()
} else if autoProvisionFilter == "" {
// ConfigMap exists but is empty, use env var fallback
logrus.Debug("ConfigMap auto-provision data is empty, using environment variable fallback")
autoProvisionFilter = s.ConfigMapLoader.GetEnvAutoProvisionFilter()
} else {
// Use ConfigMap values (they take precedence)
logrus.Info("Using auto-provision configuration from ConfigMap")
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ditto

}

// Update auto-provision filters
s.AutoProvisionFilters = filter.SetAutoProvisionFilters(autoProvisionFilter)
}

// debugFilter logs the fully resolved filter configuration (including
// defaults) at debug level so the effective rules on this node can be
// inspected after a ConfigMap reload.
func (s *Scanner) debugFilter() {
	logrus.Debug("Final filter configuration (including defaults):")
	logrus.Debugf(" Exclude Filters (%d total):", len(s.ExcludeFilters))
	logFilterSet(s.ExcludeFilters)

	logrus.Debugf(" Auto-Provision Filters (%d total):", len(s.AutoProvisionFilters))
	logFilterSet(s.AutoProvisionFilters)
}

// logFilterSet logs each filter's name plus its disk and partition
// sub-filter details, printing "N/A" for an absent sub-filter.
func logFilterSet(filters []*filter.Filter) {
	for i, f := range filters {
		diskDetails := "N/A"
		partDetails := "N/A"
		if f.DiskFilter != nil {
			diskDetails = f.DiskFilter.Details()
		}
		if f.PartFilter != nil {
			partDetails = f.PartFilter.Details()
		}
		logrus.Debugf(" [%d] %s", i, f.Name)
		logrus.Debugf(" Disk: %s", diskDetails)
		logrus.Debugf(" Part: %s", partDetails)
	}
}

// scanBlockDevicesOnNode scans block devices on the node, and it will either create or update them.
func (s *Scanner) scanBlockDevicesOnNode() error {
func (s *Scanner) scanBlockDevicesOnNode(ctx context.Context) error {
logrus.Debugf("Scan block devices of node: %s", s.NodeName)

// load filter and auto-provision configurations from ConfigMap
s.loadConfigMapFilters(ctx)
s.debugFilter()

// list all the block devices
allDevices := s.collectAllDevices()

Expand Down
Loading
Loading