Skip to content

Commit def20e2

Browse files
authored
Merge pull request #202 from k8s-proxmox/fix/scheduler
Fix/scheduler
2 parents ca5c26d + 080f1e5 commit def20e2

File tree

2 files changed

+18
-14
lines changed

2 files changed

+18
-14
lines changed

README.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ for more information : https://cluster-api.sigs.k8s.io/user/quick-start.html#ini
2222
```sh
2323
# install cluster-api components
2424
export EXP_CLUSTER_RESOURCE_SET=true
25-
clusterctl init --infrastructure=proxmox:v0.4.2 --config https://raw.githubusercontent.com/k8s-proxmox/cluster-api-provider-proxmox/main/clusterctl.yaml
25+
clusterctl init --infrastructure=proxmox:v0.4.3 --config https://raw.githubusercontent.com/k8s-proxmox/cluster-api-provider-proxmox/main/clusterctl.yaml
2626
```
2727

2828
**Note:** container images are available at [ghcr.io/k8s-proxmox/cluster-api-provider-proxmox:\<tag\>](https://github.com/k8s-proxmox/cluster-api-provider-proxmox/pkgs/container/cluster-api-provider-proxmox)
@@ -37,7 +37,7 @@ export PROXMOX_PASSWORD=password
3737
export PROXMOX_USER=user@pam
3838

3939
# generate manifests (available flags: --target-namespace, --kubernetes-version, --control-plane-machine-count, --worker-machine-count)
40-
clusterctl generate cluster cappx-test --control-plane-machine-count=3 --infrastructure=proxmox:v0.4.2 --config https://raw.githubusercontent.com/k8s-proxmox/cluster-api-provider-proxmox/main/clusterctl.yaml > cappx-test.yaml
40+
clusterctl generate cluster cappx-test --control-plane-machine-count=3 --infrastructure=proxmox:v0.4.3 --config https://raw.githubusercontent.com/k8s-proxmox/cluster-api-provider-proxmox/main/clusterctl.yaml > cappx-test.yaml
4141

4242
# inspect and edit
4343
vi cappx-test.yaml

cloud/scheduler/scheduler.go

+16-12
Original file line numberDiff line numberDiff line change
@@ -326,18 +326,21 @@ func (s *Scheduler) SelectVMID(ctx context.Context, config api.VirtualMachineCre
326326
}
327327

328328
func (s *Scheduler) SelectStorage(ctx context.Context, config api.VirtualMachineCreateOptions, nodeName string) (string, error) {
329-
s.logger.Info("finding proxmox storage to be used for qemu")
329+
log := s.logger.WithValues("qemu", config.Name).WithValues("node", nodeName)
330+
log.Info("finding proxmox storage to be used for qemu")
330331
if config.Storage != "" {
331332
// to do: raise error if storage is not available on the node
332333
return config.Storage, nil
333334
}
334335

335336
node, err := s.client.Node(ctx, nodeName)
336337
if err != nil {
338+
log.Error(err, "failed to get node")
337339
return "", err
338340
}
339341
storages, err := node.GetStorages(ctx)
340342
if err != nil {
343+
log.Error(err, "failed to get storages")
341344
return "", err
342345
}
343346

@@ -375,44 +378,45 @@ func (s *Scheduler) RunFilterPlugins(ctx context.Context, state *framework.Cycle
375378
return feasibleNodes, nil
376379
}
377380

378-
func (s *Scheduler) RunScorePlugins(ctx context.Context, state *framework.CycleState, config api.VirtualMachineCreateOptions, nodes []*api.Node) (framework.NodeScoreList, *framework.Status) {
381+
func (s *Scheduler) RunScorePlugins(ctx context.Context, state *framework.CycleState, config api.VirtualMachineCreateOptions, nodes []*api.Node) (map[string]framework.NodeScore, *framework.Status) {
379382
s.logger.Info("scoring proxmox node")
380383
status := framework.NewStatus()
381-
scoresMap := make(map[string](map[int]framework.NodeScore))
384+
scoresMap := make(map[string](map[string]framework.NodeScore))
382385
for _, pl := range s.registry.ScorePlugins() {
383-
scoresMap[pl.Name()] = make(map[int]framework.NodeScore)
386+
scoresMap[pl.Name()] = make(map[string]framework.NodeScore)
384387
}
385388
nodeInfos, err := framework.GetNodeInfoList(ctx, s.client)
386389
if err != nil {
387390
status.SetCode(1)
388391
s.logger.Error(err, "failed to get node info list")
389392
return nil, status
390393
}
391-
for index, nodeInfo := range nodeInfos {
394+
for _, nodeInfo := range nodeInfos {
392395
for _, pl := range s.registry.ScorePlugins() {
393396
score, status := pl.Score(ctx, state, config, nodeInfo)
394397
if !status.IsSuccess() {
395398
status.SetCode(1)
396399
s.logger.Error(status.Error(), fmt.Sprintf("failed to score node %s", nodeInfo.Node().Node))
397400
return nil, status
398401
}
399-
scoresMap[pl.Name()][index] = framework.NodeScore{
402+
scoresMap[pl.Name()][nodeInfo.Node().Node] = framework.NodeScore{
400403
Name: nodeInfo.Node().Node,
401404
Score: score,
402405
}
403406
}
404407
}
405-
result := make(framework.NodeScoreList, 0, len(nodes))
406-
for i := range nodes {
407-
result = append(result, framework.NodeScore{Name: nodes[i].Node, Score: 0})
408-
for j := range scoresMap {
409-
result[i].Score += scoresMap[j][i].Score
408+
result := make(map[string]framework.NodeScore)
409+
for _, node := range nodes {
410+
result[node.Node] = framework.NodeScore{Name: node.Node, Score: 0}
411+
for plugin := range scoresMap {
412+
r := result[node.Node]
413+
r.Score += scoresMap[plugin][node.Node].Score
410414
}
411415
}
412416
return result, status
413417
}
414418

415-
func selectHighestScoreNode(scoreList framework.NodeScoreList) (string, error) {
419+
func selectHighestScoreNode(scoreList map[string]framework.NodeScore) (string, error) {
416420
if len(scoreList) == 0 {
417421
return "", fmt.Errorf("empty node score list")
418422
}

0 commit comments

Comments (0)