4 changes: 2 additions & 2 deletions README.md
@@ -22,7 +22,7 @@ for more information : https://cluster-api.sigs.k8s.io/user/quick-start.html#ini
```sh
# install cluster-api components
export EXP_CLUSTER_RESOURCE_SET=true
-clusterctl init --infrastructure=proxmox:v0.4.2 --config https://raw.githubusercontent.com/k8s-proxmox/cluster-api-provider-proxmox/main/clusterctl.yaml
+clusterctl init --infrastructure=proxmox:v0.4.3 --config https://raw.githubusercontent.com/k8s-proxmox/cluster-api-provider-proxmox/main/clusterctl.yaml
```

**Note:** container images are available at [ghcr.io/k8s-proxmox/cluster-api-provider-proxmox:\<tag\>](https://github.com/k8s-proxmox/cluster-api-provider-proxmox/pkgs/container/cluster-api-provider-proxmox)
@@ -37,7 +37,7 @@ export PROXMOX_PASSWORD=password
export PROXMOX_USER=user@pam

# generate manifests (available flags: --target-namespace, --kubernetes-version, --control-plane-machine-count, --worker-machine-count)
-clusterctl generate cluster cappx-test --control-plane-machine-count=3 --infrastructure=proxmox:v0.4.2 --config https://raw.githubusercontent.com/k8s-proxmox/cluster-api-provider-proxmox/main/clusterctl.yaml > cappx-test.yaml
+clusterctl generate cluster cappx-test --control-plane-machine-count=3 --infrastructure=proxmox:v0.4.3 --config https://raw.githubusercontent.com/k8s-proxmox/cluster-api-provider-proxmox/main/clusterctl.yaml > cappx-test.yaml

# inspect and edit
vi cappx-test.yaml
28 changes: 16 additions & 12 deletions cloud/scheduler/scheduler.go
@@ -326,18 +326,21 @@ func (s *Scheduler) SelectVMID(ctx context.Context, config api.VirtualMachineCre
}

func (s *Scheduler) SelectStorage(ctx context.Context, config api.VirtualMachineCreateOptions, nodeName string) (string, error) {
-	s.logger.Info("finding proxmox storage to be used for qemu")
+	log := s.logger.WithValues("qemu", config.Name).WithValues("node", nodeName)
+	log.Info("finding proxmox storage to be used for qemu")
	if config.Storage != "" {
		// to do: raise error if storage is not available on the node
		return config.Storage, nil
	}

	node, err := s.client.Node(ctx, nodeName)
	if err != nil {
+		log.Error(err, "failed to get node")
		return "", err
	}
	storages, err := node.GetStorages(ctx)
	if err != nil {
+		log.Error(err, "failed to get storages")
		return "", err
	}
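The `to do` comment left in SelectStorage (raise an error when an explicitly requested storage is not available on the node) could be resolved roughly as below. This is only a sketch and not part of this PR: it assumes each entry returned by `node.GetStorages(ctx)` exposes its name via a `Storage` field, which this diff does not confirm.

```go
// Sketch for the "to do" above (not part of this PR): verify the requested
// storage actually exists on the selected node before returning it.
if config.Storage != "" {
	node, err := s.client.Node(ctx, nodeName)
	if err != nil {
		log.Error(err, "failed to get node")
		return "", err
	}
	storages, err := node.GetStorages(ctx)
	if err != nil {
		log.Error(err, "failed to get storages")
		return "", err
	}
	for _, storage := range storages {
		// Storage is assumed to be the name field on the returned entries.
		if storage.Storage == config.Storage {
			return config.Storage, nil
		}
	}
	return "", fmt.Errorf("storage %s is not available on node %s", config.Storage, nodeName)
}
```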

@@ -375,44 +378,45 @@ func (s *Scheduler) RunFilterPlugins(ctx context.Context, state *framework.Cycle
	return feasibleNodes, nil
}

-func (s *Scheduler) RunScorePlugins(ctx context.Context, state *framework.CycleState, config api.VirtualMachineCreateOptions, nodes []*api.Node) (framework.NodeScoreList, *framework.Status) {
+func (s *Scheduler) RunScorePlugins(ctx context.Context, state *framework.CycleState, config api.VirtualMachineCreateOptions, nodes []*api.Node) (map[string]framework.NodeScore, *framework.Status) {
	s.logger.Info("scoring proxmox node")
	status := framework.NewStatus()
-	scoresMap := make(map[string](map[int]framework.NodeScore))
+	scoresMap := make(map[string](map[string]framework.NodeScore))
	for _, pl := range s.registry.ScorePlugins() {
-		scoresMap[pl.Name()] = make(map[int]framework.NodeScore)
+		scoresMap[pl.Name()] = make(map[string]framework.NodeScore)
	}
	nodeInfos, err := framework.GetNodeInfoList(ctx, s.client)
	if err != nil {
		status.SetCode(1)
		s.logger.Error(err, "failed to get node info list")
		return nil, status
	}
-	for index, nodeInfo := range nodeInfos {
+	for _, nodeInfo := range nodeInfos {
		for _, pl := range s.registry.ScorePlugins() {
			score, status := pl.Score(ctx, state, config, nodeInfo)
			if !status.IsSuccess() {
				status.SetCode(1)
				s.logger.Error(status.Error(), fmt.Sprintf("failed to score node %s", nodeInfo.Node().Node))
				return nil, status
			}
-			scoresMap[pl.Name()][index] = framework.NodeScore{
+			scoresMap[pl.Name()][nodeInfo.Node().Node] = framework.NodeScore{
				Name:  nodeInfo.Node().Node,
				Score: score,
			}
		}
	}
-	result := make(framework.NodeScoreList, 0, len(nodes))
-	for i := range nodes {
-		result = append(result, framework.NodeScore{Name: nodes[i].Node, Score: 0})
-		for j := range scoresMap {
-			result[i].Score += scoresMap[j][i].Score
+	result := make(map[string]framework.NodeScore)
+	for _, node := range nodes {
+		result[node.Node] = framework.NodeScore{Name: node.Node, Score: 0}
+		for plugin := range scoresMap {
+			r := result[node.Node]
+			r.Score += scoresMap[plugin][node.Node].Score
		}
	}
	return result, status
}
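Worth a second look in the new aggregation loop: `framework.NodeScore` looks like a plain struct, so `r := result[node.Node]` takes a copy, and the following `r.Score +=` does not update the map entry unless that copy is stored back; as rendered here, no write-back appears. A minimal, self-contained sketch of that Go map/struct behavior (the type and values are illustrative, not from this repository):

```go
package main

import "fmt"

// nodeScore mirrors the shape of framework.NodeScore for illustration only.
type nodeScore struct {
	Name  string
	Score int64
}

func main() {
	result := map[string]nodeScore{"pve1": {Name: "pve1"}}

	r := result["pve1"]
	r.Score += 10                     // mutates the copy, not the map entry
	fmt.Println(result["pve1"].Score) // prints 0

	result["pve1"] = r                // write the updated copy back
	fmt.Println(result["pve1"].Score) // prints 10
}
```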

-func selectHighestScoreNode(scoreList framework.NodeScoreList) (string, error) {
+func selectHighestScoreNode(scoreList map[string]framework.NodeScore) (string, error) {
	if len(scoreList) == 0 {
		return "", fmt.Errorf("empty node score list")
	}
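The rest of selectHighestScoreNode is collapsed above, but with the parameter now a map keyed by node name, the selection presumably becomes a plain iteration over that map. A sketch of what that could look like, assuming framework.NodeScore.Score is an int64 (not shown in this diff); this is not necessarily the code in this PR:

```go
// Sketch: pick the node whose accumulated score is highest.
// Ties are broken arbitrarily since Go map iteration order is random.
func selectHighestScoreNode(scoreList map[string]framework.NodeScore) (string, error) {
	if len(scoreList) == 0 {
		return "", fmt.Errorf("empty node score list")
	}
	var selected string
	var best int64
	first := true
	for name, score := range scoreList {
		if first || score.Score > best {
			selected = name
			best = score.Score
			first = false
		}
	}
	return selected, nil
}
```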