diff --git a/cmd/deploy.go b/cmd/deploy.go
index 9502784b475d7aed40727eb4570deb7dc7b31fb5..618941c1a8c3b2e4a39528dc167c74d8763b359d 100755
--- a/cmd/deploy.go
+++ b/cmd/deploy.go
@@ -243,7 +243,9 @@ func createCluster(conf *asset.ClusterAsset) error {
 		if err != nil {
 			return err
 		}
-		httpService.AddFileToCache(constants.IPXECfg, fileContent)
+		if err := httpService.AddFileToCache(constants.IPXECfg, fileContent); err != nil {
+			return fmt.Errorf("error adding ipxe config file to cache: %v", err)
+		}
 		httpserver.StartHTTPService(httpService)
 
 	default:
diff --git a/cmd/extend.go b/cmd/extend.go
index 6c9dcf88ae65a196b1cbbcdc92f07ebddacfe471..8410ae71f7e08ae63d2a88fe7231fb7b8640c340 100755
--- a/cmd/extend.go
+++ b/cmd/extend.go
@@ -118,16 +118,15 @@ func extendCluster(conf *asset.ClusterAsset, num uint) error {
 
 	httpService.AddFileToCache(constants.WorkerIgn, data)
 
-	if err := configmanager.Persist(); err != nil {
-		logrus.Errorf("Failed to persist the cluster asset: %v", err)
-		return err
-	}
-
 	p := infra.InfraPlatform{}
 	switch strings.ToLower(conf.Platform) {
 	case "libvirt":
 		httpserver.StartHTTPService(httpService)
 
+		if err := extendArray(conf, int(num)); err != nil {
+			return err
+		}
+
 		// regenerate worker.tf
 		var worker terraform.Infra
 		if err := worker.Generate(conf, "worker"); err != nil {
@@ -149,6 +148,10 @@ func extendCluster(conf *asset.ClusterAsset, num uint) error {
 	case "openstack":
 		httpserver.StartHTTPService(httpService)
 
+		if err := extendArray(conf, int(num)); err != nil {
+			return err
+		}
+
 		// regenerate worker.tf
 		var worker terraform.Infra
 		if err := worker.Generate(conf, "worker"); err != nil {
@@ -211,6 +214,33 @@ func extendCluster(conf *asset.ClusterAsset, num uint) error {
 	return nil
 }
 
+func extendArray(c *asset.ClusterAsset, count int) error {
+	if count <= 0 {
+		return fmt.Errorf("the number of nodes to be extended should be greater than 0")
+	}
+
+	num := len(c.Worker)
+	for i := 0; i < count; i++ {
+		hostname := fmt.Sprintf("k8s-worker%02d", num+i+1)
+		c.Worker = append(c.Worker, asset.NodeAsset{
+			Hostname: hostname,
+			IP:       "",
+			HardwareInfo: asset.HardwareInfo{
+				CPU:  c.Worker[i].CPU,
+				RAM:  c.Worker[i].RAM,
+				Disk: c.Worker[i].Disk,
+			},
+		})
+	}
+
+	if err := configmanager.Persist(); err != nil {
+		logrus.Errorf("Failed to persist the extended cluster asset: %v", err)
+		return err
+	}
+
+	return nil
+}
+
 // checkNodesReady waits for all nodes to be ready
 func checkNodesReady(ctx context.Context, conf *asset.ClusterAsset, num int) error {
 	clientset, err := kubeclient.CreateClient(conf.Kubernetes.AdminKubeConfig)
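
For reviewers, below is a minimal standalone sketch of the worker-numbering behaviour that extendArray introduces. The stand-in types are simplified assumptions (the real asset.NodeAsset / asset.HardwareInfo definitions and the configmanager.Persist call are not reproduced here); it only illustrates how new hostnames continue from the current worker count and how hardware settings are copied from existing entries.

package main

import "fmt"

// Simplified stand-ins for asset.HardwareInfo and asset.NodeAsset; the
// concrete field types of the real structs are assumptions here.
type hardwareInfo struct {
	CPU, RAM, Disk int
}

type nodeAsset struct {
	Hostname     string
	IP           string
	HardwareInfo hardwareInfo
}

// extendWorkers mirrors the appending loop in extendArray: new workers are
// numbered after the existing ones and copy hardware from workers[i].
func extendWorkers(workers []nodeAsset, count int) ([]nodeAsset, error) {
	if count <= 0 {
		return nil, fmt.Errorf("the number of nodes to be extended should be greater than 0")
	}
	num := len(workers)
	for i := 0; i < count; i++ {
		workers = append(workers, nodeAsset{
			Hostname:     fmt.Sprintf("k8s-worker%02d", num+i+1),
			HardwareInfo: workers[i].HardwareInfo,
		})
	}
	return workers, nil
}

func main() {
	existing := []nodeAsset{
		{Hostname: "k8s-worker01", HardwareInfo: hardwareInfo{CPU: 4, RAM: 8, Disk: 50}},
		{Hostname: "k8s-worker02", HardwareInfo: hardwareInfo{CPU: 4, RAM: 8, Disk: 50}},
	}
	extended, err := extendWorkers(existing, 2)
	if err != nil {
		panic(err)
	}
	for _, w := range extended {
		fmt.Println(w.Hostname) // prints k8s-worker01 through k8s-worker04
	}
}

Note that the loop reads the hardware from workers[i], so once count exceeds the original worker count it copies from nodes it has just appended (which already carry copied hardware), and extending an empty worker list would panic; this mirrors the behaviour of extendArray as written in the patch.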