Created
March 19, 2024 19:26
-
-
Save hamzy/2593ca688df7df1c220444afaab7c4cd to your computer and use it in GitHub Desktop.
20240319 ./pkg/infrastructure/powervs/clusterapi/powervs.go
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
package clusterapi | |
import ( | |
"context" | |
"fmt" | |
"regexp" | |
"strings" | |
"github.com/IBM/vpc-go-sdk/vpcv1" | |
"github.com/sirupsen/logrus" | |
"k8s.io/utils/ptr" | |
capibm "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" | |
crclient "sigs.k8s.io/controller-runtime/pkg/client" | |
powervsconfig "github.com/openshift/installer/pkg/asset/installconfig/powervs" | |
"github.com/openshift/installer/pkg/asset/manifests/capiutils" | |
"github.com/openshift/installer/pkg/infrastructure/clusterapi" | |
powervstypes "github.com/openshift/installer/pkg/types/powervs" | |
) | |
// Provider is the PowerVS implementation of the clusterapi InfraProvider.
type Provider struct {
	clusterapi.InfraProvider
}

// Compile-time checks that Provider implements the clusterapi hooks it
// claims to: pre-provisioning, post-infrastructure-ready, and the base
// provider interface.
var _ clusterapi.PreProvider = (*Provider)(nil)
var _ clusterapi.InfraReadyProvider = (*Provider)(nil)
var _ clusterapi.Provider = (*Provider)(nil)
// Name returns the PowerVS provider name, i.e. the platform name constant
// declared in pkg/types/powervs.
func (p Provider) Name() string {
	return powervstypes.Name
}
// PreProvision creates the PowerVS objects required prior to running capv.
// It is currently a no-op: nothing needs to exist before the CAPI provider
// starts reconciling, so it always returns nil.
func (p Provider) PreProvision(ctx context.Context, in clusterapi.PreProvisionInput) error {
	return nil
}
// InfraReady is called once cluster.Status.InfrastructureReady | |
// is true, typically after load balancers have been provisioned. It can be used | |
// to create DNS records. | |
func (p Provider) InfraReady(ctx context.Context, in clusterapi.InfraReadyInput) error { | |
var ( | |
client *powervsconfig.Client | |
instanceCRN string | |
rule *vpcv1.SecurityGroupRulePrototype | |
ports = [...]int64{22, 10258, 22623} | |
port int64 | |
err error | |
) | |
logrus.Debugf("InfraReady: in = %+v", in) | |
logrus.Debugf("InfraReady: in.InstallConfig.Config = %+v", in.InstallConfig.Config) | |
logrus.Debugf("InfraReady: in.InstallConfig.PowerVS = %+v", in.InstallConfig.PowerVS) | |
powerVSCluster := &capibm.IBMPowerVSCluster{} | |
key := crclient.ObjectKey{ | |
Name: in.InfraID, | |
Namespace: capiutils.Namespace, | |
} | |
logrus.Debugf("InfraReady: cluster key = %+v", key) | |
err = in.Client.Get(ctx, key, powerVSCluster) | |
if err != nil { | |
return fmt.Errorf("failed to get PowerVS cluster in InfraReady: %w", err) | |
} | |
logrus.Debugf("InfraReady: powerVSCluster = %+v", powerVSCluster) | |
logrus.Debugf("InfraReady: powerVSCluster.Status = %+v", powerVSCluster.Status) | |
key = crclient.ObjectKey{ | |
Name: fmt.Sprintf("rhcos-%s", in.InfraID), | |
Namespace: capiutils.Namespace, | |
} | |
logrus.Debugf("InfraReady: image key = %+v", key) | |
powerVSImage := &capibm.IBMPowerVSImage{} | |
err = in.Client.Get(ctx, key, powerVSImage) | |
logrus.Debugf("InfraReady: image err = %v", err) | |
if err == nil { | |
logrus.Debugf("InfraReady: image = %+v", powerVSImage) | |
} | |
key = crclient.ObjectKey{ | |
Name: fmt.Sprintf("%s-bootstrap", in.InfraID), | |
Namespace: capiutils.Namespace, | |
} | |
logrus.Debugf("InfraReady: machine key = %+v", key) | |
powerVSMachine := &capibm.IBMPowerVSMachine{} | |
err = in.Client.Get(ctx, key, powerVSMachine) | |
logrus.Debugf("InfraReady: machine err = %v", err) | |
if err == nil { | |
logrus.Debugf("InfraReady: machine = %+v", powerVSMachine) | |
} | |
client, err = powervsconfig.NewClient() | |
if err != nil { | |
return fmt.Errorf("failed to get NewClient in InfraReady: %w", err) | |
} | |
logrus.Debugf("InfraReady: NewClient returns %+v", client) | |
// SAD: client in the Metadata struct is lowercase and therefore private | |
// client = in.InstallConfig.PowerVS.client | |
instanceCRN, err = client.GetInstanceCRNByName(ctx, | |
in.InstallConfig.PowerVS.BaseDomain, | |
in.InstallConfig.Config.Publish) | |
if err != nil { | |
return fmt.Errorf("failed to get InstanceCRN (%s) by name in InfraReady: %w", | |
in.InstallConfig.Config.Publish, | |
err) | |
} | |
logrus.Debugf("InfraReady: instanceCRN = %s", instanceCRN) | |
lbExtExp := regexp.MustCompile(`\b-loadbalancer\b$`) | |
lbIntExp := regexp.MustCompile(`\b-loadbalancer-int\b$`) | |
// Step 1. | |
// Create DNS records for the two load balancers | |
// map[string]VPCLoadBalancerStatus | |
for lbKey, loadBalancerStatus := range powerVSCluster.Status.LoadBalancers { | |
var ( | |
idx int | |
substr string | |
infraID string | |
hostname string | |
prefix string | |
) | |
// The infra id is "rdr-hamzy-test-dal10-846vd" and we need "rdr-hamzy-test-dal10" | |
logrus.Debugf("in.InfraID = %s", in.InfraID) | |
idx = strings.LastIndex(in.InfraID, "-") | |
logrus.Debugf("idx = %d", idx) | |
substr = in.InfraID[idx:] | |
logrus.Debugf("substr = %s", substr) | |
infraID = strings.ReplaceAll(in.InfraID, substr, "") | |
logrus.Debugf("infraID = %s", infraID) | |
// Is it external (public) or internal (private)? | |
logrus.Debugf("lbKey = %s", lbKey) | |
switch { | |
case lbExtExp.MatchString(lbKey): | |
prefix = "api." | |
case lbIntExp.MatchString(lbKey): | |
prefix = "api-int." | |
} | |
logrus.Debugf("prefix = %s", prefix) | |
hostname = fmt.Sprintf("%s%s", prefix, infraID) | |
logrus.Debugf("InfraReady: crn = %s, base domain = %s, hostname = %s, cname = %s", | |
instanceCRN, | |
in.InstallConfig.PowerVS.BaseDomain, | |
hostname, | |
*loadBalancerStatus.Hostname) | |
err = client.CreateDNSRecord(ctx, | |
instanceCRN, | |
in.InstallConfig.PowerVS.BaseDomain, | |
hostname, | |
*loadBalancerStatus.Hostname) | |
if err != nil { | |
return fmt.Errorf("failed to create a DNS CNAME record (%s, %s): %w", | |
hostname, | |
*loadBalancerStatus.Hostname, | |
err) | |
} | |
} | |
// Step 2. | |
// Add to security group rules | |
for _, port = range ports { | |
rule = &vpcv1.SecurityGroupRulePrototype{ | |
Direction: ptr.To("inbound"), | |
Protocol: ptr.To("tcp"), | |
PortMin: ptr.To(port), | |
PortMax: ptr.To(port), | |
} | |
err = client.AddSecurityGroupRule(ctx, *powerVSCluster.Status.VPC.ID, rule) | |
if err != nil { | |
return fmt.Errorf("failed to add security group rule for port %d: %w", port, err) | |
} | |
} | |
rule = &vpcv1.SecurityGroupRulePrototype{ | |
Direction: ptr.To("inbound"), | |
Protocol: ptr.To("icmp"), | |
} | |
err = client.AddSecurityGroupRule(ctx, *powerVSCluster.Status.VPC.ID, rule) | |
if err != nil { | |
return fmt.Errorf("failed to add security group rule for icmp: %w", err) | |
} | |
// Step 3. | |
// Create worker ssh key | |
// in.InstallConfig.Config.SSHKey | |
return nil | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment