Skip to content

Instantly share code, notes, and snippets.

@feiskyer
Created February 7, 2021 05:39
Show Gist options
  • Save feiskyer/71d857622f826183c7babad4cb743e0d to your computer and use it in GitHub Desktop.
Add VMSS and all instances to LoadBalancer backend address pool
// go.mod for a standalone demo program (see main.go in this gist).
module test
// Go toolchain version this module was written against.
go 1.15
// Direct dependencies: Azure SDK + go-autorest for ARM calls,
// and cloud-provider-azure for its typed Azure clients/auth helpers.
require (
github.com/Azure/azure-sdk-for-go v49.1.0+incompatible
github.com/Azure/go-autorest/autorest v0.11.17
github.com/ghodss/yaml v1.0.0
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
k8s.io/apimachinery v0.0.0
k8s.io/klog/v2 v2.5.0
sigs.k8s.io/cloud-provider-azure v0.7.0
)
// Pin every k8s.io staging repo to mutually-compatible pseudo-versions;
// cloud-provider-azure pulls these in via k8s.io/legacy-cloud-providers,
// and without explicit replaces the v0.0.0 placeholders fail to resolve.
replace (
github.com/niemeyer/pretty => github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e
k8s.io/api => k8s.io/api v0.0.0-20201209045733-fcac651617f2
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20201114091224-a7ee1efe41fc
k8s.io/apimachinery => k8s.io/apimachinery v0.21.0-alpha.0.0.20201209085528-15c5dba13c59
k8s.io/apiserver => k8s.io/apiserver v0.0.0-20201209130508-aed7ab078321
k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20201209051923-2e4b259e04ba
k8s.io/client-go => k8s.io/client-go v0.0.0-20201209050023-e24efdc77f15
k8s.io/cloud-provider => k8s.io/cloud-provider v0.0.0-20201021002512-82fca6d2b013
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.0.0-20201114092228-614b98eee358
k8s.io/code-generator => k8s.io/code-generator v0.21.0-alpha.0
k8s.io/component-base => k8s.io/component-base v0.0.0-20201114090208-1e84b325f5ba
k8s.io/component-helpers => k8s.io/component-helpers v0.20.0-alpha.2.0.20201114090304-7cb42b694587
k8s.io/controller-manager => k8s.io/controller-manager v0.20.0-alpha.1.0.20201209052538-b2c380a1dc86
k8s.io/cri-api => k8s.io/cri-api v0.21.0-alpha.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.0.0-20201114092327-833303372de1
k8s.io/gengo => k8s.io/gengo v0.0.0-20200205140755-e0e292d8aa12
k8s.io/klog => k8s.io/klog v1.0.0
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.0.0-20201126170540-6c47de442a82
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.0.0-20201114092129-18c28a4120de
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd
k8s.io/kube-proxy => k8s.io/kube-proxy v0.0.0-20201114091637-deb12d4b202f
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.0.0-20201114091838-0f62d3991af1
k8s.io/kubectl => k8s.io/kubectl v0.0.0-20201210013108-5cfbd4019670
k8s.io/kubelet => k8s.io/kubelet v0.0.0-20201114091737-92ded5ee6b96
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.0.0-20201211092501-716c3daa6bc8
k8s.io/metrics => k8s.io/metrics v0.0.0-20201114091333-d70c0e0c6aa5
k8s.io/mount-utils => k8s.io/mount-utils v0.21.0-alpha.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.0.0-20201114090814-1f4e6a92d4b8
k8s.io/utils => k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19
sigs.k8s.io/apiserver-network-proxy/konnectivity-client => sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9
sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.2.0
)
package main
import (
"context"
"flag"
"io/ioutil"
"os"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-30/compute"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
"github.com/ghodss/yaml"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
klog "k8s.io/klog/v2"
"sigs.k8s.io/cloud-provider-azure/pkg/auth"
clients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
// Deployment-specific settings. The angle-bracket values are placeholders:
// fill them in with real resource identifiers before running.
const (
// location is the Azure region used when constructing the client config.
location = "eastus"
// resourceGroup is the resource group containing the target VMSS.
resourceGroup = "<rg-name>"
// vmssName is the name of the virtual machine scale set to update.
vmssName = "<vmss-name>"
// lbBackendPoolID is the full ARM resource ID of the load balancer
// backend address pool to attach to the VMSS and its instances.
lbBackendPoolID = "<lb-backend-id>"
)
// parseConfig reads confFile, unmarshals its contents into an
// auth.AzureAuthConfig, and resolves the target Azure environment.
// When the config does not name a cloud, the Azure public cloud is used.
func parseConfig(confFile string) (*auth.AzureAuthConfig, *azure.Environment, error) {
	f, err := os.Open(confFile)
	if err != nil {
		return nil, nil, err
	}
	defer f.Close()

	contents, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, nil, err
	}

	var cfg auth.AzureAuthConfig
	if err := yaml.Unmarshal(contents, &cfg); err != nil {
		return nil, nil, err
	}

	// Default to the public cloud; otherwise look the environment up by name.
	env := azure.PublicCloud
	if cfg.Cloud != "" {
		env, err = azure.EnvironmentFromName(cfg.Cloud)
		if err != nil {
			return nil, nil, err
		}
	}
	return &cfg, &env, nil
}
// addVMSSLBBackendPool attaches lbBackendPoolID to the backend address pools
// of the first IP configuration on the scale set's first NIC template, then
// pushes the updated network profile via CreateOrUpdate.
//
// The call is idempotent: if the pool ID is already present the function
// returns nil without issuing an API request, so re-running the tool does
// not insert a duplicate SubResource into the ipconfig.
//
// NOTE(review): only NIC[0]/ipconfig[0] is considered — assumes the VMSS
// template keeps its LB-facing ipconfig there; confirm for multi-NIC setups.
func addVMSSLBBackendPool(vmss compute.VirtualMachineScaleSet, vmssClient vmssclient.Interface) error {
	vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
	ipconfigs := *vmssNIC[0].IPConfigurations

	// Skip the update entirely when the backend pool is already attached.
	if ipconfigs[0].LoadBalancerBackendAddressPools != nil {
		for _, pool := range *ipconfigs[0].LoadBalancerBackendAddressPools {
			if pool.ID != nil && *pool.ID == lbBackendPoolID {
				klog.Infof("VMSS %s already references backend pool %s, skipping update", *vmss.Name, lbBackendPoolID)
				return nil
			}
		}
	}

	// Prepend the new pool, preserving any pools already configured.
	newPools := []compute.SubResource{
		{
			ID: to.StringPtr(lbBackendPoolID),
		},
	}
	if ipconfigs[0].LoadBalancerBackendAddressPools != nil {
		newPools = append(newPools, (*ipconfigs[0].LoadBalancerBackendAddressPools)...)
	}
	ipconfigs[0].LoadBalancerBackendAddressPools = &newPools

	// Send a minimal VMSS model — only Sku, Location, and the updated
	// network profile — so unrelated VMSS settings are left untouched.
	newVMSS := compute.VirtualMachineScaleSet{
		Sku:      vmss.Sku,
		Location: vmss.Location,
		VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
			VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
				NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{
					NetworkInterfaceConfigurations: &vmssNIC,
				},
			},
		},
	}

	// rerr is a *retry.Error; its Error() method maps a nil receiver to nil.
	rerr := vmssClient.CreateOrUpdate(context.Background(), resourceGroup, *vmss.Name, newVMSS)
	return rerr.Error()
}
// addVMSSVMLBBackendPool attaches lbBackendPoolID to the backend address
// pools of the first IP configuration on the instance's first NIC, then
// pushes the updated network profile via a VMSS VM Update call.
//
// The call is idempotent: if the pool ID is already present the function
// returns nil without issuing an API request, so re-running the tool does
// not insert a duplicate SubResource into the ipconfig.
//
// NOTE(review): only NIC[0]/ipconfig[0] is considered — assumes the
// instance keeps its LB-facing ipconfig there; confirm for multi-NIC setups.
func addVMSSVMLBBackendPool(vm compute.VirtualMachineScaleSetVM, vmssvmClient vmssvmclient.Interface) error {
	networkInterfaceConfigurations := *vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations
	ipconfigs := *networkInterfaceConfigurations[0].IPConfigurations

	// Skip the update entirely when the backend pool is already attached.
	if ipconfigs[0].LoadBalancerBackendAddressPools != nil {
		for _, pool := range *ipconfigs[0].LoadBalancerBackendAddressPools {
			if pool.ID != nil && *pool.ID == lbBackendPoolID {
				klog.Infof("instance %s already references backend pool %s, skipping update", *vm.InstanceID, lbBackendPoolID)
				return nil
			}
		}
	}

	// Prepend the new pool, preserving any pools already configured.
	newPools := []compute.SubResource{
		{
			ID: to.StringPtr(lbBackendPoolID),
		},
	}
	if ipconfigs[0].LoadBalancerBackendAddressPools != nil {
		newPools = append(newPools, (*ipconfigs[0].LoadBalancerBackendAddressPools)...)
	}
	ipconfigs[0].LoadBalancerBackendAddressPools = &newPools

	// Send a minimal VM model — Sku, Location, hardware profile, and the
	// updated network profile — so unrelated instance settings are untouched.
	newVM := &compute.VirtualMachineScaleSetVM{
		Sku:      vm.Sku,
		Location: vm.Location,
		VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
			HardwareProfile: vm.HardwareProfile,
			NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{
				NetworkInterfaceConfigurations: &networkInterfaceConfigurations,
			},
		},
	}

	// rerr is a *retry.Error; its Error() method maps a nil receiver to nil.
	rerr := vmssvmClient.Update(context.Background(), resourceGroup, vmssName, *vm.InstanceID, *newVM, "")
	return rerr.Error()
}
// main builds Azure clients from config.json, fetches the target VMSS and
// its instances, then concurrently attaches the LB backend pool to the VMSS
// model and to every existing instance.
func main() {
	klog.InitFlags(nil)
	flag.Set("logtostderr", "true")
	flag.Parse()
	defer klog.Flush()

	cfg, env, err := parseConfig("config.json")
	if err != nil {
		klog.Fatal(err)
	}

	token, err := auth.GetServicePrincipalToken(cfg, env)
	if err != nil {
		klog.Fatal(err)
	}

	// One shared client config; Steps:1 disables retries for this one-shot tool.
	clientConfig := &clients.ClientConfig{
		Location:                location,
		CloudName:               cfg.Cloud,
		SubscriptionID:          cfg.SubscriptionID,
		ResourceManagerEndpoint: env.ResourceManagerEndpoint,
		Backoff:                 &retry.Backoff{Steps: 1},
		RateLimitConfig:         &clients.RateLimitConfig{},
		Authorizer:              autorest.NewBearerAuthorizer(token),
	}
	vmssClient := vmssclient.New(clientConfig)
	vmssvmClient := vmssvmclient.New(clientConfig)

	vmss, rerr := vmssClient.Get(context.Background(), resourceGroup, vmssName)
	if rerr != nil {
		klog.Fatal(rerr)
	}
	instances, rerr := vmssvmClient.List(context.Background(), resourceGroup, *vmss.Name, "")
	if rerr != nil {
		klog.Fatal(rerr)
	}
	klog.Infof("VMSS %s has %d instances to update", *vmss.Name, len(instances))

	// First update the scale-set model, then every instance, all in parallel.
	updates := []func() error{
		func() error { return addVMSSLBBackendPool(vmss, vmssClient) },
	}
	for idx := range instances {
		// Capture a fresh copy per closure (pre-Go-1.22 loop-variable semantics).
		instance := instances[idx]
		updates = append(updates, func() error {
			return addVMSSVMLBBackendPool(instance, vmssvmClient)
		})
	}

	if errs := utilerrors.AggregateGoroutines(updates...); errs != nil {
		klog.Fatal(utilerrors.Flatten(errs))
	}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment