import pulumi
import pulumi_gcp as gcp
import pulumi_kubernetes as k8s

from network import network, subnet1
from common import gcp_project, gcp_zone

provider_cfg = pulumi.Config("gcp")
gcp_region = provider_cfg.get("region", "europe-north1")

cluster_sa = gcp.serviceaccount.Account(
    "cluster-sa",
    account_id="cluster-sa",
    display_name="GKE Cluster Service Account",
)
# Create a GKE cluster
cluster = gcp.container.Cluster(
    "prod-cluster3",
    name="prod-cluster3",
    initial_node_count=1,
    network=network.self_link,
    subnetwork=subnet1.name,
    project=gcp_project,
    location=gcp_zone,
    networking_mode="VPC_NATIVE",
    ip_allocation_policy=gcp.container.ClusterIpAllocationPolicyArgs(
        cluster_ipv4_cidr_block="/14",
        services_ipv4_cidr_block="/20",
    ),
    dns_config=gcp.container.ClusterDnsConfigArgs(
        cluster_dns="CLOUD_DNS",
        cluster_dns_scope="VPC_SCOPE",
        cluster_dns_domain="almash.kg",
    ),
    binary_authorization=gcp.container.ClusterBinaryAuthorizationArgs(
        evaluation_mode="PROJECT_SINGLETON_POLICY_ENFORCE",
    ),
    monitoring_config=gcp.container.ClusterMonitoringConfigArgs(
        managed_prometheus=gcp.container.ClusterMonitoringConfigManagedPrometheusArgs(
            enabled=True,
        ),
    ),
    release_channel=gcp.container.ClusterReleaseChannelArgs(
        channel="STABLE",
    ),
    remove_default_node_pool=True,
    workload_identity_config=gcp.container.ClusterWorkloadIdentityConfigArgs(
        workload_pool=f"{gcp_project}.svc.id.goog",
    ),
    cluster_autoscaling=gcp.container.ClusterClusterAutoscalingArgs(
        enabled=True,
        resource_limits=[
            gcp.container.ClusterClusterAutoscalingResourceLimitArgs(
                resource_type="cpu",
                minimum=1,
                maximum=50,
            ),
            gcp.container.ClusterClusterAutoscalingResourceLimitArgs(
                resource_type="memory",
                minimum=1,
                maximum=200,
            ),
        ],
        autoscaling_profile="BALANCED",
    ),
    opts=pulumi.ResourceOptions(depends_on=[subnet1]),
)
# Create a default node pool
default_node_pool = gcp.container.NodePool(
    "default",
    name="default",
    cluster=cluster.name,
    location=gcp_zone,
    initial_node_count=0,
    autoscaling=gcp.container.NodePoolAutoscalingArgs(
        min_node_count=1,
        max_node_count=5,
    ),
    node_config=gcp.container.NodePoolNodeConfigArgs(
        machine_type="e2-medium",
        metadata={
            "disable-legacy-endpoints": "true",
        },
        labels={
            "workload": "default",
        },
        workload_metadata_config=gcp.container.NodePoolNodeConfigWorkloadMetadataConfigArgs(
            mode="GKE_METADATA",
        ),
        service_account=cluster_sa.email,
    ),
)
# Create a redis node pool
redis_node_pool = gcp.container.NodePool(
    "redis",
    name="redis",
    cluster=cluster.name,
    location=gcp_zone,
    initial_node_count=0,
    autoscaling=gcp.container.NodePoolAutoscalingArgs(
        total_max_node_count=1,
    ),
    node_config=gcp.container.NodePoolNodeConfigArgs(
        machine_type="e2-medium",
        metadata={
            "disable-legacy-endpoints": "true",
        },
        labels={
            "redis": "true",
        },
    ),
)
# Create a binance-proxy node pool
binance_proxy_node_pool = gcp.container.NodePool(
    "binance-proxy",
    name="binance-proxy",
    cluster=cluster.name,
    location=gcp_zone,
    initial_node_count=0,
    autoscaling=gcp.container.NodePoolAutoscalingArgs(
        min_node_count=1,
        max_node_count=5,
    ),
    node_config=gcp.container.NodePoolNodeConfigArgs(
        machine_type="e2-micro",
        metadata={
            "disable-legacy-endpoints": "true",
        },
        labels={
            "binance-proxy": "true",
        },
    ),
)
# Assemble a kubeconfig for the cluster from its CA certificate, endpoint and
# name, authenticating through the gke-gcloud-auth-plugin exec credential.
cluster_kubeconfig = pulumi.Output.all(
    cluster.master_auth.cluster_ca_certificate,
    cluster.endpoint,
    cluster.name,
).apply(lambda l: f"""apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: {l[0]}
    server: https://{l[1]}
  name: {l[2]}
contexts:
- context:
    cluster: {l[2]}
    user: {l[2]}
  name: {l[2]}
current-context: {l[2]}
kind: Config
preferences: {{}}
users:
- name: {l[2]}
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      command: gke-gcloud-auth-plugin
      installHint: Install gke-gcloud-auth-plugin for use with kubectl by following
        https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
      provideClusterInfo: true
""")
# Allow the Kubernetes service account default/cloudsql-sa to impersonate the
# cluster service account via Workload Identity
k8s_cloudsql_sa_binding = gcp.serviceaccount.IAMBinding(
    "cloudsql_sa_binding",
    service_account_id=cluster_sa.id,
    role="roles/iam.workloadIdentityUser",
    members=[
        f"serviceAccount:{gcp_project}.svc.id.goog[default/cloudsql-sa]",
    ],
)
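
# Kubernetes half of the Workload Identity link referenced by the binding
# above: the KSA default/cloudsql-sa must carry the
# iam.gke.io/gcp-service-account annotation pointing at the cluster service
# account. A sketch only, assuming the gke_provider defined above; the Pulumi
# resource name is illustrative.
cloudsql_ksa = k8s.core.v1.ServiceAccount(
    "cloudsql-ksa",
    metadata=k8s.meta.v1.ObjectMetaArgs(
        name="cloudsql-sa",
        namespace="default",
        annotations={
            "iam.gke.io/gcp-service-account": cluster_sa.email,
        },
    ),
    opts=pulumi.ResourceOptions(provider=gke_provider),
)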
def get_policy_data(email):
    member_list = [
        f"serviceAccount:{email}",
    ]
    return gcp.organizations.get_iam_policy(
        bindings=[
            gcp.organizations.GetIAMPolicyBindingArgs(
                role="roles/cloudsql.instanceUser",
                members=member_list,
            ),
            gcp.organizations.GetIAMPolicyBindingArgs(
                role="roles/cloudsql.client",
                members=member_list,
            ),
            gcp.organizations.GetIAMPolicyBindingArgs(
                role="roles/container.nodeServiceAccount",
                members=member_list,
            ),
        ],
    )
# Grant the service account the 'roles/cloudsql.instanceUser',
# 'roles/cloudsql.client' and 'roles/container.nodeServiceAccount' roles on the
# project
instance_user = cluster_sa.email.apply(get_policy_data)
instance_user_apply = gcp.projects.IAMPolicy(
    "instance_user_policy",
    project=gcp_project,
    policy_data=instance_user.policy_data,
)
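
# Stack outputs are an assumption here rather than part of the original file:
# exporting the cluster name and the kubeconfig (marked as a secret) makes them
# retrievable with `pulumi stack output`.
pulumi.export("cluster_name", cluster.name)
pulumi.export("kubeconfig", pulumi.Output.secret(cluster_kubeconfig))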