Pulumi setup: build a NixOS GCE image with nix-build, upload it to a GCS bucket, register it as a compute image, and provision instances (optionally with GPUs) with a static IP and a firewall allowing SSH and ICMP.
package main

import (
    "fmt"
    "os"
    "os/exec"
    "path"
    "path/filepath"
    re "regexp"
    "strings"

    "github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/compute"
    "github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/storage"
    p "github.com/pulumi/pulumi/sdk/v2/go/pulumi"
    "github.com/pulumi/pulumi/sdk/v2/go/pulumi/config"
)
const (
    defaultNetwork = p.String("default")
    allowSsh       = p.String("allow-ssh")
    nixos          = p.String("nixos")

    nestedVirtualizationLicense = p.String(
        "https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx",
    )

    sshPort = p.String("22")
)
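
// oneFileError reports a glob pattern that matched an unexpected number of files.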
type oneFileError struct {
    pattern       string
    numFilesFound int
}

func (err oneFileError) Error() string {
    return fmt.Sprintf(
        "got %d files with pattern %q, expected 1",
        err.numFilesFound,
        err.pattern,
    )
}
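
// gpuCountError reports an invalid GPU count in an instance's configuration.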
type gpuCountError struct {
    providedCount int8
}

func (e gpuCountError) Error() string {
    return fmt.Sprintf(
        "invalid value for GPU count: %d, expect a value > 0",
        e.providedCount,
    )
}
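
// nixBuildPaths holds the nix store output path of a build and the image
// tarball found inside it.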
type nixBuildPaths struct {
    outPath   string
    imagePath string
}
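
// buildNixOsImage runs nix-build on the parent directory for the given
// instance and returns the store output path together with the single
// *.tar.gz image it is expected to contain.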
func buildNixOsImage(ctx *p.Context, instanceName string) (*nixBuildPaths, error) {
    conf := config.New(ctx, "dev")
    imageExpr := fmt.Sprintf(conf.Require("nix_build_expr"), instanceName)
    outPathBytes, err := exec.Command(
        "nix-build",
        "..",
        "--no-out-link",
        "--attr",
        imageExpr,
    ).Output()
    if err != nil {
        // Use the comma-ok form: a bare type assertion panics when err is not
        // an *exec.ExitError (e.g. when nix-build is not on PATH).
        if exitError, ok := err.(*exec.ExitError); ok {
            os.Stderr.Write(exitError.Stderr)
        }
        return nil, err
    }
    outPath := strings.TrimRight(string(outPathBytes), "\r\n")
    pattern := filepath.Join(outPath, "*.tar.gz")
    files, err := filepath.Glob(pattern)
    if err != nil {
        return nil, err
    }
    numFiles := len(files)
    if numFiles != 1 {
        return nil, oneFileError{pattern: pattern, numFilesFound: numFiles}
    }
    imagePath := files[0]
    return &nixBuildPaths{outPath: outPath, imagePath: imagePath}, nil
}
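
// guestAccelerators converts an instance's optional gpu config into the guest
// accelerator arguments expected by compute.NewInstance.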
func guestAccelerators(instance *instance) (compute.InstanceGuestAcceleratorArray, error) {
    gpu := instance.Gpu
    if gpu == nil {
        return compute.InstanceGuestAcceleratorArray{}, nil
    }
    if gpu.Count == 0 {
        return nil, gpuCountError{providedCount: int8(gpu.Count)}
    }
    return compute.InstanceGuestAcceleratorArray{
        compute.InstanceGuestAcceleratorArgs{
            Type:  p.String(gpu.Type),
            Count: p.Int(gpu.Count),
        },
    }, nil
}

type disk struct {
    SizeGb uint   `json:"size_gb"`
    Type   string `json:"type"`
}

type gpu struct {
    Count uint   `json:"count"`
    Type  string `json:"type"`
}

type instance struct {
    Name        string `json:"name"`
    Disk        disk   `json:"disk"`
    MachineType string `json:"machine_type"`
    Gpu         *gpu   `json:"gpu"`
}
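
// run builds one NixOS image per configured instance and provisions the
// bucket, compute image, firewall, static IP, and instance resources,
// exporting each instance's address under the instance's name.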
func run(ctx *p.Context) error {
    conf := config.New(ctx, "dev")

    var instances []instance
    conf.RequireObject("instances", &instances)

    imageBucketName := conf.Require("image_bucket")
    imageBucket, err := storage.NewBucket(ctx, imageBucketName, nil)
    if err != nil {
        return err
    }
    for _, instance := range instances {
        instanceName := instance.Name

        var nixosImagePaths *nixBuildPaths
        if nixosImagePaths, err = buildNixOsImage(ctx, instanceName); err != nil {
            return err
        }

        imagePath := nixosImagePaths.imagePath
        baseImagePath := path.Base(imagePath)
        outHash := strings.Split(path.Base(nixosImagePaths.outPath), "-")[0]
        imageBucketObjectName := fmt.Sprintf("images/%s-%s", outHash, baseImagePath)

        imageBucketObject, err := storage.NewBucketObject(
            ctx,
            imageBucketObjectName,
            &storage.BucketObjectArgs{
                Source: p.NewFileAsset(imagePath),
                Bucket: imageBucket.Name,
                // Pass Name here because GCP requires an extension of .tar.gz for
                // compute images; otherwise pulumi will append a random hash suffix
                // and GCP will whine. This also requires p.DeleteBeforeReplace(true)
                // as a ResourceOption.
                Name:        p.String(imageBucketObjectName),
                ContentType: p.String("application/tar+gzip"),
                Metadata:    p.StringMap{"nix_store_hash": p.String(outHash)},
            },
            p.DeleteBeforeReplace(true),
        )
        if err != nil {
            return err
        }

        removeExtension := re.MustCompile(`\.raw\.tar\.gz|nixos-image-`)
        replaceDotAndUnderscore := re.MustCompile(`[._]+`)
        imageNameNoExtension := removeExtension.ReplaceAllString(baseImagePath, "")
        imageNameNoUnderscores := replaceDotAndUnderscore.ReplaceAllString(imageNameNoExtension, "-")
        imageName := fmt.Sprintf("x-%s-%s", outHash[:12], imageNameNoUnderscores)

        computeImage, err := compute.NewImage(
            ctx,
            imageName,
            &compute.ImageArgs{
                Family: nixos,
                Licenses: p.StringArray{
                    nestedVirtualizationLicense,
                },
                RawDisk: compute.ImageRawDiskArgs{
                    Source: imageBucketObject.SelfLink,
                },
            },
        )
        if err != nil {
            return err
        }

        networkTags := p.StringArray{
            allowSsh,
            nixos,
        }

        firewall, err := compute.NewFirewall(
            ctx,
            fmt.Sprintf("%s-firewall", instanceName),
            &compute.FirewallArgs{
                Network:   defaultNetwork,
                Direction: p.String("INGRESS"),
                SourceRanges: p.StringArray{
                    p.String("0.0.0.0/0"),
                },
                TargetTags: networkTags,
                Allows: compute.FirewallAllowArray{
                    compute.FirewallAllowArgs{
                        Protocol: p.String("tcp"),
                        Ports: p.StringArray{
                            sshPort,
                        },
                    },
                    compute.FirewallAllowArgs{
                        Protocol: p.String("icmp"),
                    },
                },
            },
        )
        if err != nil {
            return err
        }

        address, err := compute.NewAddress(
            ctx,
            fmt.Sprintf("%s-static-ip", instanceName),
            nil,
        )
        if err != nil {
            return err
        }

        accelerators, err := guestAccelerators(&instance)
        if err != nil {
            return err
        }

        scheduling := compute.InstanceSchedulingArgs{
            OnHostMaintenance: p.String("MIGRATE"),
        }
        // GCE cannot live-migrate instances with attached GPUs, so they must
        // terminate on host maintenance instead.
        if instance.Gpu != nil {
            scheduling.OnHostMaintenance = p.String("TERMINATE")
        }

        _, err = compute.NewInstance(
            ctx,
            instanceName,
            &compute.InstanceArgs{
                MachineType:       p.String(instance.MachineType),
                GuestAccelerators: accelerators,
                Tags:              networkTags,
                Scheduling:        scheduling,
                NetworkInterfaces: compute.InstanceNetworkInterfaceArray{
                    compute.InstanceNetworkInterfaceArgs{
                        Network: defaultNetwork,
                        AccessConfigs: compute.InstanceNetworkInterfaceAccessConfigArray{
                            compute.InstanceNetworkInterfaceAccessConfigArgs{
                                NatIp: address.Address,
                            },
                        },
                    },
                },
                BootDisk: compute.InstanceBootDiskArgs{
                    InitializeParams: compute.InstanceBootDiskInitializeParamsArgs{
                        Image: computeImage.SelfLink,
                        Size:  p.Int(instance.Disk.SizeGb),
                        Type:  p.String(instance.Disk.Type),
                    },
                },
                AllowStoppingForUpdate: p.Bool(true),
            },
            p.DependsOn([]p.Resource{firewall}),
        )
        if err != nil {
            return err
        }

        ctx.Export(instanceName, address.Address)
    }
    return nil
}

func main() {
    p.Run(run)
}

# NB: `dev` is the stack name. You can call your stack whatever you want.
# The pulumi config should then be called `Pulumi.${my_stack_name}.yaml`.
config:
  dev:nix_build_expr: "config.nodes.%s.configuration.system.build.googleComputeImage"
  dev:image_bucket: "nixos-images"
  dev:instances:
    - name: $HOSTNAME_OF_YOUR_CHOOSING
      disk:
        size_gb: 500
        type: "pd-ssd"
      machine_type: "n2d-highcpu-224"
  gcp:project: $SOME_GCP_PROJECT_ID
  gcp:region: us-east4 # can be any region
  gcp:zone: us-east4-b # a zone in the aforementioned region that supports n2d instances
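
A sketch of a typical deployment, assuming the Pulumi and Nix CLIs are installed and GCP credentials are configured: from the project directory (whose parent must hold the Nix expression, since `buildNixOsImage` invokes `nix-build ..`), run `pulumi stack init dev` (or `pulumi stack select dev`), then `pulumi up`. This builds one image per configured instance and, on success, exports each instance's static IP address under the instance's name.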