/*
 * Copyright (c) 2016 Andrew Kelley
 *
 * This file is part of zig, which is MIT licensed.
 * See http://opensource.org/licenses/MIT
 */

#include "analyze.hpp"
#include "ast_render.hpp"
#include "error.hpp"
#include "ir.hpp"
#include "ir_print.hpp"
#include "os.hpp"
#include "range_set.hpp"
#include "softfloat.hpp"
#include "softfloat_ext.hpp"
#include "util.hpp"
#include "mem_list.hpp"
#include "all_types.hpp"

#include <errno.h>

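// Builder state for the pass-1 ("src") IR, which is generated directly from the AST.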
struct IrBuilderSrc {
    CodeGen *codegen;
    IrExecutableSrc *exec;
    IrBasicBlockSrc *current_basic_block;
    AstNode *main_block_node;
};

struct IrBuilderGen {
    CodeGen *codegen;
    IrExecutableGen *exec;
    IrBasicBlockGen *current_basic_block;

    // Constants are tracked here so they can be destroyed immediately after analysis.
    mem::List<IrInstGenConst *> constants;
};

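// State for the analysis pass: it walks the pass-1 ("src") IR through old_irb
// and emits the analyzed pass-2 ("gen") IR through new_irb.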
struct IrAnalyze {
    CodeGen *codegen;
    IrBuilderSrc old_irb;
    IrBuilderGen new_irb;
    size_t old_bb_index;
    size_t instruction_index;
    ZigType *explicit_return_type;
    AstNode *explicit_return_type_source_node;
    ZigList<IrInstGen *> src_implicit_return_type_list;
    ZigList<IrSuspendPosition> resume_stack;
    IrBasicBlockSrc *const_predecessor_bb;
    size_t ref_count;
    size_t break_debug_id; // for debugging purposes
    IrInstGen *return_ptr;

    // For use from a debugger
    void dump();
};

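// Outcome of the const-cast-only type comparison: ConstCastResultIdOk means the
// types are compatible; every other tag names the part of the types that mismatched.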
enum ConstCastResultId {
    ConstCastResultIdOk,
    ConstCastResultIdInvalid,
    ConstCastResultIdErrSet,
    ConstCastResultIdErrSetGlobal,
    ConstCastResultIdPointerChild,
    ConstCastResultIdSliceChild,
    ConstCastResultIdOptionalChild,
    ConstCastResultIdErrorUnionPayload,
    ConstCastResultIdErrorUnionErrorSet,
    ConstCastResultIdFnAlign,
    ConstCastResultIdFnCC,
    ConstCastResultIdFnVarArgs,
    ConstCastResultIdFnIsGeneric,
    ConstCastResultIdFnReturnType,
    ConstCastResultIdFnArgCount,
    ConstCastResultIdFnGenericArgCount,
    ConstCastResultIdFnArg,
    ConstCastResultIdFnArgNoAlias,
    ConstCastResultIdType,
    ConstCastResultIdUnresolvedInferredErrSet,
    ConstCastResultIdAsyncAllocatorType,
    ConstCastResultIdBadAllowsZero,
    ConstCastResultIdArrayChild,
    ConstCastResultIdSentinelArrays,
    ConstCastResultIdPtrLens,
    ConstCastResultIdCV,
    ConstCastResultIdPtrSentinel,
    ConstCastResultIdIntShorten,
};

struct ConstCastOnly;
struct ConstCastArg {
    size_t arg_index;
    ZigType *actual_param_type;
    ZigType *expected_param_type;
    ConstCastOnly *child;
};

struct ConstCastArgNoAlias {
    size_t arg_index;
};

struct ConstCastOptionalMismatch;
struct ConstCastPointerMismatch;
struct ConstCastSliceMismatch;
struct ConstCastErrUnionErrSetMismatch;
struct ConstCastErrUnionPayloadMismatch;
struct ConstCastErrSetMismatch;
struct ConstCastTypeMismatch;
struct ConstCastArrayMismatch;
struct ConstCastBadAllowsZero;
struct ConstCastBadNullTermArrays;
struct ConstCastBadCV;
struct ConstCastPtrSentinel;
struct ConstCastIntShorten;

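// Result of the const-cast check as a tagged union: `id` selects which member of
// `data`, if any, carries the details of the mismatch.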
struct ConstCastOnly {
    ConstCastResultId id;
    union {
        ConstCastErrSetMismatch *error_set_mismatch;
        ConstCastPointerMismatch *pointer_mismatch;
        ConstCastSliceMismatch *slice_mismatch;
        ConstCastOptionalMismatch *optional;
        ConstCastErrUnionPayloadMismatch *error_union_payload;
        ConstCastErrUnionErrSetMismatch *error_union_error_set;
        ConstCastTypeMismatch *type_mismatch;
        ConstCastArrayMismatch *array_mismatch;
        ConstCastOnly *return_type;
        ConstCastOnly *null_wrap_ptr_child;
        ConstCastArg fn_arg;
        ConstCastArgNoAlias arg_no_alias;
        ConstCastBadAllowsZero *bad_allows_zero;
        ConstCastBadNullTermArrays *sentinel_arrays;
        ConstCastBadCV *bad_cv;
        ConstCastPtrSentinel *bad_ptr_sentinel;
        ConstCastIntShorten *int_shorten;
    } data;
};

struct ConstCastTypeMismatch {
    ZigType *wanted_type;
    ZigType *actual_type;
};

struct ConstCastOptionalMismatch {
    ConstCastOnly child;
    ZigType *wanted_child;
    ZigType *actual_child;
};

struct ConstCastPointerMismatch {
    ConstCastOnly child;
    ZigType *wanted_child;
    ZigType *actual_child;
};

struct ConstCastSliceMismatch {
    ConstCastOnly child;
    ZigType *wanted_child;
    ZigType *actual_child;
};

struct ConstCastArrayMismatch {
    ConstCastOnly child;
    ZigType *wanted_child;
    ZigType *actual_child;
};

struct ConstCastErrUnionErrSetMismatch {
    ConstCastOnly child;
    ZigType *wanted_err_set;
    ZigType *actual_err_set;
};

struct ConstCastErrUnionPayloadMismatch {
    ConstCastOnly child;
    ZigType *wanted_payload;
    ZigType *actual_payload;
};

struct ConstCastErrSetMismatch {
    ZigList<ErrorTableEntry *> missing_errors;
};

struct ConstCastBadAllowsZero {
    ZigType *wanted_type;
    ZigType *actual_type;
};

struct ConstCastBadNullTermArrays {
    ConstCastOnly child;
    ZigType *wanted_type;
    ZigType *actual_type;
};

struct ConstCastBadCV {
    ZigType *wanted_type;
    ZigType *actual_type;
};

struct ConstCastPtrSentinel {
    ZigType *wanted_type;
    ZigType *actual_type;
};

struct ConstCastIntShorten {
    ZigType *wanted_type;
    ZigType *actual_type;
};

// for debugging purposes
struct DbgIrBreakPoint {
    const char *src_file;
    uint32_t line;
};
DbgIrBreakPoint dbg_ir_breakpoints_buf[20];
size_t dbg_ir_breakpoints_count = 0;

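// Forward declarations for the IR generation (pass 1) and analysis (pass 2)
// helpers defined later in this file.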
static IrInstSrc *ir_gen_node(IrBuilderSrc *irb, AstNode *node, Scope *scope);
static IrInstSrc *ir_gen_node_extra(IrBuilderSrc *irb, AstNode *node, Scope *scope, LVal lval,
        ResultLoc *result_loc);
static IrInstGen *ir_implicit_cast(IrAnalyze *ira, IrInstGen *value, ZigType *expected_type);
static IrInstGen *ir_implicit_cast2(IrAnalyze *ira, IrInst *value_source_instr,
        IrInstGen *value, ZigType *expected_type);
static IrInstGen *ir_get_deref(IrAnalyze *ira, IrInst *source_instr, IrInstGen *ptr,
        ResultLoc *result_loc);
static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutableSrc *exec, AstNode *source_node, Buf *msg);
static IrInstGen *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name,
    IrInst* source_instr, IrInstGen *container_ptr, IrInst *container_ptr_src,
    ZigType *container_type, bool initializing);
static void ir_assert_impl(bool ok, IrInst* source_instruction, const char *file, unsigned int line);
static void ir_assert_gen_impl(bool ok, IrInstGen *source_instruction, const char *file, unsigned int line);
static IrInstGen *ir_get_var_ptr(IrAnalyze *ira, IrInst *source_instr, ZigVar *var);
static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op);
static IrInstSrc *ir_lval_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *value, LVal lval, ResultLoc *result_loc);
static IrInstSrc *ir_expr_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *inst, ResultLoc *result_loc);
static ZigType *adjust_ptr_align(CodeGen *g, ZigType *ptr_type, uint32_t new_align);
static ZigType *adjust_ptr_const(CodeGen *g, ZigType *ptr_type, bool is_const);
static ZigType *adjust_slice_align(CodeGen *g, ZigType *slice_type, uint32_t new_align);
static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node, uint8_t *buf, ZigValue *val);
static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ZigValue *val);
static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node,
        ZigValue *out_val, ZigValue *ptr_val);
static IrInstGen *ir_analyze_ptr_cast(IrAnalyze *ira, IrInst* source_instr, IrInstGen *ptr,
        IrInst *ptr_src, ZigType *dest_type, IrInst *dest_type_src, bool safety_check_on,
        bool keep_bigger_alignment);
static ZigValue *ir_resolve_const(IrAnalyze *ira, IrInstGen *value, UndefAllowed undef_allowed);
static Error resolve_ptr_align(IrAnalyze *ira, ZigType *ty, uint32_t *result_align);
static IrInstGen *ir_analyze_int_to_ptr(IrAnalyze *ira, IrInst* source_instr, IrInstGen *target,
        ZigType *ptr_type);
static IrInstGen *ir_analyze_bit_cast(IrAnalyze *ira, IrInst* source_instr, IrInstGen *value,
        ZigType *dest_type);
static IrInstGen *ir_resolve_result_raw(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc, ZigType *value_type, IrInstGen *value, bool force_runtime, bool allow_discard);
static IrInstGen *ir_resolve_result(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc, ZigType *value_type, IrInstGen *value, bool force_runtime, bool allow_discard);
static IrInstGen *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *base_ptr, bool safety_check_on, bool initializing);
static IrInstGen *ir_analyze_unwrap_error_payload(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *base_ptr, bool safety_check_on, bool initializing);
static IrInstGen *ir_analyze_unwrap_err_code(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *base_ptr, bool initializing);
static IrInstGen *ir_analyze_store_ptr(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *ptr, IrInstGen *uncasted_value, bool allow_write_through_const);
static IrInstSrc *ir_gen_union_init_expr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *union_type, IrInstSrc *field_name, AstNode *expr_node,
    LVal lval, ResultLoc *parent_result_loc);
static void ir_reset_result(ResultLoc *result_loc);
static Buf *get_anon_type_name(CodeGen *codegen, IrExecutableSrc *exec, const char *kind_name,
        Scope *scope, AstNode *source_node, Buf *out_bare_name);
static ResultLocCast *ir_build_cast_result_loc(IrBuilderSrc *irb, IrInstSrc *dest_type,
        ResultLoc *parent_result_loc);
static IrInstGen *ir_analyze_struct_field_ptr(IrAnalyze *ira, IrInst* source_instr,
        TypeStructField *field, IrInstGen *struct_ptr, ZigType *struct_type, bool initializing);
static IrInstGen *ir_analyze_inferred_field_ptr(IrAnalyze *ira, Buf *field_name,
    IrInst* source_instr, IrInstGen *container_ptr, ZigType *container_type);
static ResultLoc *no_result_loc(void);
static IrInstGen *ir_analyze_test_non_null(IrAnalyze *ira, IrInst *source_inst, IrInstGen *value);
static IrInstGen *ir_error_dependency_loop(IrAnalyze *ira, IrInst *source_instr);
static IrInstGen *ir_const_undef(IrAnalyze *ira, IrInst *source_instruction, ZigType *ty);
static ZigVar *ir_create_var(IrBuilderSrc *irb, AstNode *node, Scope *scope, Buf *name,
        bool src_is_const, bool gen_is_const, bool is_shadowable, IrInstSrc *is_comptime);
static void build_decl_var_and_init(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, ZigVar *var,
        IrInstSrc *init, const char *name_hint, IrInstSrc *is_comptime);
static IrInstGen *ir_analyze_union_init(IrAnalyze *ira, IrInst* source_instruction,
    AstNode *field_source_node, ZigType *union_type, Buf *field_name, IrInstGen *field_result_loc,
    IrInstGen *result_loc);
static IrInstGen *ir_analyze_struct_value_field_value(IrAnalyze *ira, IrInst* source_instr,
    IrInstGen *struct_operand, TypeStructField *field);
static bool value_cmp_numeric_val_any(ZigValue *left, Cmp predicate, ZigValue *right);
static bool value_cmp_numeric_val_all(ZigValue *left, Cmp predicate, ZigValue *right);
static void memoize_field_init_val(CodeGen *codegen, ZigType *container_type, TypeStructField *field);

#define ir_assert(OK, SOURCE_INSTRUCTION) ir_assert_impl((OK), (SOURCE_INSTRUCTION), __FILE__, __LINE__)
#define ir_assert_gen(OK, SOURCE_INSTRUCTION) ir_assert_gen_impl((OK), (SOURCE_INSTRUCTION), __FILE__, __LINE__)

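// Frees a pass-1 instruction. Each case casts `inst` to its concrete struct type
// so the allocator releases the full object rather than just the IrInstSrc base.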
static void destroy_instruction_src(IrInstSrc *inst) {
    switch (inst->id) {
        case IrInstSrcIdInvalid:
            zig_unreachable();
        case IrInstSrcIdReturn:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcReturn *>(inst));
        case IrInstSrcIdConst:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcConst *>(inst));
        case IrInstSrcIdBinOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBinOp *>(inst));
        case IrInstSrcIdMergeErrSets:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcMergeErrSets *>(inst));
        case IrInstSrcIdDeclVar:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcDeclVar *>(inst));
        case IrInstSrcIdCall:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCall *>(inst));
        case IrInstSrcIdCallExtra:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCallExtra *>(inst));
        case IrInstSrcIdAsyncCallExtra:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAsyncCallExtra *>(inst));
        case IrInstSrcIdUnOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUnOp *>(inst));
        case IrInstSrcIdCondBr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCondBr *>(inst));
        case IrInstSrcIdBr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBr *>(inst));
        case IrInstSrcIdPhi:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPhi *>(inst));
        case IrInstSrcIdContainerInitList:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcContainerInitList *>(inst));
        case IrInstSrcIdContainerInitFields:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcContainerInitFields *>(inst));
        case IrInstSrcIdUnreachable:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUnreachable *>(inst));
        case IrInstSrcIdElemPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcElemPtr *>(inst));
        case IrInstSrcIdVarPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcVarPtr *>(inst));
        case IrInstSrcIdLoadPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcLoadPtr *>(inst));
        case IrInstSrcIdStorePtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcStorePtr *>(inst));
        case IrInstSrcIdTypeOf:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTypeOf *>(inst));
        case IrInstSrcIdFieldPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFieldPtr *>(inst));
        case IrInstSrcIdSetCold:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetCold *>(inst));
        case IrInstSrcIdSetRuntimeSafety:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetRuntimeSafety *>(inst));
        case IrInstSrcIdSetFloatMode:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetFloatMode *>(inst));
        case IrInstSrcIdArrayType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcArrayType *>(inst));
        case IrInstSrcIdSliceType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSliceType *>(inst));
        case IrInstSrcIdAnyFrameType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAnyFrameType *>(inst));
        case IrInstSrcIdAsm:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAsm *>(inst));
        case IrInstSrcIdSizeOf:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSizeOf *>(inst));
        case IrInstSrcIdTestNonNull:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTestNonNull *>(inst));
        case IrInstSrcIdOptionalUnwrapPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcOptionalUnwrapPtr *>(inst));
        case IrInstSrcIdPopCount:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPopCount *>(inst));
        case IrInstSrcIdClz:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcClz *>(inst));
        case IrInstSrcIdCtz:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCtz *>(inst));
        case IrInstSrcIdBswap:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBswap *>(inst));
        case IrInstSrcIdBitReverse:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBitReverse *>(inst));
        case IrInstSrcIdSwitchBr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSwitchBr *>(inst));
        case IrInstSrcIdSwitchVar:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSwitchVar *>(inst));
        case IrInstSrcIdSwitchElseVar:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSwitchElseVar *>(inst));
        case IrInstSrcIdSwitchTarget:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSwitchTarget *>(inst));
        case IrInstSrcIdImport:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcImport *>(inst));
        case IrInstSrcIdRef:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcRef *>(inst));
        case IrInstSrcIdCompileErr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCompileErr *>(inst));
        case IrInstSrcIdCompileLog:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCompileLog *>(inst));
        case IrInstSrcIdErrName:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrName *>(inst));
        case IrInstSrcIdCImport:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCImport *>(inst));
        case IrInstSrcIdCInclude:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCInclude *>(inst));
        case IrInstSrcIdCDefine:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCDefine *>(inst));
        case IrInstSrcIdCUndef:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCUndef *>(inst));
        case IrInstSrcIdEmbedFile:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcEmbedFile *>(inst));
        case IrInstSrcIdCmpxchg:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCmpxchg *>(inst));
        case IrInstSrcIdFence:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFence *>(inst));
        case IrInstSrcIdTruncate:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTruncate *>(inst));
        case IrInstSrcIdIntCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntCast *>(inst));
        case IrInstSrcIdFloatCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFloatCast *>(inst));
        case IrInstSrcIdErrSetCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrSetCast *>(inst));
        case IrInstSrcIdIntToFloat:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntToFloat *>(inst));
        case IrInstSrcIdFloatToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFloatToInt *>(inst));
        case IrInstSrcIdBoolToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBoolToInt *>(inst));
        case IrInstSrcIdVectorType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcVectorType *>(inst));
        case IrInstSrcIdShuffleVector:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcShuffleVector *>(inst));
        case IrInstSrcIdSplat:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSplat *>(inst));
        case IrInstSrcIdBoolNot:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBoolNot *>(inst));
        case IrInstSrcIdMemset:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcMemset *>(inst));
        case IrInstSrcIdMemcpy:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcMemcpy *>(inst));
        case IrInstSrcIdSlice:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSlice *>(inst));
        case IrInstSrcIdBreakpoint:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBreakpoint *>(inst));
        case IrInstSrcIdReturnAddress:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcReturnAddress *>(inst));
        case IrInstSrcIdFrameAddress:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFrameAddress *>(inst));
        case IrInstSrcIdFrameHandle:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFrameHandle *>(inst));
        case IrInstSrcIdFrameType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFrameType *>(inst));
        case IrInstSrcIdFrameSize:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFrameSize *>(inst));
        case IrInstSrcIdAlignOf:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAlignOf *>(inst));
        case IrInstSrcIdOverflowOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcOverflowOp *>(inst));
        case IrInstSrcIdTestErr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTestErr *>(inst));
        case IrInstSrcIdUnwrapErrCode:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUnwrapErrCode *>(inst));
        case IrInstSrcIdUnwrapErrPayload:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUnwrapErrPayload *>(inst));
        case IrInstSrcIdFnProto:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFnProto *>(inst));
        case IrInstSrcIdTestComptime:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTestComptime *>(inst));
        case IrInstSrcIdPtrCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPtrCast *>(inst));
        case IrInstSrcIdBitCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBitCast *>(inst));
        case IrInstSrcIdPtrToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPtrToInt *>(inst));
        case IrInstSrcIdIntToPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntToPtr *>(inst));
        case IrInstSrcIdIntToEnum:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntToEnum *>(inst));
        case IrInstSrcIdIntToErr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntToErr *>(inst));
        case IrInstSrcIdErrToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrToInt *>(inst));
        case IrInstSrcIdCheckSwitchProngs:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCheckSwitchProngs *>(inst));
        case IrInstSrcIdCheckStatementIsVoid:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCheckStatementIsVoid *>(inst));
        case IrInstSrcIdTypeName:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTypeName *>(inst));
        case IrInstSrcIdTagName:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTagName *>(inst));
        case IrInstSrcIdPtrType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPtrType *>(inst));
        case IrInstSrcIdDeclRef:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcDeclRef *>(inst));
        case IrInstSrcIdPanic:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPanic *>(inst));
        case IrInstSrcIdFieldParentPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFieldParentPtr *>(inst));
        case IrInstSrcIdByteOffsetOf:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcByteOffsetOf *>(inst));
        case IrInstSrcIdBitOffsetOf:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcBitOffsetOf *>(inst));
        case IrInstSrcIdTypeInfo:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTypeInfo *>(inst));
        case IrInstSrcIdType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcType *>(inst));
        case IrInstSrcIdHasField:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcHasField *>(inst));
        case IrInstSrcIdSetEvalBranchQuota:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetEvalBranchQuota *>(inst));
        case IrInstSrcIdAlignCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAlignCast *>(inst));
        case IrInstSrcIdImplicitCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcImplicitCast *>(inst));
        case IrInstSrcIdResolveResult:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcResolveResult *>(inst));
        case IrInstSrcIdResetResult:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcResetResult *>(inst));
        case IrInstSrcIdOpaqueType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcOpaqueType *>(inst));
        case IrInstSrcIdSetAlignStack:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetAlignStack *>(inst));
        case IrInstSrcIdArgType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcArgType *>(inst));
        case IrInstSrcIdTagType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTagType *>(inst));
        case IrInstSrcIdExport:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcExport *>(inst));
        case IrInstSrcIdErrorReturnTrace:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrorReturnTrace *>(inst));
        case IrInstSrcIdErrorUnion:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrorUnion *>(inst));
        case IrInstSrcIdAtomicRmw:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAtomicRmw *>(inst));
        case IrInstSrcIdSaveErrRetAddr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSaveErrRetAddr *>(inst));
        case IrInstSrcIdAddImplicitReturnType:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAddImplicitReturnType *>(inst));
        case IrInstSrcIdFloatOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFloatOp *>(inst));
        case IrInstSrcIdMulAdd:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcMulAdd *>(inst));
        case IrInstSrcIdAtomicLoad:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAtomicLoad *>(inst));
        case IrInstSrcIdAtomicStore:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAtomicStore *>(inst));
        case IrInstSrcIdEnumToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcEnumToInt *>(inst));
        case IrInstSrcIdCheckRuntimeScope:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCheckRuntimeScope *>(inst));
        case IrInstSrcIdHasDecl:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcHasDecl *>(inst));
        case IrInstSrcIdUndeclaredIdent:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUndeclaredIdent *>(inst));
        case IrInstSrcIdAlloca:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAlloca *>(inst));
        case IrInstSrcIdEndExpr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcEndExpr *>(inst));
        case IrInstSrcIdUnionInitNamedField:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcUnionInitNamedField *>(inst));
        case IrInstSrcIdSuspendBegin:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSuspendBegin *>(inst));
        case IrInstSrcIdSuspendFinish:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSuspendFinish *>(inst));
        case IrInstSrcIdResume:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcResume *>(inst));
        case IrInstSrcIdAwait:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcAwait *>(inst));
        case IrInstSrcIdSpillBegin:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSpillBegin *>(inst));
        case IrInstSrcIdSpillEnd:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSpillEnd *>(inst));
        case IrInstSrcIdCallArgs:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCallArgs *>(inst));
        case IrInstSrcIdWasmMemorySize:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcWasmMemorySize *>(inst));
        case IrInstSrcIdWasmMemoryGrow:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcWasmMemoryGrow *>(inst));
        case IrInstSrcIdSrc:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSrc *>(inst));
    }
    zig_unreachable();
}

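// Same as destroy_instruction_src, but for the analyzed ("gen") instructions.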
void destroy_instruction_gen(IrInstGen *inst) {
    switch (inst->id) {
        case IrInstGenIdInvalid:
            zig_unreachable();
        case IrInstGenIdReturn:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenReturn *>(inst));
        case IrInstGenIdConst:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenConst *>(inst));
        case IrInstGenIdBinOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBinOp *>(inst));
        case IrInstGenIdCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenCast *>(inst));
        case IrInstGenIdCall:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenCall *>(inst));
        case IrInstGenIdCondBr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenCondBr *>(inst));
        case IrInstGenIdBr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBr *>(inst));
        case IrInstGenIdPhi:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPhi *>(inst));
        case IrInstGenIdUnreachable:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenUnreachable *>(inst));
        case IrInstGenIdElemPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenElemPtr *>(inst));
        case IrInstGenIdVarPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenVarPtr *>(inst));
        case IrInstGenIdReturnPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenReturnPtr *>(inst));
        case IrInstGenIdLoadPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenLoadPtr *>(inst));
        case IrInstGenIdStorePtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenStorePtr *>(inst));
        case IrInstGenIdVectorStoreElem:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenVectorStoreElem *>(inst));
        case IrInstGenIdStructFieldPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenStructFieldPtr *>(inst));
        case IrInstGenIdUnionFieldPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenUnionFieldPtr *>(inst));
        case IrInstGenIdAsm:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAsm *>(inst));
        case IrInstGenIdTestNonNull:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenTestNonNull *>(inst));
        case IrInstGenIdOptionalUnwrapPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenOptionalUnwrapPtr *>(inst));
        case IrInstGenIdPopCount:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPopCount *>(inst));
        case IrInstGenIdClz:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenClz *>(inst));
        case IrInstGenIdCtz:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenCtz *>(inst));
        case IrInstGenIdBswap:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBswap *>(inst));
        case IrInstGenIdBitReverse:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBitReverse *>(inst));
        case IrInstGenIdSwitchBr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSwitchBr *>(inst));
        case IrInstGenIdUnionTag:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenUnionTag *>(inst));
        case IrInstGenIdRef:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenRef *>(inst));
        case IrInstGenIdErrName:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenErrName *>(inst));
        case IrInstGenIdCmpxchg:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenCmpxchg *>(inst));
        case IrInstGenIdFence:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFence *>(inst));
        case IrInstGenIdTruncate:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenTruncate *>(inst));
        case IrInstGenIdShuffleVector:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenShuffleVector *>(inst));
        case IrInstGenIdSplat:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSplat *>(inst));
        case IrInstGenIdBoolNot:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBoolNot *>(inst));
        case IrInstGenIdMemset:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenMemset *>(inst));
        case IrInstGenIdMemcpy:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenMemcpy *>(inst));
        case IrInstGenIdSlice:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSlice *>(inst));
        case IrInstGenIdBreakpoint:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBreakpoint *>(inst));
        case IrInstGenIdReturnAddress:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenReturnAddress *>(inst));
        case IrInstGenIdFrameAddress:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFrameAddress *>(inst));
        case IrInstGenIdFrameHandle:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFrameHandle *>(inst));
        case IrInstGenIdFrameSize:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFrameSize *>(inst));
        case IrInstGenIdOverflowOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenOverflowOp *>(inst));
        case IrInstGenIdTestErr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenTestErr *>(inst));
        case IrInstGenIdUnwrapErrCode:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenUnwrapErrCode *>(inst));
        case IrInstGenIdUnwrapErrPayload:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenUnwrapErrPayload *>(inst));
        case IrInstGenIdOptionalWrap:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenOptionalWrap *>(inst));
        case IrInstGenIdErrWrapCode:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenErrWrapCode *>(inst));
        case IrInstGenIdErrWrapPayload:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenErrWrapPayload *>(inst));
        case IrInstGenIdPtrCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPtrCast *>(inst));
        case IrInstGenIdBitCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBitCast *>(inst));
        case IrInstGenIdWidenOrShorten:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenWidenOrShorten *>(inst));
        case IrInstGenIdPtrToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPtrToInt *>(inst));
        case IrInstGenIdIntToPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenIntToPtr *>(inst));
        case IrInstGenIdIntToEnum:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenIntToEnum *>(inst));
        case IrInstGenIdIntToErr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenIntToErr *>(inst));
        case IrInstGenIdErrToInt:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenErrToInt *>(inst));
        case IrInstGenIdTagName:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenTagName *>(inst));
        case IrInstGenIdPanic:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPanic *>(inst));
        case IrInstGenIdFieldParentPtr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFieldParentPtr *>(inst));
        case IrInstGenIdAlignCast:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAlignCast *>(inst));
        case IrInstGenIdErrorReturnTrace:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenErrorReturnTrace *>(inst));
        case IrInstGenIdAtomicRmw:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAtomicRmw *>(inst));
        case IrInstGenIdSaveErrRetAddr:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSaveErrRetAddr *>(inst));
        case IrInstGenIdFloatOp:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenFloatOp *>(inst));
        case IrInstGenIdMulAdd:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenMulAdd *>(inst));
        case IrInstGenIdAtomicLoad:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAtomicLoad *>(inst));
        case IrInstGenIdAtomicStore:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAtomicStore *>(inst));
        case IrInstGenIdDeclVar:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenDeclVar *>(inst));
        case IrInstGenIdArrayToVector:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenArrayToVector *>(inst));
        case IrInstGenIdVectorToArray:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenVectorToArray *>(inst));
        case IrInstGenIdPtrOfArrayToSlice:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenPtrOfArrayToSlice *>(inst));
        case IrInstGenIdAssertZero:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAssertZero *>(inst));
        case IrInstGenIdAssertNonNull:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAssertNonNull *>(inst));
        case IrInstGenIdAlloca:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAlloca *>(inst));
        case IrInstGenIdSuspendBegin:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSuspendBegin *>(inst));
        case IrInstGenIdSuspendFinish:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSuspendFinish *>(inst));
        case IrInstGenIdResume:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenResume *>(inst));
        case IrInstGenIdAwait:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAwait *>(inst));
        case IrInstGenIdSpillBegin:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSpillBegin *>(inst));
        case IrInstGenIdSpillEnd:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenSpillEnd *>(inst));
        case IrInstGenIdVectorExtractElem:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenVectorExtractElem *>(inst));
        case IrInstGenIdBinaryNot:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenBinaryNot *>(inst));
        case IrInstGenIdNegation:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenNegation *>(inst));
        case IrInstGenIdNegationWrapping:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenNegationWrapping *>(inst));
        case IrInstGenIdWasmMemorySize:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenWasmMemorySize *>(inst));
        case IrInstGenIdWasmMemoryGrow:
            return heap::c_allocator.destroy(reinterpret_cast<IrInstGenWasmMemoryGrow *>(inst));
    }
    zig_unreachable();
}

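// Reference counting for the analysis context: the final ira_deref tears down the
// pass-1 IR, any unreferenced side-effect-free constants, and the IrAnalyze itself.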
static void ira_ref(IrAnalyze *ira) {
    ira->ref_count += 1;
}
static void ira_deref(IrAnalyze *ira) {
    if (ira->ref_count > 1) {
        ira->ref_count -= 1;

        // Immediate destruction of dangling IrInstGenConst instructions is not
        // possible here; just free the tracking memory, since it will never be used.
        ira->new_irb.constants.deinit(&heap::c_allocator);
        return;
    }
    assert(ira->ref_count != 0);

    for (size_t bb_i = 0; bb_i < ira->old_irb.exec->basic_block_list.length; bb_i += 1) {
        IrBasicBlockSrc *pass1_bb = ira->old_irb.exec->basic_block_list.items[bb_i];
        for (size_t inst_i = 0; inst_i < pass1_bb->instruction_list.length; inst_i += 1) {
            IrInstSrc *pass1_inst = pass1_bb->instruction_list.items[inst_i];
            destroy_instruction_src(pass1_inst);
        }
        heap::c_allocator.destroy(pass1_bb);
    }
    ira->old_irb.exec->basic_block_list.deinit();
    ira->old_irb.exec->tld_list.deinit();
    heap::c_allocator.destroy(ira->old_irb.exec);
    ira->src_implicit_return_type_list.deinit();
    ira->resume_stack.deinit();

    // destroy dangling IrInstGenConst
    for (size_t i = 0; i < ira->new_irb.constants.length; i += 1) {
        auto constant = ira->new_irb.constants.items[i];
        if (constant->base.base.ref_count == 0 && !ir_inst_gen_has_side_effects(&constant->base))
            destroy_instruction_gen(&constant->base);
    }
    ira->new_irb.constants.deinit(&heap::c_allocator);

    heap::c_allocator.destroy(ira);
}

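// Resolves the ZigValue a comptime-known pointer points at, ignoring any
// inferred-struct-field information on the pointer type (that is handled by
// const_ptr_pointee_unchecked below).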
static ZigValue *const_ptr_pointee_unchecked_no_isf(CodeGen *g, ZigValue *const_val) {
    assert(get_src_ptr_type(const_val->type) != nullptr);
    assert(const_val->special == ConstValSpecialStatic);

    switch (type_has_one_possible_value(g, const_val->type->data.pointer.child_type)) {
        case OnePossibleValueInvalid:
            return nullptr;
        case OnePossibleValueYes:
            return get_the_one_possible_value(g, const_val->type->data.pointer.child_type);
        case OnePossibleValueNo:
            break;
    }

    ZigValue *result;
    switch (const_val->data.x_ptr.special) {
        case ConstPtrSpecialInvalid:
            zig_unreachable();
        case ConstPtrSpecialRef:
            result = const_val->data.x_ptr.data.ref.pointee;
            break;
        case ConstPtrSpecialBaseArray: {
            ZigValue *array_val = const_val->data.x_ptr.data.base_array.array_val;
            size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
            if (elem_index == array_val->type->data.array.len) {
                result = array_val->type->data.array.sentinel;
            } else {
                expand_undef_array(g, array_val);
                result = &array_val->data.x_array.data.s_none.elements[elem_index];
            }
            break;
        }
        case ConstPtrSpecialSubArray: {
            ZigValue *array_val = const_val->data.x_ptr.data.base_array.array_val;
            size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;

            expand_undef_array(g, array_val);
            result = g->pass1_arena->create<ZigValue>();
            result->special = array_val->special;
            result->type = get_array_type(g, array_val->type->data.array.child_type,
                    array_val->type->data.array.len - elem_index, array_val->type->data.array.sentinel);
            result->data.x_array.special = ConstArraySpecialNone;
            result->data.x_array.data.s_none.elements = &array_val->data.x_array.data.s_none.elements[elem_index];
            result->parent.id = ConstParentIdArray;
            result->parent.data.p_array.array_val = array_val;
            result->parent.data.p_array.elem_index = elem_index;
            break;
        }
        case ConstPtrSpecialBaseStruct: {
            ZigValue *struct_val = const_val->data.x_ptr.data.base_struct.struct_val;
            expand_undef_struct(g, struct_val);
            result = struct_val->data.x_struct.fields[const_val->data.x_ptr.data.base_struct.field_index];
            break;
        }
        case ConstPtrSpecialBaseErrorUnionCode:
            result = const_val->data.x_ptr.data.base_err_union_code.err_union_val->data.x_err_union.error_set;
            break;
        case ConstPtrSpecialBaseErrorUnionPayload:
            result = const_val->data.x_ptr.data.base_err_union_payload.err_union_val->data.x_err_union.payload;
            break;
        case ConstPtrSpecialBaseOptionalPayload:
            result = const_val->data.x_ptr.data.base_optional_payload.optional_val->data.x_optional;
            break;
        case ConstPtrSpecialNull:
            result = const_val;
            break;
        case ConstPtrSpecialHardCodedAddr:
            zig_unreachable();
        case ConstPtrSpecialDiscard:
            zig_unreachable();
        case ConstPtrSpecialFunction:
            zig_unreachable();
    }
    assert(result != nullptr);
    return result;
}

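// Like const_ptr_pointee_unchecked_no_isf, but when the pointer type carries an
// InferredStructField the pointee is looked up through that field of the inferred
// struct type (or the field's comptime init value).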
static ZigValue *const_ptr_pointee_unchecked(CodeGen *g, ZigValue *const_val) {
    assert(get_src_ptr_type(const_val->type) != nullptr);
    assert(const_val->special == ConstValSpecialStatic);

    InferredStructField *isf = const_val->type->data.pointer.inferred_struct_field;
    if (isf != nullptr) {
        TypeStructField *field = find_struct_type_field(isf->inferred_struct_type, isf->field_name);
        assert(field != nullptr);
        if (field->is_comptime) {
            assert(field->init_val != nullptr);
            return field->init_val;
        }
        ZigValue *struct_val = const_ptr_pointee_unchecked_no_isf(g, const_val);
        assert(struct_val->type->id == ZigTypeIdStruct);
        return struct_val->data.x_struct.fields[field->src_index];
    }

    return const_ptr_pointee_unchecked_no_isf(g, const_val);
}

static bool is_tuple(ZigType *type) {
    return type->id == ZigTypeIdStruct && type->data.structure.special == StructSpecialInferredTuple;
}

static bool is_slice(ZigType *type) {
    return type->id == ZigTypeIdStruct && type->data.structure.special == StructSpecialSlice;
}

// Returns true when a ZigValue of one of these types can be reinterpreted as the
// other type and the value remains meaningful.
static bool types_have_same_zig_comptime_repr(CodeGen *codegen, ZigType *expected, ZigType *actual) {
    if (expected == actual)
        return true;

    if (get_src_ptr_type(expected) != nullptr && get_src_ptr_type(actual) != nullptr)
        return true;

    if (is_opt_err_set(expected) && is_opt_err_set(actual))
        return true;

    if (expected->id != actual->id)
        return false;

    switch (expected->id) {
        case ZigTypeIdInvalid:
        case ZigTypeIdUnreachable:
            zig_unreachable();
        case ZigTypeIdMetaType:
        case ZigTypeIdVoid:
        case ZigTypeIdBool:
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
        case ZigTypeIdBoundFn:
        case ZigTypeIdErrorSet:
        case ZigTypeIdOpaque:
        case ZigTypeIdAnyFrame:
        case ZigTypeIdFn:
            return true;
        case ZigTypeIdPointer:
            return expected->data.pointer.inferred_struct_field == actual->data.pointer.inferred_struct_field;
        case ZigTypeIdFloat:
            return expected->data.floating.bit_count == actual->data.floating.bit_count;
        case ZigTypeIdInt:
            return expected->data.integral.is_signed == actual->data.integral.is_signed;
        case ZigTypeIdStruct:
            return is_slice(expected) && is_slice(actual);
        case ZigTypeIdOptional:
        case ZigTypeIdErrorUnion:
        case ZigTypeIdEnum:
        case ZigTypeIdUnion:
        case ZigTypeIdVector:
        case ZigTypeIdFnFrame:
            return false;
        case ZigTypeIdArray:
            return expected->data.array.len == actual->data.array.len &&
                expected->data.array.child_type == actual->data.array.child_type &&
                (expected->data.array.sentinel == nullptr || (actual->data.array.sentinel != nullptr &&
                     const_values_equal(codegen, expected->data.array.sentinel, actual->data.array.sentinel)));
    }
    zig_unreachable();
}

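// Whether IR in this scope should be evaluated at compile time: true when the
// executable is marked inline, or when an enclosing scope (up to the function
// definition) is a comptime scope; @TypeOf scopes disable it.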
static bool ir_should_inline(IrExecutableSrc *exec, Scope *scope) {
    if (exec->is_inline)
        return true;

    while (scope != nullptr) {
        if (scope->id == ScopeIdCompTime)
            return true;
        if (scope->id == ScopeIdTypeOf)
            return false;
        if (scope->id == ScopeIdFnDef)
            break;
        scope = scope->parent;
    }
    return false;
}

static void ir_instruction_append(IrBasicBlockSrc *basic_block, IrInstSrc *instruction) {
    assert(basic_block);
    assert(instruction);
    basic_block->instruction_list.append(instruction);
}

static void ir_inst_gen_append(IrBasicBlockGen *basic_block, IrInstGen *instruction) {
    assert(basic_block);
    assert(instruction);
    basic_block->instruction_list.append(instruction);
}

static size_t exec_next_debug_id(IrExecutableSrc *exec) {
    size_t result = exec->next_debug_id;
    exec->next_debug_id += 1;
    return result;
}

static size_t exec_next_debug_id_gen(IrExecutableGen *exec) {
    size_t result = exec->next_debug_id;
    exec->next_debug_id += 1;
    return result;
}

static ZigFn *exec_fn_entry(IrExecutableSrc *exec) {
    return exec->fn_entry;
}

static Buf *exec_c_import_buf(IrExecutableSrc *exec) {
    return exec->c_import_buf;
}

static bool value_is_comptime(ZigValue *const_val) {
    return const_val->special != ConstValSpecialRuntime;
}

static bool instr_is_comptime(IrInstGen *instruction) {
    return value_is_comptime(instruction->value);
}

static bool instr_is_unreachable(IrInstSrc *instruction) {
    return instruction->is_noreturn;
}

static void ir_ref_bb(IrBasicBlockSrc *bb) {
    bb->ref_count += 1;
}

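// Bumps the instruction's use count; a reference from a different basic block also
// bumps the owning block's ref count, except for constants and noreturn instructions.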
static void ir_ref_instruction(IrInstSrc *instruction, IrBasicBlockSrc *cur_bb) {
    assert(instruction->id != IrInstSrcIdInvalid);
    instruction->base.ref_count += 1;
    if (instruction->owner_bb != cur_bb && !instr_is_unreachable(instruction)
        && instruction->id != IrInstSrcIdConst)
    {
        ir_ref_bb(instruction->owner_bb);
    }
}

static void ir_ref_inst_gen(IrInstGen *instruction) {
    assert(instruction->id != IrInstGenIdInvalid);
    instruction->base.ref_count += 1;
}

static void ir_ref_var(ZigVar *var) {
    var->ref_count += 1;
}

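// Allocates an undefined comptime result value of `expected_type` together with a
// comptime-mutable pointer ZigValue referring to it; callers evaluate an expression
// through the pointer and then read the result out of *out_result.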
static void create_result_ptr(CodeGen *codegen, ZigType *expected_type,
        ZigValue **out_result, ZigValue **out_result_ptr)
{
    ZigValue *result = codegen->pass1_arena->create<ZigValue>();
    ZigValue *result_ptr = codegen->pass1_arena->create<ZigValue>();
    result->special = ConstValSpecialUndef;
    result->type = expected_type;
    result_ptr->special = ConstValSpecialStatic;
    result_ptr->type = get_pointer_to_type(codegen, result->type, false);
    result_ptr->data.x_ptr.mut = ConstPtrMutComptimeVar;
    result_ptr->data.x_ptr.special = ConstPtrSpecialRef;
    result_ptr->data.x_ptr.data.ref.pointee = result;

    *out_result = result;
    *out_result_ptr = result_ptr;
}

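// Evaluates `node` as a compile-time expression that must produce a type;
// returns builtin_types.entry_invalid on failure.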
ZigType *ir_analyze_type_expr(IrAnalyze *ira, Scope *scope, AstNode *node) {
    Error err;

    ZigValue *result;
    ZigValue *result_ptr;
    create_result_ptr(ira->codegen, ira->codegen->builtin_types.entry_type, &result, &result_ptr);

    if ((err = ir_eval_const_value(ira->codegen, scope, node, result_ptr,
            ira->new_irb.exec->backward_branch_count, ira->new_irb.exec->backward_branch_quota,
            nullptr, nullptr, node, nullptr, ira->new_irb.exec, nullptr, UndefBad)))
    {
        return ira->codegen->builtin_types.entry_invalid;
    }
    if (type_is_invalid(result->type))
        return ira->codegen->builtin_types.entry_invalid;

    assert(result->special != ConstValSpecialRuntime);
    ZigType *res_type = result->data.x_type;

    return res_type;
}

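// Basic block constructors for the src and gen IR; debug ids come from the owning
// executable's counter.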
static IrBasicBlockSrc *ir_create_basic_block(IrBuilderSrc *irb, Scope *scope, const char *name_hint) {
    IrBasicBlockSrc *result = heap::c_allocator.create<IrBasicBlockSrc>();
    result->scope = scope;
    result->name_hint = name_hint;
    result->debug_id = exec_next_debug_id(irb->exec);
    result->index = UINT32_MAX; // set later
    return result;
}

static IrBasicBlockGen *ir_create_basic_block_gen(IrAnalyze *ira, Scope *scope, const char *name_hint) {
    IrBasicBlockGen *result = heap::c_allocator.create<IrBasicBlockGen>();
    result->scope = scope;
    result->name_hint = name_hint;
    result->debug_id = exec_next_debug_id_gen(ira->new_irb.exec);
    return result;
}

static IrBasicBlockGen *ir_build_bb_from(IrAnalyze *ira, IrBasicBlockSrc *other_bb) {
    IrBasicBlockGen *new_bb = ir_create_basic_block_gen(ira, other_bb->scope, other_bb->name_hint);
    other_bb->child = new_bb;
    return new_bb;
}

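// The ir_inst_id overloads map each concrete instruction struct to its enum tag at
// compile time, presumably so that templated instruction-allocation helpers later in
// this file can stamp the correct id without repeating it per instruction kind.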
static constexpr IrInstSrcId ir_inst_id(IrInstSrcDeclVar *) {
    return IrInstSrcIdDeclVar;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcBr *) {
    return IrInstSrcIdBr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCondBr *) {
    return IrInstSrcIdCondBr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSwitchBr *) {
    return IrInstSrcIdSwitchBr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSwitchVar *) {
    return IrInstSrcIdSwitchVar;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSwitchElseVar *) {
    return IrInstSrcIdSwitchElseVar;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSwitchTarget *) {
    return IrInstSrcIdSwitchTarget;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcPhi *) {
    return IrInstSrcIdPhi;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcUnOp *) {
    return IrInstSrcIdUnOp;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcBinOp *) {
    return IrInstSrcIdBinOp;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcMergeErrSets *) {
    return IrInstSrcIdMergeErrSets;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcLoadPtr *) {
    return IrInstSrcIdLoadPtr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcStorePtr *) {
    return IrInstSrcIdStorePtr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFieldPtr *) {
    return IrInstSrcIdFieldPtr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcElemPtr *) {
    return IrInstSrcIdElemPtr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcVarPtr *) {
    return IrInstSrcIdVarPtr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCall *) {
    return IrInstSrcIdCall;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCallArgs *) {
    return IrInstSrcIdCallArgs;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCallExtra *) {
    return IrInstSrcIdCallExtra;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAsyncCallExtra *) {
    return IrInstSrcIdAsyncCallExtra;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcConst *) {
    return IrInstSrcIdConst;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcReturn *) {
    return IrInstSrcIdReturn;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcContainerInitList *) {
    return IrInstSrcIdContainerInitList;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcContainerInitFields *) {
    return IrInstSrcIdContainerInitFields;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcUnreachable *) {
    return IrInstSrcIdUnreachable;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcTypeOf *) {
    return IrInstSrcIdTypeOf;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetCold *) {
    return IrInstSrcIdSetCold;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetRuntimeSafety *) {
    return IrInstSrcIdSetRuntimeSafety;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetFloatMode *) {
    return IrInstSrcIdSetFloatMode;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcArrayType *) {
    return IrInstSrcIdArrayType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAnyFrameType *) {
    return IrInstSrcIdAnyFrameType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSliceType *) {
    return IrInstSrcIdSliceType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAsm *) {
    return IrInstSrcIdAsm;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSizeOf *) {
    return IrInstSrcIdSizeOf;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcTestNonNull *) {
    return IrInstSrcIdTestNonNull;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcOptionalUnwrapPtr *) {
    return IrInstSrcIdOptionalUnwrapPtr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcClz *) {
    return IrInstSrcIdClz;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCtz *) {
    return IrInstSrcIdCtz;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcPopCount *) {
    return IrInstSrcIdPopCount;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcBswap *) {
    return IrInstSrcIdBswap;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcBitReverse *) {
    return IrInstSrcIdBitReverse;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcImport *) {
    return IrInstSrcIdImport;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCImport *) {
    return IrInstSrcIdCImport;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCInclude *) {
    return IrInstSrcIdCInclude;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCDefine *) {
    return IrInstSrcIdCDefine;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCUndef *) {
    return IrInstSrcIdCUndef;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcRef *) {
    return IrInstSrcIdRef;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCompileErr *) {
    return IrInstSrcIdCompileErr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCompileLog *) {
    return IrInstSrcIdCompileLog;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrName *) {
    return IrInstSrcIdErrName;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcEmbedFile *) {
    return IrInstSrcIdEmbedFile;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCmpxchg *) {
    return IrInstSrcIdCmpxchg;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFence *) {
    return IrInstSrcIdFence;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcTruncate *) {
    return IrInstSrcIdTruncate;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcIntCast *) {
    return IrInstSrcIdIntCast;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFloatCast *) {
    return IrInstSrcIdFloatCast;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcIntToFloat *) {
    return IrInstSrcIdIntToFloat;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFloatToInt *) {
    return IrInstSrcIdFloatToInt;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcBoolToInt *) {
    return IrInstSrcIdBoolToInt;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcVectorType *) {
    return IrInstSrcIdVectorType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcShuffleVector *) {
    return IrInstSrcIdShuffleVector;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSplat *) {
    return IrInstSrcIdSplat;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcBoolNot *) {
    return IrInstSrcIdBoolNot;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcMemset *) {
    return IrInstSrcIdMemset;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcMemcpy *) {
    return IrInstSrcIdMemcpy;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSlice *) {
    return IrInstSrcIdSlice;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcBreakpoint *) {
    return IrInstSrcIdBreakpoint;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcReturnAddress *) {
    return IrInstSrcIdReturnAddress;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFrameAddress *) {
    return IrInstSrcIdFrameAddress;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFrameHandle *) {
    return IrInstSrcIdFrameHandle;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFrameType *) {
    return IrInstSrcIdFrameType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFrameSize *) {
    return IrInstSrcIdFrameSize;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAlignOf *) {
    return IrInstSrcIdAlignOf;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcOverflowOp *) {
    return IrInstSrcIdOverflowOp;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcTestErr *) {
    return IrInstSrcIdTestErr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcMulAdd *) {
    return IrInstSrcIdMulAdd;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFloatOp *) {
    return IrInstSrcIdFloatOp;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcUnwrapErrCode *) {
    return IrInstSrcIdUnwrapErrCode;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcUnwrapErrPayload *) {
    return IrInstSrcIdUnwrapErrPayload;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFnProto *) {
    return IrInstSrcIdFnProto;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcTestComptime *) {
    return IrInstSrcIdTestComptime;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcPtrCast *) {
    return IrInstSrcIdPtrCast;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcBitCast *) {
    return IrInstSrcIdBitCast;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcIntToPtr *) {
    return IrInstSrcIdIntToPtr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcPtrToInt *) {
    return IrInstSrcIdPtrToInt;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcIntToEnum *) {
    return IrInstSrcIdIntToEnum;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcEnumToInt *) {
    return IrInstSrcIdEnumToInt;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcIntToErr *) {
    return IrInstSrcIdIntToErr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrToInt *) {
    return IrInstSrcIdErrToInt;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCheckSwitchProngs *) {
    return IrInstSrcIdCheckSwitchProngs;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCheckStatementIsVoid *) {
    return IrInstSrcIdCheckStatementIsVoid;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcTypeName *) {
    return IrInstSrcIdTypeName;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcDeclRef *) {
    return IrInstSrcIdDeclRef;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcPanic *) {
    return IrInstSrcIdPanic;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcTagName *) {
    return IrInstSrcIdTagName;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcTagType *) {
    return IrInstSrcIdTagType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcFieldParentPtr *) {
    return IrInstSrcIdFieldParentPtr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcByteOffsetOf *) {
    return IrInstSrcIdByteOffsetOf;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcBitOffsetOf *) {
    return IrInstSrcIdBitOffsetOf;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcTypeInfo *) {
    return IrInstSrcIdTypeInfo;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcType *) {
    return IrInstSrcIdType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcHasField *) {
    return IrInstSrcIdHasField;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetEvalBranchQuota *) {
    return IrInstSrcIdSetEvalBranchQuota;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcPtrType *) {
    return IrInstSrcIdPtrType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAlignCast *) {
    return IrInstSrcIdAlignCast;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcImplicitCast *) {
    return IrInstSrcIdImplicitCast;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcResolveResult *) {
    return IrInstSrcIdResolveResult;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcResetResult *) {
    return IrInstSrcIdResetResult;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcOpaqueType *) {
    return IrInstSrcIdOpaqueType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetAlignStack *) {
    return IrInstSrcIdSetAlignStack;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcArgType *) {
    return IrInstSrcIdArgType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcExport *) {
    return IrInstSrcIdExport;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrorReturnTrace *) {
    return IrInstSrcIdErrorReturnTrace;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrorUnion *) {
    return IrInstSrcIdErrorUnion;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAtomicRmw *) {
    return IrInstSrcIdAtomicRmw;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAtomicLoad *) {
    return IrInstSrcIdAtomicLoad;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAtomicStore *) {
    return IrInstSrcIdAtomicStore;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSaveErrRetAddr *) {
    return IrInstSrcIdSaveErrRetAddr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAddImplicitReturnType *) {
    return IrInstSrcIdAddImplicitReturnType;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrSetCast *) {
    return IrInstSrcIdErrSetCast;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcCheckRuntimeScope *) {
    return IrInstSrcIdCheckRuntimeScope;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcHasDecl *) {
    return IrInstSrcIdHasDecl;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcUndeclaredIdent *) {
    return IrInstSrcIdUndeclaredIdent;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAlloca *) {
    return IrInstSrcIdAlloca;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcEndExpr *) {
    return IrInstSrcIdEndExpr;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcUnionInitNamedField *) {
    return IrInstSrcIdUnionInitNamedField;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSuspendBegin *) {
    return IrInstSrcIdSuspendBegin;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSuspendFinish *) {
    return IrInstSrcIdSuspendFinish;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcAwait *) {
    return IrInstSrcIdAwait;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcResume *) {
    return IrInstSrcIdResume;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSpillBegin *) {
    return IrInstSrcIdSpillBegin;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSpillEnd *) {
    return IrInstSrcIdSpillEnd;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcWasmMemorySize *) {
    return IrInstSrcIdWasmMemorySize;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcWasmMemoryGrow *) {
    return IrInstSrcIdWasmMemoryGrow;
}

static constexpr IrInstSrcId ir_inst_id(IrInstSrcSrc *) {
    return IrInstSrcIdSrc;
}

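// Corresponding overloads for the gen-pass instruction structs and IrInstGenId.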
static constexpr IrInstGenId ir_inst_id(IrInstGenDeclVar *) {
    return IrInstGenIdDeclVar;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenBr *) {
    return IrInstGenIdBr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenCondBr *) {
    return IrInstGenIdCondBr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenSwitchBr *) {
    return IrInstGenIdSwitchBr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenPhi *) {
    return IrInstGenIdPhi;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenBinaryNot *) {
    return IrInstGenIdBinaryNot;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenNegation *) {
    return IrInstGenIdNegation;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenNegationWrapping *) {
    return IrInstGenIdNegationWrapping;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenBinOp *) {
    return IrInstGenIdBinOp;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenLoadPtr *) {
    return IrInstGenIdLoadPtr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenStorePtr *) {
    return IrInstGenIdStorePtr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenVectorStoreElem *) {
    return IrInstGenIdVectorStoreElem;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenStructFieldPtr *) {
    return IrInstGenIdStructFieldPtr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenUnionFieldPtr *) {
    return IrInstGenIdUnionFieldPtr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenElemPtr *) {
    return IrInstGenIdElemPtr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenVarPtr *) {
    return IrInstGenIdVarPtr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenReturnPtr *) {
    return IrInstGenIdReturnPtr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenCall *) {
    return IrInstGenIdCall;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenReturn *) {
    return IrInstGenIdReturn;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenCast *) {
    return IrInstGenIdCast;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenUnreachable *) {
    return IrInstGenIdUnreachable;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenAsm *) {
    return IrInstGenIdAsm;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenTestNonNull *) {
    return IrInstGenIdTestNonNull;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenOptionalUnwrapPtr *) {
    return IrInstGenIdOptionalUnwrapPtr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenOptionalWrap *) {
    return IrInstGenIdOptionalWrap;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenUnionTag *) {
    return IrInstGenIdUnionTag;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenClz *) {
    return IrInstGenIdClz;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenCtz *) {
    return IrInstGenIdCtz;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenPopCount *) {
    return IrInstGenIdPopCount;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenBswap *) {
    return IrInstGenIdBswap;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenBitReverse *) {
    return IrInstGenIdBitReverse;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenRef *) {
    return IrInstGenIdRef;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenErrName *) {
    return IrInstGenIdErrName;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenCmpxchg *) {
    return IrInstGenIdCmpxchg;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenFence *) {
    return IrInstGenIdFence;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenTruncate *) {
    return IrInstGenIdTruncate;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenShuffleVector *) {
    return IrInstGenIdShuffleVector;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenSplat *) {
    return IrInstGenIdSplat;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenBoolNot *) {
    return IrInstGenIdBoolNot;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenMemset *) {
    return IrInstGenIdMemset;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenMemcpy *) {
    return IrInstGenIdMemcpy;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenSlice *) {
    return IrInstGenIdSlice;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenBreakpoint *) {
    return IrInstGenIdBreakpoint;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenReturnAddress *) {
    return IrInstGenIdReturnAddress;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenFrameAddress *) {
    return IrInstGenIdFrameAddress;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenFrameHandle *) {
    return IrInstGenIdFrameHandle;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenFrameSize *) {
    return IrInstGenIdFrameSize;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenOverflowOp *) {
    return IrInstGenIdOverflowOp;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenTestErr *) {
    return IrInstGenIdTestErr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenMulAdd *) {
    return IrInstGenIdMulAdd;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenFloatOp *) {
    return IrInstGenIdFloatOp;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenUnwrapErrCode *) {
    return IrInstGenIdUnwrapErrCode;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenUnwrapErrPayload *) {
    return IrInstGenIdUnwrapErrPayload;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenErrWrapCode *) {
    return IrInstGenIdErrWrapCode;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenErrWrapPayload *) {
    return IrInstGenIdErrWrapPayload;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenPtrCast *) {
    return IrInstGenIdPtrCast;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenBitCast *) {
    return IrInstGenIdBitCast;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenWidenOrShorten *) {
    return IrInstGenIdWidenOrShorten;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenIntToPtr *) {
    return IrInstGenIdIntToPtr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenPtrToInt *) {
    return IrInstGenIdPtrToInt;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenIntToEnum *) {
    return IrInstGenIdIntToEnum;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenIntToErr *) {
    return IrInstGenIdIntToErr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenErrToInt *) {
    return IrInstGenIdErrToInt;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenPanic *) {
    return IrInstGenIdPanic;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenTagName *) {
    return IrInstGenIdTagName;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenFieldParentPtr *) {
    return IrInstGenIdFieldParentPtr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenAlignCast *) {
    return IrInstGenIdAlignCast;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenErrorReturnTrace *) {
    return IrInstGenIdErrorReturnTrace;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenAtomicRmw *) {
    return IrInstGenIdAtomicRmw;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenAtomicLoad *) {
    return IrInstGenIdAtomicLoad;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenAtomicStore *) {
    return IrInstGenIdAtomicStore;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenSaveErrRetAddr *) {
    return IrInstGenIdSaveErrRetAddr;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenVectorToArray *) {
    return IrInstGenIdVectorToArray;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenArrayToVector *) {
    return IrInstGenIdArrayToVector;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenAssertZero *) {
    return IrInstGenIdAssertZero;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenAssertNonNull *) {
    return IrInstGenIdAssertNonNull;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenPtrOfArrayToSlice *) {
    return IrInstGenIdPtrOfArrayToSlice;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenSuspendBegin *) {
    return IrInstGenIdSuspendBegin;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenSuspendFinish *) {
    return IrInstGenIdSuspendFinish;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenAwait *) {
    return IrInstGenIdAwait;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenResume *) {
    return IrInstGenIdResume;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenSpillBegin *) {
    return IrInstGenIdSpillBegin;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenSpillEnd *) {
    return IrInstGenIdSpillEnd;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenVectorExtractElem *) {
    return IrInstGenIdVectorExtractElem;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenAlloca *) {
    return IrInstGenIdAlloca;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenConst *) {
    return IrInstGenIdConst;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenWasmMemorySize *) {
    return IrInstGenIdWasmMemorySize;
}

static constexpr IrInstGenId ir_inst_id(IrInstGenWasmMemoryGrow *) {
    return IrInstGenIdWasmMemoryGrow;
}

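// Templated instruction factories. ir_create_instruction allocates a src-pass
// instruction of type T, tags it via the ir_inst_id overloads above, and records
// scope, source node, debug id, and owning basic block; it does not append the
// instruction to the block.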
template<typename T>
static T *ir_create_instruction(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    T *special_instruction = heap::c_allocator.create<T>();
    special_instruction->base.id = ir_inst_id(special_instruction);
    special_instruction->base.base.scope = scope;
    special_instruction->base.base.source_node = source_node;
    special_instruction->base.base.debug_id = exec_next_debug_id(irb->exec);
    special_instruction->base.owner_bb = irb->current_basic_block;
    return special_instruction;
}

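// Gen-pass counterpart of ir_create_instruction; it additionally allocates the
// instruction's result ZigValue from the pass1 arena.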
template<typename T>
static T *ir_create_inst_gen(IrBuilderGen *irb, Scope *scope, AstNode *source_node) {
    T *special_instruction = heap::c_allocator.create<T>();
    special_instruction->base.id = ir_inst_id(special_instruction);
    special_instruction->base.base.scope = scope;
    special_instruction->base.base.source_node = source_node;
    special_instruction->base.base.debug_id = exec_next_debug_id_gen(irb->exec);
    special_instruction->base.owner_bb = irb->current_basic_block;
    special_instruction->base.value = irb->codegen->pass1_arena->create<ZigValue>();
    return special_instruction;
}

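// Gen-pass factory that skips the result-value allocation so callers can install
// an interned value instead (see ir_build_inst_noreturn and ir_build_inst_void).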
template<typename T>
static T *ir_create_inst_noval(IrBuilderGen *irb, Scope *scope, AstNode *source_node) {
    T *special_instruction = heap::c_allocator.create<T>();
    special_instruction->base.id = ir_inst_id(special_instruction);
    special_instruction->base.base.scope = scope;
    special_instruction->base.base.source_node = source_node;
    special_instruction->base.base.debug_id = exec_next_debug_id_gen(irb->exec);
    special_instruction->base.owner_bb = irb->current_basic_block;
    return special_instruction;
}

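// The ir_build_* helpers wrap the factories above and immediately append the new
// instruction to irb->current_basic_block.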
template<typename T>
static T *ir_build_instruction(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    T *special_instruction = ir_create_instruction<T>(irb, scope, source_node);
    ir_instruction_append(irb->current_basic_block, &special_instruction->base);
    return special_instruction;
}

template<typename T>
static T *ir_build_inst_gen(IrBuilderGen *irb, Scope *scope, AstNode *source_node) {
    T *special_instruction = ir_create_inst_gen<T>(irb, scope, source_node);
    ir_inst_gen_append(irb->current_basic_block, &special_instruction->base);
    return special_instruction;
}

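// Noreturn gen instructions get the interned `unreachable` value as their result.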
template<typename T>
static T *ir_build_inst_noreturn(IrBuilderGen *irb, Scope *scope, AstNode *source_node) {
    T *special_instruction = ir_create_inst_noval<T>(irb, scope, source_node);
    special_instruction->base.value = irb->codegen->intern.for_unreachable();
    ir_inst_gen_append(irb->current_basic_block, &special_instruction->base);
    return special_instruction;
}

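// Void gen instructions get the interned `void` value as their result.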
template<typename T>
static T *ir_build_inst_void(IrBuilderGen *irb, Scope *scope, AstNode *source_node) {
    T *special_instruction = ir_create_inst_noval<T>(irb, scope, source_node);
    special_instruction->base.value = irb->codegen->intern.for_void();
    ir_inst_gen_append(irb->current_basic_block, &special_instruction->base);
    return special_instruction;
}

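// Builds a gen-pass alloca without attaching it to a basic block: the result type
// is a pointer to var_type, the reference count starts at 1, and the instruction
// is tracked on the function's alloca_gen_list instead.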
IrInstGen *ir_create_alloca(CodeGen *g, Scope *scope, AstNode *source_node, ZigFn *fn,
        ZigType *var_type, const char *name_hint)
{
    IrInstGenAlloca *alloca_gen = heap::c_allocator.create<IrInstGenAlloca>();
    alloca_gen->base.id = IrInstGenIdAlloca;
    alloca_gen->base.base.source_node = source_node;
    alloca_gen->base.base.scope = scope;
    alloca_gen->base.value = g->pass1_arena->create<ZigValue>();
    alloca_gen->base.value->type = get_pointer_to_type(g, var_type, false);
    alloca_gen->base.base.ref_count = 1;
    alloca_gen->name_hint = name_hint;
    fn->alloca_gen_list.append(alloca_gen);
    return &alloca_gen->base;
}

static IrInstGen *ir_build_cast(IrAnalyze *ira, IrInst *source_instr, ZigType *dest_type,
    IrInstGen *value, CastOp cast_op)
{
    IrInstGenCast *inst = ir_build_inst_gen<IrInstGenCast>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->base.value->type = dest_type;
    inst->value = value;
    inst->cast_op = cast_op;

    ir_ref_inst_gen(value);

    return &inst->base;
}

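// Control-flow builders come in src/gen pairs. The src variants mark the
// instruction as noreturn and reference their operands and, where present, their
// target blocks; the gen variants obtain their noreturn result value via
// ir_build_inst_noreturn.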
static IrInstSrc *ir_build_cond_br(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *condition,
        IrBasicBlockSrc *then_block, IrBasicBlockSrc *else_block, IrInstSrc *is_comptime)
{
    IrInstSrcCondBr *inst = ir_build_instruction<IrInstSrcCondBr>(irb, scope, source_node);
    inst->base.is_noreturn = true;
    inst->condition = condition;
    inst->then_block = then_block;
    inst->else_block = else_block;
    inst->is_comptime = is_comptime;

    ir_ref_instruction(condition, irb->current_basic_block);
    ir_ref_bb(then_block);
    ir_ref_bb(else_block);
    if (is_comptime != nullptr) ir_ref_instruction(is_comptime, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_cond_br_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *condition,
        IrBasicBlockGen *then_block, IrBasicBlockGen *else_block)
{
    IrInstGenCondBr *inst = ir_build_inst_noreturn<IrInstGenCondBr>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->condition = condition;
    inst->then_block = then_block;
    inst->else_block = else_block;

    ir_ref_inst_gen(condition);

    return &inst->base;
}

static IrInstSrc *ir_build_return_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *operand) {
    IrInstSrcReturn *inst = ir_build_instruction<IrInstSrcReturn>(irb, scope, source_node);
    inst->base.is_noreturn = true;
    inst->operand = operand;

    if (operand != nullptr) ir_ref_instruction(operand, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_return_gen(IrAnalyze *ira, IrInst *source_inst, IrInstGen *operand) {
    IrInstGenReturn *inst = ir_build_inst_noreturn<IrInstGenReturn>(&ira->new_irb,
            source_inst->scope, source_inst->source_node);
    inst->operand = operand;

    if (operand != nullptr) ir_ref_inst_gen(operand);

    return &inst->base;
}

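// Constant builders. The singleton constants (void, undefined, null) reuse values
// from codegen->intern and append the instruction by hand; the rest allocate a
// fresh ZigValue in the pass1 arena and initialize it in place.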
static IrInstSrc *ir_build_const_void(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcConst *const_instruction = ir_create_instruction<IrInstSrcConst>(irb, scope, source_node);
    ir_instruction_append(irb->current_basic_block, &const_instruction->base);
    const_instruction->value = irb->codegen->intern.for_void();
    return &const_instruction->base;
}

static IrInstSrc *ir_build_const_undefined(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcConst *const_instruction = ir_create_instruction<IrInstSrcConst>(irb, scope, source_node);
    ir_instruction_append(irb->current_basic_block, &const_instruction->base);
    const_instruction->value = irb->codegen->intern.for_undefined();
    return &const_instruction->base;
}

static IrInstSrc *ir_build_const_uint(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, uint64_t value) {
    IrInstSrcConst *const_instruction = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
    const_instruction->value->type = irb->codegen->builtin_types.entry_num_lit_int;
    const_instruction->value->special = ConstValSpecialStatic;
    bigint_init_unsigned(&const_instruction->value->data.x_bigint, value);
    return &const_instruction->base;
}

static IrInstSrc *ir_build_const_bigint(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, BigInt *bigint) {
    IrInstSrcConst *const_instruction = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
    const_instruction->value->type = irb->codegen->builtin_types.entry_num_lit_int;
    const_instruction->value->special = ConstValSpecialStatic;
    bigint_init_bigint(&const_instruction->value->data.x_bigint, bigint);
    return &const_instruction->base;
}

static IrInstSrc *ir_build_const_bigfloat(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, BigFloat *bigfloat) {
    IrInstSrcConst *const_instruction = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
    const_instruction->value->type = irb->codegen->builtin_types.entry_num_lit_float;
    const_instruction->value->special = ConstValSpecialStatic;
    bigfloat_init_bigfloat(&const_instruction->value->data.x_bigfloat, bigfloat);
    return &const_instruction->base;
}

static IrInstSrc *ir_build_const_null(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcConst *const_instruction = ir_create_instruction<IrInstSrcConst>(irb, scope, source_node);
    ir_instruction_append(irb->current_basic_block, &const_instruction->base);
    const_instruction->value = irb->codegen->intern.for_null();
    return &const_instruction->base;
}

static IrInstSrc *ir_build_const_usize(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, uint64_t value) {
    IrInstSrcConst *const_instruction = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
    const_instruction->value->type = irb->codegen->builtin_types.entry_usize;
    const_instruction->value->special = ConstValSpecialStatic;
    bigint_init_unsigned(&const_instruction->value->data.x_bigint, value);
    return &const_instruction->base;
}

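// ir_create_const_type leaves the instruction detached from the current block;
// ir_build_const_type wraps it and appends. The same create/build split is used
// for the string-literal builders below.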
static IrInstSrc *ir_create_const_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ZigType *type_entry)
{
    IrInstSrcConst *const_instruction = ir_create_instruction<IrInstSrcConst>(irb, scope, source_node);
    const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
    const_instruction->value->type = irb->codegen->builtin_types.entry_type;
    const_instruction->value->special = ConstValSpecialStatic;
    const_instruction->value->data.x_type = type_entry;
    return &const_instruction->base;
}

static IrInstSrc *ir_build_const_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ZigType *type_entry)
{
    IrInstSrc *instruction = ir_create_const_type(irb, scope, source_node, type_entry);
    ir_instruction_append(irb->current_basic_block, instruction);
    return instruction;
}

static IrInstSrc *ir_build_const_import(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, ZigType *import) {
    IrInstSrcConst *const_instruction = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
    const_instruction->value->type = irb->codegen->builtin_types.entry_type;
    const_instruction->value->special = ConstValSpecialStatic;
    const_instruction->value->data.x_type = import;
    return &const_instruction->base;
}

static IrInstSrc *ir_build_const_bool(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, bool value) {
    IrInstSrcConst *const_instruction = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
    const_instruction->value->type = irb->codegen->builtin_types.entry_bool;
    const_instruction->value->special = ConstValSpecialStatic;
    const_instruction->value->data.x_bool = value;
    return &const_instruction->base;
}

static IrInstSrc *ir_build_const_enum_literal(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, Buf *name) {
    IrInstSrcConst *const_instruction = ir_build_instruction<IrInstSrcConst>(irb, scope, source_node);
    const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
    const_instruction->value->type = irb->codegen->builtin_types.entry_enum_literal;
    const_instruction->value->special = ConstValSpecialStatic;
    const_instruction->value->data.x_enum_literal = name;
    return &const_instruction->base;
}

static IrInstSrc *ir_create_const_str_lit(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, Buf *str) {
    IrInstSrcConst *const_instruction = ir_create_instruction<IrInstSrcConst>(irb, scope, source_node);
    const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
    init_const_str_lit(irb->codegen, const_instruction->value, str);

    return &const_instruction->base;
}

static IrInstSrc *ir_build_const_str_lit(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, Buf *str) {
    IrInstSrc *instruction = ir_create_const_str_lit(irb, scope, source_node, str);
    ir_instruction_append(irb->current_basic_block, instruction);
    return instruction;
}

static IrInstSrc *ir_build_bin_op(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrBinOp op_id,
        IrInstSrc *op1, IrInstSrc *op2, bool safety_check_on)
{
    IrInstSrcBinOp *inst = ir_build_instruction<IrInstSrcBinOp>(irb, scope, source_node);
    inst->op_id = op_id;
    inst->op1 = op1;
    inst->op2 = op2;
    inst->safety_check_on = safety_check_on;

    ir_ref_instruction(op1, irb->current_basic_block);
    ir_ref_instruction(op2, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_bin_op_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *res_type,
        IrBinOp op_id, IrInstGen *op1, IrInstGen *op2, bool safety_check_on)
{
    IrInstGenBinOp *inst = ir_build_inst_gen<IrInstGenBinOp>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = res_type;
    inst->op_id = op_id;
    inst->op1 = op1;
    inst->op2 = op2;
    inst->safety_check_on = safety_check_on;

    ir_ref_inst_gen(op1);
    ir_ref_inst_gen(op2);

    return &inst->base;
}


static IrInstSrc *ir_build_merge_err_sets(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *op1, IrInstSrc *op2, Buf *type_name)
{
    IrInstSrcMergeErrSets *inst = ir_build_instruction<IrInstSrcMergeErrSets>(irb, scope, source_node);
    inst->op1 = op1;
    inst->op2 = op2;
    inst->type_name = type_name;

    ir_ref_instruction(op1, irb->current_basic_block);
    ir_ref_instruction(op2, irb->current_basic_block);

    return &inst->base;
}

static IrInstSrc *ir_build_var_ptr_x(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, ZigVar *var,
        ScopeFnDef *crossed_fndef_scope)
{
    IrInstSrcVarPtr *instruction = ir_build_instruction<IrInstSrcVarPtr>(irb, scope, source_node);
    instruction->var = var;
    instruction->crossed_fndef_scope = crossed_fndef_scope;

    ir_ref_var(var);

    return &instruction->base;
}

static IrInstSrc *ir_build_var_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, ZigVar *var) {
    return ir_build_var_ptr_x(irb, scope, source_node, var, nullptr);
}

static IrInstGen *ir_build_var_ptr_gen(IrAnalyze *ira, IrInst *source_instr, ZigVar *var) {
    IrInstGenVarPtr *instruction = ir_build_inst_gen<IrInstGenVarPtr>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    instruction->var = var;

    ir_ref_var(var);

    return &instruction->base;
}

static IrInstGen *ir_build_return_ptr(IrAnalyze *ira, Scope *scope, AstNode *source_node, ZigType *ty) {
    IrInstGenReturnPtr *instruction = ir_build_inst_gen<IrInstGenReturnPtr>(&ira->new_irb, scope, source_node);
    instruction->base.value->type = ty;
    return &instruction->base;
}

static IrInstSrc *ir_build_elem_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *array_ptr, IrInstSrc *elem_index, bool safety_check_on, PtrLen ptr_len,
        AstNode *init_array_type_source_node)
{
    IrInstSrcElemPtr *instruction = ir_build_instruction<IrInstSrcElemPtr>(irb, scope, source_node);
    instruction->array_ptr = array_ptr;
    instruction->elem_index = elem_index;
    instruction->safety_check_on = safety_check_on;
    instruction->ptr_len = ptr_len;
    instruction->init_array_type_source_node = init_array_type_source_node;

    ir_ref_instruction(array_ptr, irb->current_basic_block);
    ir_ref_instruction(elem_index, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_elem_ptr_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        IrInstGen *array_ptr, IrInstGen *elem_index, bool safety_check_on, ZigType *return_type)
{
    IrInstGenElemPtr *instruction = ir_build_inst_gen<IrInstGenElemPtr>(&ira->new_irb, scope, source_node);
    instruction->base.value->type = return_type;
    instruction->array_ptr = array_ptr;
    instruction->elem_index = elem_index;
    instruction->safety_check_on = safety_check_on;

    ir_ref_inst_gen(array_ptr);
    ir_ref_inst_gen(elem_index);

    return &instruction->base;
}

static IrInstSrc *ir_build_field_ptr_instruction(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *container_ptr, IrInstSrc *field_name_expr, bool initializing)
{
    IrInstSrcFieldPtr *instruction = ir_build_instruction<IrInstSrcFieldPtr>(irb, scope, source_node);
    instruction->container_ptr = container_ptr;
    instruction->field_name_buffer = nullptr;
    instruction->field_name_expr = field_name_expr;
    instruction->initializing = initializing;

    ir_ref_instruction(container_ptr, irb->current_basic_block);
    ir_ref_instruction(field_name_expr, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_field_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *container_ptr, Buf *field_name, bool initializing)
{
    IrInstSrcFieldPtr *instruction = ir_build_instruction<IrInstSrcFieldPtr>(irb, scope, source_node);
    instruction->container_ptr = container_ptr;
    instruction->field_name_buffer = field_name;
    instruction->field_name_expr = nullptr;
    instruction->initializing = initializing;

    ir_ref_instruction(container_ptr, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_has_field(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *container_type, IrInstSrc *field_name)
{
    IrInstSrcHasField *instruction = ir_build_instruction<IrInstSrcHasField>(irb, scope, source_node);
    instruction->container_type = container_type;
    instruction->field_name = field_name;

    ir_ref_instruction(container_type, irb->current_basic_block);
    ir_ref_instruction(field_name, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_struct_field_ptr(IrAnalyze *ira, IrInst *source_instr,
    IrInstGen *struct_ptr, TypeStructField *field, ZigType *ptr_type)
{
    IrInstGenStructFieldPtr *inst = ir_build_inst_gen<IrInstGenStructFieldPtr>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->base.value->type = ptr_type;
    inst->struct_ptr = struct_ptr;
    inst->field = field;

    ir_ref_inst_gen(struct_ptr);

    return &inst->base;
}

static IrInstGen *ir_build_union_field_ptr(IrAnalyze *ira, IrInst *source_instr,
    IrInstGen *union_ptr, TypeUnionField *field, bool safety_check_on, bool initializing, ZigType *ptr_type)
{
    IrInstGenUnionFieldPtr *inst = ir_build_inst_gen<IrInstGenUnionFieldPtr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = ptr_type;
    inst->initializing = initializing;
    inst->safety_check_on = safety_check_on;
    inst->union_ptr = union_ptr;
    inst->field = field;

    ir_ref_inst_gen(union_ptr);

    return &inst->base;
}

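// Call builders. Instruction operands that may be null are referenced only when
// non-null; the src builders take a ResultLoc for result_loc and do not reference
// it, while ir_build_call_gen references its IrInstGen result_loc when present.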
static IrInstSrc *ir_build_call_extra(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *options, IrInstSrc *fn_ref, IrInstSrc *args, ResultLoc *result_loc)
{
    IrInstSrcCallExtra *call_instruction = ir_build_instruction<IrInstSrcCallExtra>(irb, scope, source_node);
    call_instruction->options = options;
    call_instruction->fn_ref = fn_ref;
    call_instruction->args = args;
    call_instruction->result_loc = result_loc;

    ir_ref_instruction(options, irb->current_basic_block);
    ir_ref_instruction(fn_ref, irb->current_basic_block);
    ir_ref_instruction(args, irb->current_basic_block);

    return &call_instruction->base;
}

static IrInstSrc *ir_build_async_call_extra(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        CallModifier modifier, IrInstSrc *fn_ref, IrInstSrc *ret_ptr, IrInstSrc *new_stack, IrInstSrc *args, ResultLoc *result_loc)
{
    IrInstSrcAsyncCallExtra *call_instruction = ir_build_instruction<IrInstSrcAsyncCallExtra>(irb, scope, source_node);
    call_instruction->modifier = modifier;
    call_instruction->fn_ref = fn_ref;
    call_instruction->ret_ptr = ret_ptr;
    call_instruction->new_stack = new_stack;
    call_instruction->args = args;
    call_instruction->result_loc = result_loc;

    ir_ref_instruction(fn_ref, irb->current_basic_block);
    if (ret_ptr != nullptr) ir_ref_instruction(ret_ptr, irb->current_basic_block);
    ir_ref_instruction(new_stack, irb->current_basic_block);
    ir_ref_instruction(args, irb->current_basic_block);

    return &call_instruction->base;
}

static IrInstSrc *ir_build_call_args(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *options, IrInstSrc *fn_ref, IrInstSrc **args_ptr, size_t args_len,
        ResultLoc *result_loc)
{
    IrInstSrcCallArgs *call_instruction = ir_build_instruction<IrInstSrcCallArgs>(irb, scope, source_node);
    call_instruction->options = options;
    call_instruction->fn_ref = fn_ref;
    call_instruction->args_ptr = args_ptr;
    call_instruction->args_len = args_len;
    call_instruction->result_loc = result_loc;

    ir_ref_instruction(options, irb->current_basic_block);
    ir_ref_instruction(fn_ref, irb->current_basic_block);
    for (size_t i = 0; i < args_len; i += 1)
        ir_ref_instruction(args_ptr[i], irb->current_basic_block);

    return &call_instruction->base;
}

static IrInstSrc *ir_build_call_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ZigFn *fn_entry, IrInstSrc *fn_ref, size_t arg_count, IrInstSrc **args,
        IrInstSrc *ret_ptr, CallModifier modifier, bool is_async_call_builtin,
        IrInstSrc *new_stack, ResultLoc *result_loc)
{
    IrInstSrcCall *call_instruction = ir_build_instruction<IrInstSrcCall>(irb, scope, source_node);
    call_instruction->fn_entry = fn_entry;
    call_instruction->fn_ref = fn_ref;
    call_instruction->args = args;
    call_instruction->arg_count = arg_count;
    call_instruction->modifier = modifier;
    call_instruction->is_async_call_builtin = is_async_call_builtin;
    call_instruction->new_stack = new_stack;
    call_instruction->result_loc = result_loc;
    call_instruction->ret_ptr = ret_ptr;

    if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block);
    for (size_t i = 0; i < arg_count; i += 1)
        ir_ref_instruction(args[i], irb->current_basic_block);
    if (ret_ptr != nullptr) ir_ref_instruction(ret_ptr, irb->current_basic_block);
    if (new_stack != nullptr) ir_ref_instruction(new_stack, irb->current_basic_block);

    return &call_instruction->base;
}

static IrInstGenCall *ir_build_call_gen(IrAnalyze *ira, IrInst *source_instruction,
        ZigFn *fn_entry, IrInstGen *fn_ref, size_t arg_count, IrInstGen **args,
        CallModifier modifier, IrInstGen *new_stack, bool is_async_call_builtin,
        IrInstGen *result_loc, ZigType *return_type)
{
    IrInstGenCall *call_instruction = ir_build_inst_gen<IrInstGenCall>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    call_instruction->base.value->type = return_type;
    call_instruction->fn_entry = fn_entry;
    call_instruction->fn_ref = fn_ref;
    call_instruction->args = args;
    call_instruction->arg_count = arg_count;
    call_instruction->modifier = modifier;
    call_instruction->is_async_call_builtin = is_async_call_builtin;
    call_instruction->new_stack = new_stack;
    call_instruction->result_loc = result_loc;

    if (fn_ref != nullptr) ir_ref_inst_gen(fn_ref);
    for (size_t i = 0; i < arg_count; i += 1)
        ir_ref_inst_gen(args[i]);
    if (new_stack != nullptr) ir_ref_inst_gen(new_stack);
    if (result_loc != nullptr) ir_ref_inst_gen(result_loc);

    return call_instruction;
}

static IrInstSrc *ir_build_phi(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        size_t incoming_count, IrBasicBlockSrc **incoming_blocks, IrInstSrc **incoming_values,
        ResultLocPeerParent *peer_parent)
{
    assert(incoming_count != 0);
    assert(incoming_count != SIZE_MAX);

    IrInstSrcPhi *phi_instruction = ir_build_instruction<IrInstSrcPhi>(irb, scope, source_node);
    phi_instruction->incoming_count = incoming_count;
    phi_instruction->incoming_blocks = incoming_blocks;
    phi_instruction->incoming_values = incoming_values;
    phi_instruction->peer_parent = peer_parent;

    for (size_t i = 0; i < incoming_count; i += 1) {
        ir_ref_bb(incoming_blocks[i]);
        ir_ref_instruction(incoming_values[i], irb->current_basic_block);
    }

    return &phi_instruction->base;
}

static IrInstGen *ir_build_phi_gen(IrAnalyze *ira, IrInst *source_instr, size_t incoming_count,
        IrBasicBlockGen **incoming_blocks, IrInstGen **incoming_values, ZigType *result_type)
{
    assert(incoming_count != 0);
    assert(incoming_count != SIZE_MAX);

    IrInstGenPhi *phi_instruction = ir_build_inst_gen<IrInstGenPhi>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    phi_instruction->base.value->type = result_type;
    phi_instruction->incoming_count = incoming_count;
    phi_instruction->incoming_blocks = incoming_blocks;
    phi_instruction->incoming_values = incoming_values;

    for (size_t i = 0; i < incoming_count; i += 1) {
        ir_ref_inst_gen(incoming_values[i]);
    }

    return &phi_instruction->base;
}

static IrInstSrc *ir_build_br(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrBasicBlockSrc *dest_block, IrInstSrc *is_comptime)
{
    IrInstSrcBr *inst = ir_build_instruction<IrInstSrcBr>(irb, scope, source_node);
    inst->base.is_noreturn = true;
    inst->dest_block = dest_block;
    inst->is_comptime = is_comptime;

    ir_ref_bb(dest_block);
    if (is_comptime) ir_ref_instruction(is_comptime, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_br_gen(IrAnalyze *ira, IrInst *source_instr, IrBasicBlockGen *dest_block) {
    IrInstGenBr *inst = ir_build_inst_noreturn<IrInstGenBr>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->dest_block = dest_block;

    return &inst->base;
}

static IrInstSrc *ir_build_ptr_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *child_type, bool is_const, bool is_volatile, PtrLen ptr_len,
        IrInstSrc *sentinel, IrInstSrc *align_value,
        uint32_t bit_offset_start, uint32_t host_int_bytes, bool is_allow_zero)
{
    IrInstSrcPtrType *inst = ir_build_instruction<IrInstSrcPtrType>(irb, scope, source_node);
    inst->sentinel = sentinel;
    inst->align_value = align_value;
    inst->child_type = child_type;
    inst->is_const = is_const;
    inst->is_volatile = is_volatile;
    inst->ptr_len = ptr_len;
    inst->bit_offset_start = bit_offset_start;
    inst->host_int_bytes = host_int_bytes;
    inst->is_allow_zero = is_allow_zero;

    if (sentinel) ir_ref_instruction(sentinel, irb->current_basic_block);
    if (align_value) ir_ref_instruction(align_value, irb->current_basic_block);
    ir_ref_instruction(child_type, irb->current_basic_block);

    return &inst->base;
}

static IrInstSrc *ir_build_un_op_lval(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrUnOp op_id,
        IrInstSrc *value, LVal lval, ResultLoc *result_loc)
{
    IrInstSrcUnOp *instruction = ir_build_instruction<IrInstSrcUnOp>(irb, scope, source_node);
    instruction->op_id = op_id;
    instruction->value = value;
    instruction->lval = lval;
    instruction->result_loc = result_loc;

    ir_ref_instruction(value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_un_op(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrUnOp op_id,
        IrInstSrc *value)
{
    return ir_build_un_op_lval(irb, scope, source_node, op_id, value, LValNone, nullptr);
}

static IrInstGen *ir_build_negation(IrAnalyze *ira, IrInst *source_instr, IrInstGen *operand, ZigType *expr_type) {
    IrInstGenNegation *instruction = ir_build_inst_gen<IrInstGenNegation>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = expr_type;
    instruction->operand = operand;

    ir_ref_inst_gen(operand);

    return &instruction->base;
}

static IrInstGen *ir_build_negation_wrapping(IrAnalyze *ira, IrInst *source_instr, IrInstGen *operand,
        ZigType *expr_type)
{
    IrInstGenNegationWrapping *instruction = ir_build_inst_gen<IrInstGenNegationWrapping>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = expr_type;
    instruction->operand = operand;

    ir_ref_inst_gen(operand);

    return &instruction->base;
}

static IrInstGen *ir_build_binary_not(IrAnalyze *ira, IrInst *source_instr, IrInstGen *operand,
        ZigType *expr_type)
{
    IrInstGenBinaryNot *instruction = ir_build_inst_gen<IrInstGenBinaryNot>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = expr_type;
    instruction->operand = operand;

    ir_ref_inst_gen(operand);

    return &instruction->base;
}

static IrInstSrc *ir_build_container_init_list(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        size_t item_count, IrInstSrc **elem_result_loc_list, IrInstSrc *result_loc,
        AstNode *init_array_type_source_node)
{
    IrInstSrcContainerInitList *container_init_list_instruction =
        ir_build_instruction<IrInstSrcContainerInitList>(irb, scope, source_node);
    container_init_list_instruction->item_count = item_count;
    container_init_list_instruction->elem_result_loc_list = elem_result_loc_list;
    container_init_list_instruction->result_loc = result_loc;
    container_init_list_instruction->init_array_type_source_node = init_array_type_source_node;

    for (size_t i = 0; i < item_count; i += 1) {
        ir_ref_instruction(elem_result_loc_list[i], irb->current_basic_block);
    }
    if (result_loc != nullptr) ir_ref_instruction(result_loc, irb->current_basic_block);

    return &container_init_list_instruction->base;
}

static IrInstSrc *ir_build_container_init_fields(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        size_t field_count, IrInstSrcContainerInitFieldsField *fields, IrInstSrc *result_loc)
{
    IrInstSrcContainerInitFields *container_init_fields_instruction =
        ir_build_instruction<IrInstSrcContainerInitFields>(irb, scope, source_node);
    container_init_fields_instruction->field_count = field_count;
    container_init_fields_instruction->fields = fields;
    container_init_fields_instruction->result_loc = result_loc;

    for (size_t i = 0; i < field_count; i += 1) {
        ir_ref_instruction(fields[i].result_loc, irb->current_basic_block);
    }
    if (result_loc != nullptr) ir_ref_instruction(result_loc, irb->current_basic_block);

    return &container_init_fields_instruction->base;
}

static IrInstSrc *ir_build_unreachable(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcUnreachable *inst = ir_build_instruction<IrInstSrcUnreachable>(irb, scope, source_node);
    inst->base.is_noreturn = true;
    return &inst->base;
}

static IrInstGen *ir_build_unreachable_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenUnreachable *inst = ir_build_inst_noreturn<IrInstGenUnreachable>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    return &inst->base;
}

static IrInstSrcStorePtr *ir_build_store_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *ptr, IrInstSrc *value)
{
    IrInstSrcStorePtr *instruction = ir_build_instruction<IrInstSrcStorePtr>(irb, scope, source_node);
    instruction->ptr = ptr;
    instruction->value = value;

    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(value, irb->current_basic_block);

    return instruction;
}

static IrInstGen *ir_build_store_ptr_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *ptr, IrInstGen *value) {
    IrInstGenStorePtr *instruction = ir_build_inst_void<IrInstGenStorePtr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->ptr = ptr;
    instruction->value = value;

    ir_ref_inst_gen(ptr);
    ir_ref_inst_gen(value);

    return &instruction->base;
}

static IrInstGen *ir_build_vector_store_elem(IrAnalyze *ira, IrInst *src_inst,
        IrInstGen *vector_ptr, IrInstGen *index, IrInstGen *value)
{
    IrInstGenVectorStoreElem *inst = ir_build_inst_void<IrInstGenVectorStoreElem>(
            &ira->new_irb, src_inst->scope, src_inst->source_node);
    inst->vector_ptr = vector_ptr;
    inst->index = index;
    inst->value = value;

    ir_ref_inst_gen(vector_ptr);
    ir_ref_inst_gen(index);
    ir_ref_inst_gen(value);

    return &inst->base;
}

static IrInstSrc *ir_build_var_decl_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ZigVar *var, IrInstSrc *align_value, IrInstSrc *ptr)
{
    IrInstSrcDeclVar *inst = ir_build_instruction<IrInstSrcDeclVar>(irb, scope, source_node);
    inst->var = var;
    inst->align_value = align_value;
    inst->ptr = ptr;

    if (align_value != nullptr) ir_ref_instruction(align_value, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_var_decl_gen(IrAnalyze *ira, IrInst *source_instruction,
        ZigVar *var, IrInstGen *var_ptr)
{
    IrInstGenDeclVar *inst = ir_build_inst_gen<IrInstGenDeclVar>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    inst->base.value->special = ConstValSpecialStatic;
    inst->base.value->type = ira->codegen->builtin_types.entry_void;
    inst->var = var;
    inst->var_ptr = var_ptr;

    ir_ref_inst_gen(var_ptr);

    return &inst->base;
}

static IrInstSrc *ir_build_export(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target, IrInstSrc *options)
{
    IrInstSrcExport *export_instruction = ir_build_instruction<IrInstSrcExport>(
            irb, scope, source_node);
    export_instruction->target = target;
    export_instruction->options = options;

    ir_ref_instruction(target, irb->current_basic_block);
    ir_ref_instruction(options, irb->current_basic_block);

    return &export_instruction->base;
}

static IrInstSrc *ir_build_load_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *ptr) {
    IrInstSrcLoadPtr *instruction = ir_build_instruction<IrInstSrcLoadPtr>(irb, scope, source_node);
    instruction->ptr = ptr;

    ir_ref_instruction(ptr, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_load_ptr_gen(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *ptr, ZigType *ty, IrInstGen *result_loc)
{
    IrInstGenLoadPtr *instruction = ir_build_inst_gen<IrInstGenLoadPtr>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = ty;
    instruction->ptr = ptr;
    instruction->result_loc = result_loc;

    ir_ref_inst_gen(ptr);
    if (result_loc != nullptr) ir_ref_inst_gen(result_loc);

    return &instruction->base;
}

static IrInstSrc *ir_build_typeof_n(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc **values, size_t value_count)
{
    assert(value_count >= 2);

    IrInstSrcTypeOf *instruction = ir_build_instruction<IrInstSrcTypeOf>(irb, scope, source_node);
    instruction->value.list = values;
    instruction->value_count = value_count;

    for (size_t i = 0; i < value_count; i += 1)
        ir_ref_instruction(values[i], irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_typeof_1(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *value) {
    IrInstSrcTypeOf *instruction = ir_build_instruction<IrInstSrcTypeOf>(irb, scope, source_node);
    instruction->value.scalar = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_set_cold(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *is_cold) {
    IrInstSrcSetCold *instruction = ir_build_instruction<IrInstSrcSetCold>(irb, scope, source_node);
    instruction->is_cold = is_cold;

    ir_ref_instruction(is_cold, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_set_runtime_safety(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *safety_on)
{
    IrInstSrcSetRuntimeSafety *inst = ir_build_instruction<IrInstSrcSetRuntimeSafety>(irb, scope, source_node);
    inst->safety_on = safety_on;

    ir_ref_instruction(safety_on, irb->current_basic_block);

    return &inst->base;
}

static IrInstSrc *ir_build_set_float_mode(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *mode_value)
{
    IrInstSrcSetFloatMode *instruction = ir_build_instruction<IrInstSrcSetFloatMode>(irb, scope, source_node);
    instruction->mode_value = mode_value;

    ir_ref_instruction(mode_value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_array_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *size,
        IrInstSrc *sentinel, IrInstSrc *child_type)
{
    IrInstSrcArrayType *instruction = ir_build_instruction<IrInstSrcArrayType>(irb, scope, source_node);
    instruction->size = size;
    instruction->sentinel = sentinel;
    instruction->child_type = child_type;

    ir_ref_instruction(size, irb->current_basic_block);
    if (sentinel != nullptr) ir_ref_instruction(sentinel, irb->current_basic_block);
    ir_ref_instruction(child_type, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_anyframe_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *payload_type)
{
    IrInstSrcAnyFrameType *instruction = ir_build_instruction<IrInstSrcAnyFrameType>(irb, scope, source_node);
    instruction->payload_type = payload_type;

    if (payload_type != nullptr) ir_ref_instruction(payload_type, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_slice_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *child_type, bool is_const, bool is_volatile,
        IrInstSrc *sentinel, IrInstSrc *align_value, bool is_allow_zero)
{
    IrInstSrcSliceType *instruction = ir_build_instruction<IrInstSrcSliceType>(irb, scope, source_node);
    instruction->is_const = is_const;
    instruction->is_volatile = is_volatile;
    instruction->child_type = child_type;
    instruction->sentinel = sentinel;
    instruction->align_value = align_value;
    instruction->is_allow_zero = is_allow_zero;

    if (sentinel != nullptr) ir_ref_instruction(sentinel, irb->current_basic_block);
    if (align_value != nullptr) ir_ref_instruction(align_value, irb->current_basic_block);
    ir_ref_instruction(child_type, irb->current_basic_block);

    return &instruction->base;
}

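// Inline asm operand reference counts are driven by the output/input lists on
// the NodeTypeAsmExpr AST node; output types may be null and are only
// referenced when present.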
static IrInstSrc *ir_build_asm_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *asm_template, IrInstSrc **input_list, IrInstSrc **output_types,
        ZigVar **output_vars, size_t return_count, bool has_side_effects, bool is_global)
{
    IrInstSrcAsm *instruction = ir_build_instruction<IrInstSrcAsm>(irb, scope, source_node);
    instruction->asm_template = asm_template;
    instruction->input_list = input_list;
    instruction->output_types = output_types;
    instruction->output_vars = output_vars;
    instruction->return_count = return_count;
    instruction->has_side_effects = has_side_effects;
    instruction->is_global = is_global;

    assert(source_node->type == NodeTypeAsmExpr);
    for (size_t i = 0; i < source_node->data.asm_expr.output_list.length; i += 1) {
        IrInstSrc *output_type = output_types[i];
        if (output_type != nullptr) ir_ref_instruction(output_type, irb->current_basic_block);
    }

    for (size_t i = 0; i < source_node->data.asm_expr.input_list.length; i += 1) {
        IrInstSrc *input_value = input_list[i];
        ir_ref_instruction(input_value, irb->current_basic_block);
    }

    return &instruction->base;
}

static IrInstGen *ir_build_asm_gen(IrAnalyze *ira, IrInst *source_instr,
        Buf *asm_template, AsmToken *token_list, size_t token_list_len,
        IrInstGen **input_list, IrInstGen **output_types, ZigVar **output_vars, size_t return_count,
        bool has_side_effects, ZigType *return_type)
{
    IrInstGenAsm *instruction = ir_build_inst_gen<IrInstGenAsm>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    instruction->base.value->type = return_type;
    instruction->asm_template = asm_template;
    instruction->token_list = token_list;
    instruction->token_list_len = token_list_len;
    instruction->input_list = input_list;
    instruction->output_types = output_types;
    instruction->output_vars = output_vars;
    instruction->return_count = return_count;
    instruction->has_side_effects = has_side_effects;

    assert(source_instr->source_node->type == NodeTypeAsmExpr);
    for (size_t i = 0; i < source_instr->source_node->data.asm_expr.output_list.length; i += 1) {
        IrInstGen *output_type = output_types[i];
        if (output_type != nullptr) ir_ref_inst_gen(output_type);
    }

    for (size_t i = 0; i < source_instr->source_node->data.asm_expr.input_list.length; i += 1) {
        IrInstGen *input_value = input_list[i];
        ir_ref_inst_gen(input_value);
    }

    return &instruction->base;
}

static IrInstSrc *ir_build_size_of(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type_value,
        bool bit_size)
{
    IrInstSrcSizeOf *instruction = ir_build_instruction<IrInstSrcSizeOf>(irb, scope, source_node);
    instruction->type_value = type_value;
    instruction->bit_size = bit_size;

    ir_ref_instruction(type_value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_test_non_null_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *value)
{
    IrInstSrcTestNonNull *instruction = ir_build_instruction<IrInstSrcTestNonNull>(irb, scope, source_node);
    instruction->value = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_test_non_null_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value) {
    IrInstGenTestNonNull *inst = ir_build_inst_gen<IrInstGenTestNonNull>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = ira->codegen->builtin_types.entry_bool;
    inst->value = value;

    ir_ref_inst_gen(value);

    return &inst->base;
}

static IrInstSrc *ir_build_optional_unwrap_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *base_ptr, bool safety_check_on)
{
    IrInstSrcOptionalUnwrapPtr *instruction = ir_build_instruction<IrInstSrcOptionalUnwrapPtr>(irb, scope, source_node);
    instruction->base_ptr = base_ptr;
    instruction->safety_check_on = safety_check_on;

    ir_ref_instruction(base_ptr, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_optional_unwrap_ptr_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *base_ptr, bool safety_check_on, bool initializing, ZigType *result_type)
{
    IrInstGenOptionalUnwrapPtr *inst = ir_build_inst_gen<IrInstGenOptionalUnwrapPtr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = result_type;
    inst->base_ptr = base_ptr;
    inst->safety_check_on = safety_check_on;
    inst->initializing = initializing;

    ir_ref_inst_gen(base_ptr);

    return &inst->base;
}

static IrInstGen *ir_build_optional_wrap(IrAnalyze *ira, IrInst *source_instruction, ZigType *result_ty,
        IrInstGen *operand, IrInstGen *result_loc)
{
    IrInstGenOptionalWrap *instruction = ir_build_inst_gen<IrInstGenOptionalWrap>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = result_ty;
    instruction->operand = operand;
    instruction->result_loc = result_loc;

    ir_ref_inst_gen(operand);
    if (result_loc != nullptr) ir_ref_inst_gen(result_loc);

    return &instruction->base;
}

static IrInstGen *ir_build_err_wrap_payload(IrAnalyze *ira, IrInst *source_instruction,
        ZigType *result_type, IrInstGen *operand, IrInstGen *result_loc)
{
    IrInstGenErrWrapPayload *instruction = ir_build_inst_gen<IrInstGenErrWrapPayload>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = result_type;
    instruction->operand = operand;
    instruction->result_loc = result_loc;

    ir_ref_inst_gen(operand);
    if (result_loc != nullptr) ir_ref_inst_gen(result_loc);

    return &instruction->base;
}

static IrInstGen *ir_build_err_wrap_code(IrAnalyze *ira, IrInst *source_instruction,
        ZigType *result_type, IrInstGen *operand, IrInstGen *result_loc)
{
    IrInstGenErrWrapCode *instruction = ir_build_inst_gen<IrInstGenErrWrapCode>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = result_type;
    instruction->operand = operand;
    instruction->result_loc = result_loc;

    ir_ref_inst_gen(operand);
    if (result_loc != nullptr) ir_ref_inst_gen(result_loc);

    return &instruction->base;
}

static IrInstSrc *ir_build_clz(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type,
        IrInstSrc *op)
{
    IrInstSrcClz *instruction = ir_build_instruction<IrInstSrcClz>(irb, scope, source_node);
    instruction->type = type;
    instruction->op = op;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_clz_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *result_type, IrInstGen *op) {
    IrInstGenClz *instruction = ir_build_inst_gen<IrInstGenClz>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = result_type;
    instruction->op = op;

    ir_ref_inst_gen(op);

    return &instruction->base;
}

static IrInstSrc *ir_build_ctz(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type,
        IrInstSrc *op)
{
    IrInstSrcCtz *instruction = ir_build_instruction<IrInstSrcCtz>(irb, scope, source_node);
    instruction->type = type;
    instruction->op = op;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_ctz_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *result_type, IrInstGen *op) {
    IrInstGenCtz *instruction = ir_build_inst_gen<IrInstGenCtz>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = result_type;
    instruction->op = op;

    ir_ref_inst_gen(op);

    return &instruction->base;
}

static IrInstSrc *ir_build_pop_count(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type,
        IrInstSrc *op)
{
    IrInstSrcPopCount *instruction = ir_build_instruction<IrInstSrcPopCount>(irb, scope, source_node);
    instruction->type = type;
    instruction->op = op;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_pop_count_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *result_type,
        IrInstGen *op)
{
    IrInstGenPopCount *instruction = ir_build_inst_gen<IrInstGenPopCount>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = result_type;
    instruction->op = op;

    ir_ref_inst_gen(op);

    return &instruction->base;
}

static IrInstSrc *ir_build_bswap(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type,
        IrInstSrc *op)
{
    IrInstSrcBswap *instruction = ir_build_instruction<IrInstSrcBswap>(irb, scope, source_node);
    instruction->type = type;
    instruction->op = op;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_bswap_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *op_type,
        IrInstGen *op)
{
    IrInstGenBswap *instruction = ir_build_inst_gen<IrInstGenBswap>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = op_type;
    instruction->op = op;

    ir_ref_inst_gen(op);

    return &instruction->base;
}

static IrInstSrc *ir_build_bit_reverse(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type,
        IrInstSrc *op)
{
    IrInstSrcBitReverse *instruction = ir_build_instruction<IrInstSrcBitReverse>(irb, scope, source_node);
    instruction->type = type;
    instruction->op = op;

    ir_ref_instruction(type, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_bit_reverse_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *int_type,
        IrInstGen *op)
{
    IrInstGenBitReverse *instruction = ir_build_inst_gen<IrInstGenBitReverse>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = int_type;
    instruction->op = op;

    ir_ref_inst_gen(op);

    return &instruction->base;
}

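// Switch branches are terminators: the instruction is flagged noreturn and
// every case value and destination block is referenced, together with the
// else block.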
static IrInstSrcSwitchBr *ir_build_switch_br_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target_value, IrBasicBlockSrc *else_block, size_t case_count, IrInstSrcSwitchBrCase *cases,
        IrInstSrc *is_comptime, IrInstSrc *switch_prongs_void)
{
    IrInstSrcSwitchBr *instruction = ir_build_instruction<IrInstSrcSwitchBr>(irb, scope, source_node);
    instruction->base.is_noreturn = true;
    instruction->target_value = target_value;
    instruction->else_block = else_block;
    instruction->case_count = case_count;
    instruction->cases = cases;
    instruction->is_comptime = is_comptime;
    instruction->switch_prongs_void = switch_prongs_void;

    ir_ref_instruction(target_value, irb->current_basic_block);
    ir_ref_instruction(is_comptime, irb->current_basic_block);
    ir_ref_bb(else_block);
    ir_ref_instruction(switch_prongs_void, irb->current_basic_block);

    for (size_t i = 0; i < case_count; i += 1) {
        ir_ref_instruction(cases[i].value, irb->current_basic_block);
        ir_ref_bb(cases[i].block);
    }

    return instruction;
}

static IrInstGenSwitchBr *ir_build_switch_br_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *target_value, IrBasicBlockGen *else_block, size_t case_count, IrInstGenSwitchBrCase *cases)
{
    IrInstGenSwitchBr *instruction = ir_build_inst_noreturn<IrInstGenSwitchBr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->target_value = target_value;
    instruction->else_block = else_block;
    instruction->case_count = case_count;
    instruction->cases = cases;

    ir_ref_inst_gen(target_value);

    for (size_t i = 0; i < case_count; i += 1) {
        ir_ref_inst_gen(cases[i].value);
    }

    return instruction;
}

static IrInstSrc *ir_build_switch_target(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target_value_ptr)
{
    IrInstSrcSwitchTarget *instruction = ir_build_instruction<IrInstSrcSwitchTarget>(irb, scope, source_node);
    instruction->target_value_ptr = target_value_ptr;

    ir_ref_instruction(target_value_ptr, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_switch_var(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target_value_ptr, IrInstSrc **prongs_ptr, size_t prongs_len)
{
    IrInstSrcSwitchVar *instruction = ir_build_instruction<IrInstSrcSwitchVar>(irb, scope, source_node);
    instruction->target_value_ptr = target_value_ptr;
    instruction->prongs_ptr = prongs_ptr;
    instruction->prongs_len = prongs_len;

    ir_ref_instruction(target_value_ptr, irb->current_basic_block);
    for (size_t i = 0; i < prongs_len; i += 1) {
        ir_ref_instruction(prongs_ptr[i], irb->current_basic_block);
    }

    return &instruction->base;
}

// For this instruction, the switch_br field must be set later by the caller.
static IrInstSrcSwitchElseVar *ir_build_switch_else_var(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target_value_ptr)
{
    IrInstSrcSwitchElseVar *instruction = ir_build_instruction<IrInstSrcSwitchElseVar>(irb, scope, source_node);
    instruction->target_value_ptr = target_value_ptr;

    ir_ref_instruction(target_value_ptr, irb->current_basic_block);

    return instruction;
}

static IrInstGen *ir_build_union_tag(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value,
        ZigType *tag_type)
{
    IrInstGenUnionTag *instruction = ir_build_inst_gen<IrInstGenUnionTag>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->value = value;
    instruction->base.value->type = tag_type;

    ir_ref_inst_gen(value);

    return &instruction->base;
}

static IrInstSrc *ir_build_import(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *name) {
    IrInstSrcImport *instruction = ir_build_instruction<IrInstSrcImport>(irb, scope, source_node);
    instruction->name = name;

    ir_ref_instruction(name, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_ref_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *value) {
    IrInstSrcRef *instruction = ir_build_instruction<IrInstSrcRef>(irb, scope, source_node);
    instruction->value = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_ref_gen(IrAnalyze *ira, IrInst *source_instruction, ZigType *result_type,
        IrInstGen *operand, IrInstGen *result_loc)
{
    IrInstGenRef *instruction = ir_build_inst_gen<IrInstGenRef>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = result_type;
    instruction->operand = operand;
    instruction->result_loc = result_loc;

    ir_ref_inst_gen(operand);
    if (result_loc != nullptr) ir_ref_inst_gen(result_loc);

    return &instruction->base;
}

static IrInstSrc *ir_build_compile_err(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *msg) {
    IrInstSrcCompileErr *instruction = ir_build_instruction<IrInstSrcCompileErr>(irb, scope, source_node);
    instruction->msg = msg;

    ir_ref_instruction(msg, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_compile_log(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        size_t msg_count, IrInstSrc **msg_list)
{
    IrInstSrcCompileLog *instruction = ir_build_instruction<IrInstSrcCompileLog>(irb, scope, source_node);
    instruction->msg_count = msg_count;
    instruction->msg_list = msg_list;

    for (size_t i = 0; i < msg_count; i += 1) {
        ir_ref_instruction(msg_list[i], irb->current_basic_block);
    }

    return &instruction->base;
}

static IrInstSrc *ir_build_err_name(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *value) {
    IrInstSrcErrName *instruction = ir_build_instruction<IrInstSrcErrName>(irb, scope, source_node);
    instruction->value = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_err_name_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value,
        ZigType *str_type)
{
    IrInstGenErrName *instruction = ir_build_inst_gen<IrInstGenErrName>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = str_type;
    instruction->value = value;

    ir_ref_inst_gen(value);

    return &instruction->base;
}

static IrInstSrc *ir_build_c_import(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcCImport *instruction = ir_build_instruction<IrInstSrcCImport>(irb, scope, source_node);
    return &instruction->base;
}

static IrInstSrc *ir_build_c_include(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *name) {
    IrInstSrcCInclude *instruction = ir_build_instruction<IrInstSrcCInclude>(irb, scope, source_node);
    instruction->name = name;

    ir_ref_instruction(name, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_c_define(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *name, IrInstSrc *value) {
    IrInstSrcCDefine *instruction = ir_build_instruction<IrInstSrcCDefine>(irb, scope, source_node);
    instruction->name = name;
    instruction->value = value;

    ir_ref_instruction(name, irb->current_basic_block);
    ir_ref_instruction(value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_c_undef(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *name) {
    IrInstSrcCUndef *instruction = ir_build_instruction<IrInstSrcCUndef>(irb, scope, source_node);
    instruction->name = name;

    ir_ref_instruction(name, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_embed_file(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *name) {
    IrInstSrcEmbedFile *instruction = ir_build_instruction<IrInstSrcEmbedFile>(irb, scope, source_node);
    instruction->name = name;

    ir_ref_instruction(name, irb->current_basic_block);

    return &instruction->base;
}

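// The source-level cmpxchg keeps its success/failure orderings as instruction
// operands; the gen variant below stores the resolved AtomicOrder values
// instead.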
static IrInstSrc *ir_build_cmpxchg_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *type_value, IrInstSrc *ptr, IrInstSrc *cmp_value, IrInstSrc *new_value,
    IrInstSrc *success_order_value, IrInstSrc *failure_order_value, bool is_weak, ResultLoc *result_loc)
{
    IrInstSrcCmpxchg *instruction = ir_build_instruction<IrInstSrcCmpxchg>(irb, scope, source_node);
    instruction->type_value = type_value;
    instruction->ptr = ptr;
    instruction->cmp_value = cmp_value;
    instruction->new_value = new_value;
    instruction->success_order_value = success_order_value;
    instruction->failure_order_value = failure_order_value;
    instruction->is_weak = is_weak;
    instruction->result_loc = result_loc;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(cmp_value, irb->current_basic_block);
    ir_ref_instruction(new_value, irb->current_basic_block);
    ir_ref_instruction(success_order_value, irb->current_basic_block);
    ir_ref_instruction(failure_order_value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_cmpxchg_gen(IrAnalyze *ira, IrInst *source_instruction, ZigType *result_type,
    IrInstGen *ptr, IrInstGen *cmp_value, IrInstGen *new_value,
    AtomicOrder success_order, AtomicOrder failure_order, bool is_weak, IrInstGen *result_loc)
{
    IrInstGenCmpxchg *instruction = ir_build_inst_gen<IrInstGenCmpxchg>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = result_type;
    instruction->ptr = ptr;
    instruction->cmp_value = cmp_value;
    instruction->new_value = new_value;
    instruction->success_order = success_order;
    instruction->failure_order = failure_order;
    instruction->is_weak = is_weak;
    instruction->result_loc = result_loc;

    ir_ref_inst_gen(ptr);
    ir_ref_inst_gen(cmp_value);
    ir_ref_inst_gen(new_value);
    if (result_loc != nullptr) ir_ref_inst_gen(result_loc);

    return &instruction->base;
}

static IrInstSrc *ir_build_fence(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *order) {
    IrInstSrcFence *instruction = ir_build_instruction<IrInstSrcFence>(irb, scope, source_node);
    instruction->order = order;

    ir_ref_instruction(order, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_fence_gen(IrAnalyze *ira, IrInst *source_instr, AtomicOrder order) {
    IrInstGenFence *instruction = ir_build_inst_void<IrInstGenFence>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->order = order;

    return &instruction->base;
}

static IrInstSrc *ir_build_truncate(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcTruncate *instruction = ir_build_instruction<IrInstSrcTruncate>(irb, scope, source_node);
    instruction->dest_type = dest_type;
    instruction->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_truncate_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *dest_type,
        IrInstGen *target)
{
    IrInstGenTruncate *instruction = ir_build_inst_gen<IrInstGenTruncate>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = dest_type;
    instruction->target = target;

    ir_ref_inst_gen(target);

    return &instruction->base;
}

static IrInstSrc *ir_build_int_cast(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *dest_type,
        IrInstSrc *target)
{
    IrInstSrcIntCast *instruction = ir_build_instruction<IrInstSrcIntCast>(irb, scope, source_node);
    instruction->dest_type = dest_type;
    instruction->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_float_cast(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *dest_type,
        IrInstSrc *target)
{
    IrInstSrcFloatCast *instruction = ir_build_instruction<IrInstSrcFloatCast>(irb, scope, source_node);
    instruction->dest_type = dest_type;
    instruction->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_err_set_cast(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcErrSetCast *instruction = ir_build_instruction<IrInstSrcErrSetCast>(irb, scope, source_node);
    instruction->dest_type = dest_type;
    instruction->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_int_to_float(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcIntToFloat *instruction = ir_build_instruction<IrInstSrcIntToFloat>(irb, scope, source_node);
    instruction->dest_type = dest_type;
    instruction->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_float_to_int(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcFloatToInt *instruction = ir_build_instruction<IrInstSrcFloatToInt>(irb, scope, source_node);
    instruction->dest_type = dest_type;
    instruction->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_bool_to_int(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *target) {
    IrInstSrcBoolToInt *instruction = ir_build_instruction<IrInstSrcBoolToInt>(irb, scope, source_node);
    instruction->target = target;

    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_vector_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *len,
        IrInstSrc *elem_type)
{
    IrInstSrcVectorType *instruction = ir_build_instruction<IrInstSrcVectorType>(irb, scope, source_node);
    instruction->len = len;
    instruction->elem_type = elem_type;

    ir_ref_instruction(len, irb->current_basic_block);
    ir_ref_instruction(elem_type, irb->current_basic_block);

    return &instruction->base;
}

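// The scalar type operand is optional here and only referenced when provided;
// the gen form below takes the already-resolved result type instead.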
static IrInstSrc *ir_build_shuffle_vector(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *scalar_type, IrInstSrc *a, IrInstSrc *b, IrInstSrc *mask)
{
    IrInstSrcShuffleVector *instruction = ir_build_instruction<IrInstSrcShuffleVector>(irb, scope, source_node);
    instruction->scalar_type = scalar_type;
    instruction->a = a;
    instruction->b = b;
    instruction->mask = mask;

    if (scalar_type != nullptr) ir_ref_instruction(scalar_type, irb->current_basic_block);
    ir_ref_instruction(a, irb->current_basic_block);
    ir_ref_instruction(b, irb->current_basic_block);
    ir_ref_instruction(mask, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_shuffle_vector_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        ZigType *result_type, IrInstGen *a, IrInstGen *b, IrInstGen *mask)
{
    IrInstGenShuffleVector *inst = ir_build_inst_gen<IrInstGenShuffleVector>(&ira->new_irb, scope, source_node);
    inst->base.value->type = result_type;
    inst->a = a;
    inst->b = b;
    inst->mask = mask;

    ir_ref_inst_gen(a);
    ir_ref_inst_gen(b);
    ir_ref_inst_gen(mask);

    return &inst->base;
}

static IrInstSrc *ir_build_splat_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *len, IrInstSrc *scalar)
{
    IrInstSrcSplat *instruction = ir_build_instruction<IrInstSrcSplat>(irb, scope, source_node);
    instruction->len = len;
    instruction->scalar = scalar;

    ir_ref_instruction(len, irb->current_basic_block);
    ir_ref_instruction(scalar, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_splat_gen(IrAnalyze *ira, IrInst *source_instruction, ZigType *result_type,
    IrInstGen *scalar)
{
    IrInstGenSplat *instruction = ir_build_inst_gen<IrInstGenSplat>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = result_type;
    instruction->scalar = scalar;

    ir_ref_inst_gen(scalar);

    return &instruction->base;
}

static IrInstSrc *ir_build_bool_not(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *value) {
    IrInstSrcBoolNot *instruction = ir_build_instruction<IrInstSrcBoolNot>(irb, scope, source_node);
    instruction->value = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_bool_not_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value) {
    IrInstGenBoolNot *instruction = ir_build_inst_gen<IrInstGenBoolNot>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = ira->codegen->builtin_types.entry_bool;
    instruction->value = value;

    ir_ref_inst_gen(value);

    return &instruction->base;
}

static IrInstSrc *ir_build_memset_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *dest_ptr, IrInstSrc *byte, IrInstSrc *count)
{
    IrInstSrcMemset *instruction = ir_build_instruction<IrInstSrcMemset>(irb, scope, source_node);
    instruction->dest_ptr = dest_ptr;
    instruction->byte = byte;
    instruction->count = count;

    ir_ref_instruction(dest_ptr, irb->current_basic_block);
    ir_ref_instruction(byte, irb->current_basic_block);
    ir_ref_instruction(count, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_memset_gen(IrAnalyze *ira, IrInst *source_instr,
    IrInstGen *dest_ptr, IrInstGen *byte, IrInstGen *count)
{
    IrInstGenMemset *instruction = ir_build_inst_void<IrInstGenMemset>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->dest_ptr = dest_ptr;
    instruction->byte = byte;
    instruction->count = count;

    ir_ref_inst_gen(dest_ptr);
    ir_ref_inst_gen(byte);
    ir_ref_inst_gen(count);

    return &instruction->base;
}

static IrInstSrc *ir_build_memcpy_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *dest_ptr, IrInstSrc *src_ptr, IrInstSrc *count)
{
    IrInstSrcMemcpy *instruction = ir_build_instruction<IrInstSrcMemcpy>(irb, scope, source_node);
    instruction->dest_ptr = dest_ptr;
    instruction->src_ptr = src_ptr;
    instruction->count = count;

    ir_ref_instruction(dest_ptr, irb->current_basic_block);
    ir_ref_instruction(src_ptr, irb->current_basic_block);
    ir_ref_instruction(count, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_memcpy_gen(IrAnalyze *ira, IrInst *source_instr,
    IrInstGen *dest_ptr, IrInstGen *src_ptr, IrInstGen *count)
{
    IrInstGenMemcpy *instruction = ir_build_inst_void<IrInstGenMemcpy>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->dest_ptr = dest_ptr;
    instruction->src_ptr = src_ptr;
    instruction->count = count;

    ir_ref_inst_gen(dest_ptr);
    ir_ref_inst_gen(src_ptr);
    ir_ref_inst_gen(count);

    return &instruction->base;
}

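// Slice construction treats end and sentinel as optional operands; the gen
// variant carries the sentinel as a ZigValue rather than an instruction.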
static IrInstSrc *ir_build_slice_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *ptr, IrInstSrc *start, IrInstSrc *end, IrInstSrc *sentinel,
    bool safety_check_on, ResultLoc *result_loc)
{
    IrInstSrcSlice *instruction = ir_build_instruction<IrInstSrcSlice>(irb, scope, source_node);
    instruction->ptr = ptr;
    instruction->start = start;
    instruction->end = end;
    instruction->sentinel = sentinel;
    instruction->safety_check_on = safety_check_on;
    instruction->result_loc = result_loc;

    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(start, irb->current_basic_block);
    if (end != nullptr) ir_ref_instruction(end, irb->current_basic_block);
    if (sentinel != nullptr) ir_ref_instruction(sentinel, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_slice_gen(IrAnalyze *ira, IrInst *source_instruction, ZigType *slice_type,
    IrInstGen *ptr, IrInstGen *start, IrInstGen *end, bool safety_check_on, IrInstGen *result_loc,
    ZigValue *sentinel)
{
    IrInstGenSlice *instruction = ir_build_inst_gen<IrInstGenSlice>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = slice_type;
    instruction->ptr = ptr;
    instruction->start = start;
    instruction->end = end;
    instruction->safety_check_on = safety_check_on;
    instruction->result_loc = result_loc;
    instruction->sentinel = sentinel;

    ir_ref_inst_gen(ptr);
    ir_ref_inst_gen(start);
    if (end != nullptr) ir_ref_inst_gen(end);
    if (result_loc != nullptr) ir_ref_inst_gen(result_loc);

    return &instruction->base;
}

static IrInstSrc *ir_build_breakpoint(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcBreakpoint *instruction = ir_build_instruction<IrInstSrcBreakpoint>(irb, scope, source_node);
    return &instruction->base;
}

static IrInstGen *ir_build_breakpoint_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenBreakpoint *instruction = ir_build_inst_void<IrInstGenBreakpoint>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    return &instruction->base;
}

static IrInstSrc *ir_build_return_address_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcReturnAddress *instruction = ir_build_instruction<IrInstSrcReturnAddress>(irb, scope, source_node);
    return &instruction->base;
}

static IrInstGen *ir_build_return_address_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenReturnAddress *inst = ir_build_inst_gen<IrInstGenReturnAddress>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->base.value->type = ira->codegen->builtin_types.entry_usize;
    return &inst->base;
}

static IrInstSrc *ir_build_frame_address_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcFrameAddress *inst = ir_build_instruction<IrInstSrcFrameAddress>(irb, scope, source_node);
    return &inst->base;
}

static IrInstGen *ir_build_frame_address_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenFrameAddress *inst = ir_build_inst_gen<IrInstGenFrameAddress>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->base.value->type = ira->codegen->builtin_types.entry_usize;
    return &inst->base;
}

static IrInstSrc *ir_build_handle_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcFrameHandle *inst = ir_build_instruction<IrInstSrcFrameHandle>(irb, scope, source_node);
    return &inst->base;
}

static IrInstGen *ir_build_handle_gen(IrAnalyze *ira, IrInst *source_instr, ZigType *ty) {
    IrInstGenFrameHandle *inst = ir_build_inst_gen<IrInstGenFrameHandle>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->base.value->type = ty;
    return &inst->base;
}

static IrInstSrc *ir_build_frame_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *fn) {
    IrInstSrcFrameType *inst = ir_build_instruction<IrInstSrcFrameType>(irb, scope, source_node);
    inst->fn = fn;

    ir_ref_instruction(fn, irb->current_basic_block);

    return &inst->base;
}

static IrInstSrc *ir_build_frame_size_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *fn) {
    IrInstSrcFrameSize *inst = ir_build_instruction<IrInstSrcFrameSize>(irb, scope, source_node);
    inst->fn = fn;

    ir_ref_instruction(fn, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_frame_size_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *fn) {
    IrInstGenFrameSize *inst = ir_build_inst_gen<IrInstGenFrameSize>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->base.value->type = ira->codegen->builtin_types.entry_usize;
    inst->fn = fn;

    ir_ref_inst_gen(fn);

    return &inst->base;
}

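// Overflow builtins write their numeric result through result_ptr; the gen
// variant's own result type is bool (set below).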
static IrInstSrc *ir_build_overflow_op_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrOverflowOp op, IrInstSrc *type_value, IrInstSrc *op1, IrInstSrc *op2, IrInstSrc *result_ptr)
{
    IrInstSrcOverflowOp *instruction = ir_build_instruction<IrInstSrcOverflowOp>(irb, scope, source_node);
    instruction->op = op;
    instruction->type_value = type_value;
    instruction->op1 = op1;
    instruction->op2 = op2;
    instruction->result_ptr = result_ptr;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(op1, irb->current_basic_block);
    ir_ref_instruction(op2, irb->current_basic_block);
    ir_ref_instruction(result_ptr, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_overflow_op_gen(IrAnalyze *ira, IrInst *source_instr,
        IrOverflowOp op, IrInstGen *op1, IrInstGen *op2, IrInstGen *result_ptr,
        ZigType *result_ptr_type)
{
    IrInstGenOverflowOp *instruction = ir_build_inst_gen<IrInstGenOverflowOp>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = ira->codegen->builtin_types.entry_bool;
    instruction->op = op;
    instruction->op1 = op1;
    instruction->op2 = op2;
    instruction->result_ptr = result_ptr;
    instruction->result_ptr_type = result_ptr_type;

    ir_ref_inst_gen(op1);
    ir_ref_inst_gen(op2);
    ir_ref_inst_gen(result_ptr);

    return &instruction->base;
}

static IrInstSrc *ir_build_float_op_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *operand,
        BuiltinFnId fn_id)
{
    IrInstSrcFloatOp *instruction = ir_build_instruction<IrInstSrcFloatOp>(irb, scope, source_node);
    instruction->operand = operand;
    instruction->fn_id = fn_id;

    ir_ref_instruction(operand, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_float_op_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *operand,
        BuiltinFnId fn_id, ZigType *operand_type)
{
    IrInstGenFloatOp *instruction = ir_build_inst_gen<IrInstGenFloatOp>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = operand_type;
    instruction->operand = operand;
    instruction->fn_id = fn_id;

    ir_ref_inst_gen(operand);

    return &instruction->base;
}

static IrInstSrc *ir_build_mul_add_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value, IrInstSrc *op1, IrInstSrc *op2, IrInstSrc *op3)
{
    IrInstSrcMulAdd *instruction = ir_build_instruction<IrInstSrcMulAdd>(irb, scope, source_node);
    instruction->type_value = type_value;
    instruction->op1 = op1;
    instruction->op2 = op2;
    instruction->op3 = op3;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(op1, irb->current_basic_block);
    ir_ref_instruction(op2, irb->current_basic_block);
    ir_ref_instruction(op3, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_mul_add_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *op1, IrInstGen *op2,
        IrInstGen *op3, ZigType *expr_type)
{
    IrInstGenMulAdd *instruction = ir_build_inst_gen<IrInstGenMulAdd>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = expr_type;
    instruction->op1 = op1;
    instruction->op2 = op2;
    instruction->op3 = op3;

    ir_ref_inst_gen(op1);
    ir_ref_inst_gen(op2);
    ir_ref_inst_gen(op3);

    return &instruction->base;
}

static IrInstSrc *ir_build_align_of(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type_value) {
    IrInstSrcAlignOf *instruction = ir_build_instruction<IrInstSrcAlignOf>(irb, scope, source_node);
    instruction->type_value = type_value;

    ir_ref_instruction(type_value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_test_err_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *base_ptr, bool resolve_err_set, bool base_ptr_is_payload)
{
    IrInstSrcTestErr *instruction = ir_build_instruction<IrInstSrcTestErr>(irb, scope, source_node);
    instruction->base_ptr = base_ptr;
    instruction->resolve_err_set = resolve_err_set;
    instruction->base_ptr_is_payload = base_ptr_is_payload;

    ir_ref_instruction(base_ptr, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_test_err_gen(IrAnalyze *ira, IrInst *source_instruction, IrInstGen *err_union) {
    IrInstGenTestErr *instruction = ir_build_inst_gen<IrInstGenTestErr>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = ira->codegen->builtin_types.entry_bool;
    instruction->err_union = err_union;

    ir_ref_inst_gen(err_union);

    return &instruction->base;
}

static IrInstSrc *ir_build_unwrap_err_code_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *err_union_ptr)
{
    IrInstSrcUnwrapErrCode *inst = ir_build_instruction<IrInstSrcUnwrapErrCode>(irb, scope, source_node);
    inst->err_union_ptr = err_union_ptr;

    ir_ref_instruction(err_union_ptr, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_unwrap_err_code_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
    IrInstGen *err_union_ptr, ZigType *result_type)
{
    IrInstGenUnwrapErrCode *inst = ir_build_inst_gen<IrInstGenUnwrapErrCode>(&ira->new_irb, scope, source_node);
    inst->base.value->type = result_type;
    inst->err_union_ptr = err_union_ptr;

    ir_ref_inst_gen(err_union_ptr);

    return &inst->base;
}

static IrInstSrc *ir_build_unwrap_err_payload_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *value, bool safety_check_on, bool initializing)
{
    IrInstSrcUnwrapErrPayload *inst = ir_build_instruction<IrInstSrcUnwrapErrPayload>(irb, scope, source_node);
    inst->value = value;
    inst->safety_check_on = safety_check_on;
    inst->initializing = initializing;

    ir_ref_instruction(value, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_unwrap_err_payload_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
    IrInstGen *value, bool safety_check_on, bool initializing, ZigType *result_type)
{
    IrInstGenUnwrapErrPayload *inst = ir_build_inst_gen<IrInstGenUnwrapErrPayload>(&ira->new_irb, scope, source_node);
    inst->base.value->type = result_type;
    inst->value = value;
    inst->safety_check_on = safety_check_on;
    inst->initializing = initializing;

    ir_ref_inst_gen(value);

    return &inst->base;
}

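// For var-args prototypes the trailing parameter is skipped when referencing
// parameter types; null parameter types are likewise left unreferenced.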
static IrInstSrc *ir_build_fn_proto(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc **param_types, IrInstSrc *align_value, IrInstSrc *callconv_value,
    IrInstSrc *return_type, bool is_var_args)
{
    IrInstSrcFnProto *instruction = ir_build_instruction<IrInstSrcFnProto>(irb, scope, source_node);
    instruction->param_types = param_types;
    instruction->align_value = align_value;
    instruction->callconv_value = callconv_value;
    instruction->return_type = return_type;
    instruction->is_var_args = is_var_args;

    assert(source_node->type == NodeTypeFnProto);
    size_t param_count = source_node->data.fn_proto.params.length;
    if (is_var_args) param_count -= 1;
    for (size_t i = 0; i < param_count; i += 1) {
        if (param_types[i] != nullptr) ir_ref_instruction(param_types[i], irb->current_basic_block);
    }
    if (align_value != nullptr) ir_ref_instruction(align_value, irb->current_basic_block);
    if (callconv_value != nullptr) ir_ref_instruction(callconv_value, irb->current_basic_block);
    ir_ref_instruction(return_type, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_test_comptime(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *value) {
    IrInstSrcTestComptime *instruction = ir_build_instruction<IrInstSrcTestComptime>(irb, scope, source_node);
    instruction->value = value;

    ir_ref_instruction(value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_ptr_cast_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *ptr, bool safety_check_on)
{
    IrInstSrcPtrCast *instruction = ir_build_instruction<IrInstSrcPtrCast>(
            irb, scope, source_node);
    instruction->dest_type = dest_type;
    instruction->ptr = ptr;
    instruction->safety_check_on = safety_check_on;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_ptr_cast_gen(IrAnalyze *ira, IrInst *source_instruction,
        ZigType *ptr_type, IrInstGen *ptr, bool safety_check_on)
{
    IrInstGenPtrCast *instruction = ir_build_inst_gen<IrInstGenPtrCast>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = ptr_type;
    instruction->ptr = ptr;
    instruction->safety_check_on = safety_check_on;

    ir_ref_inst_gen(ptr);

    return &instruction->base;
}

static IrInstSrc *ir_build_implicit_cast(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand, ResultLocCast *result_loc_cast)
{
    IrInstSrcImplicitCast *instruction = ir_build_instruction<IrInstSrcImplicitCast>(irb, scope, source_node);
    instruction->operand = operand;
    instruction->result_loc_cast = result_loc_cast;

    ir_ref_instruction(operand, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_bit_cast_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand, ResultLocBitCast *result_loc_bit_cast)
{
    IrInstSrcBitCast *instruction = ir_build_instruction<IrInstSrcBitCast>(irb, scope, source_node);
    instruction->operand = operand;
    instruction->result_loc_bit_cast = result_loc_bit_cast;

    ir_ref_instruction(operand, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_bit_cast_gen(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *operand, ZigType *ty)
{
    IrInstGenBitCast *instruction = ir_build_inst_gen<IrInstGenBitCast>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = ty;
    instruction->operand = operand;

    ir_ref_inst_gen(operand);

    return &instruction->base;
}

static IrInstGen *ir_build_widen_or_shorten(IrAnalyze *ira, Scope *scope, AstNode *source_node, IrInstGen *target,
        ZigType *result_type)
{
    IrInstGenWidenOrShorten *inst = ir_build_inst_gen<IrInstGenWidenOrShorten>(&ira->new_irb, scope, source_node);
    inst->base.value->type = result_type;
    inst->target = target;

    ir_ref_inst_gen(target);

    return &inst->base;
}

static IrInstSrc *ir_build_int_to_ptr_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcIntToPtr *instruction = ir_build_instruction<IrInstSrcIntToPtr>(irb, scope, source_node);
    instruction->dest_type = dest_type;
    instruction->target = target;

    ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_int_to_ptr_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        IrInstGen *target, ZigType *ptr_type)
{
    IrInstGenIntToPtr *instruction = ir_build_inst_gen<IrInstGenIntToPtr>(&ira->new_irb, scope, source_node);
    instruction->base.value->type = ptr_type;
    instruction->target = target;

    ir_ref_inst_gen(target);

    return &instruction->base;
}

static IrInstSrc *ir_build_ptr_to_int_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target)
{
    IrInstSrcPtrToInt *inst = ir_build_instruction<IrInstSrcPtrToInt>(irb, scope, source_node);
    inst->target = target;

    ir_ref_instruction(target, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_ptr_to_int_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *target) {
    IrInstGenPtrToInt *inst = ir_build_inst_gen<IrInstGenPtrToInt>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    inst->base.value->type = ira->codegen->builtin_types.entry_usize;
    inst->target = target;

    ir_ref_inst_gen(target);

    return &inst->base;
}

static IrInstSrc *ir_build_int_to_enum_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *dest_type, IrInstSrc *target)
{
    IrInstSrcIntToEnum *instruction = ir_build_instruction<IrInstSrcIntToEnum>(irb, scope, source_node);
    instruction->dest_type = dest_type;
    instruction->target = target;

    if (dest_type != nullptr) ir_ref_instruction(dest_type, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_int_to_enum_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        ZigType *dest_type, IrInstGen *target)
{
    IrInstGenIntToEnum *instruction = ir_build_inst_gen<IrInstGenIntToEnum>(&ira->new_irb, scope, source_node);
    instruction->base.value->type = dest_type;
    instruction->target = target;

    ir_ref_inst_gen(target);

    return &instruction->base;
}

static IrInstSrc *ir_build_enum_to_int(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target)
{
    IrInstSrcEnumToInt *instruction = ir_build_instruction<IrInstSrcEnumToInt>(
            irb, scope, source_node);
    instruction->target = target;

    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_int_to_err_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target)
{
    IrInstSrcIntToErr *instruction = ir_build_instruction<IrInstSrcIntToErr>(irb, scope, source_node);
    instruction->target = target;

    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_int_to_err_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node, IrInstGen *target,
        ZigType *wanted_type)
{
    IrInstGenIntToErr *instruction = ir_build_inst_gen<IrInstGenIntToErr>(&ira->new_irb, scope, source_node);
    instruction->base.value->type = wanted_type;
    instruction->target = target;

    ir_ref_inst_gen(target);

    return &instruction->base;
}

static IrInstSrc *ir_build_err_to_int_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target)
{
    IrInstSrcErrToInt *instruction = ir_build_instruction<IrInstSrcErrToInt>(
            irb, scope, source_node);
    instruction->target = target;

    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_err_to_int_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node, IrInstGen *target,
        ZigType *wanted_type)
{
    IrInstGenErrToInt *instruction = ir_build_inst_gen<IrInstGenErrToInt>(&ira->new_irb, scope, source_node);
    instruction->base.value->type = wanted_type;
    instruction->target = target;

    ir_ref_inst_gen(target);

    return &instruction->base;
}

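// Each switch prong range contributes two referenced instructions: its start
// and end values.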
static IrInstSrc *ir_build_check_switch_prongs(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target_value, IrInstSrcCheckSwitchProngsRange *ranges, size_t range_count,
        bool have_else_prong, bool have_underscore_prong)
{
    IrInstSrcCheckSwitchProngs *instruction = ir_build_instruction<IrInstSrcCheckSwitchProngs>(
            irb, scope, source_node);
    instruction->target_value = target_value;
    instruction->ranges = ranges;
    instruction->range_count = range_count;
    instruction->have_else_prong = have_else_prong;
    instruction->have_underscore_prong = have_underscore_prong;

    ir_ref_instruction(target_value, irb->current_basic_block);
    for (size_t i = 0; i < range_count; i += 1) {
        ir_ref_instruction(ranges[i].start, irb->current_basic_block);
        ir_ref_instruction(ranges[i].end, irb->current_basic_block);
    }

    return &instruction->base;
}

static IrInstSrc *ir_build_check_statement_is_void(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc* statement_value)
{
    IrInstSrcCheckStatementIsVoid *instruction = ir_build_instruction<IrInstSrcCheckStatementIsVoid>(
            irb, scope, source_node);
    instruction->statement_value = statement_value;

    ir_ref_instruction(statement_value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_type_name(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value)
{
    IrInstSrcTypeName *instruction = ir_build_instruction<IrInstSrcTypeName>(irb, scope, source_node);
    instruction->type_value = type_value;

    ir_ref_instruction(type_value, irb->current_basic_block);

    return &instruction->base;
}

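// Decl refs carry no operand instructions, so nothing is reference-counted
// here.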
static IrInstSrc *ir_build_decl_ref(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, Tld *tld, LVal lval) {
    IrInstSrcDeclRef *instruction = ir_build_instruction<IrInstSrcDeclRef>(irb, scope, source_node);
    instruction->tld = tld;
    instruction->lval = lval;

    return &instruction->base;
}

static IrInstSrc *ir_build_panic_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *msg) {
    IrInstSrcPanic *instruction = ir_build_instruction<IrInstSrcPanic>(irb, scope, source_node);
    instruction->base.is_noreturn = true;
    instruction->msg = msg;

    ir_ref_instruction(msg, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_panic_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *msg) {
    IrInstGenPanic *instruction = ir_build_inst_noreturn<IrInstGenPanic>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->msg = msg;

    ir_ref_inst_gen(msg);

    return &instruction->base;
}

static IrInstSrc *ir_build_tag_name_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *target) {
    IrInstSrcTagName *instruction = ir_build_instruction<IrInstSrcTagName>(irb, scope, source_node);
    instruction->target = target;

    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_tag_name_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *target,
        ZigType *result_type)
{
    IrInstGenTagName *instruction = ir_build_inst_gen<IrInstGenTagName>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = result_type;
    instruction->target = target;

    ir_ref_inst_gen(target);

    return &instruction->base;
}

static IrInstSrc *ir_build_tag_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *target)
{
    IrInstSrcTagType *instruction = ir_build_instruction<IrInstSrcTagType>(irb, scope, source_node);
    instruction->target = target;

    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_field_parent_ptr_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value, IrInstSrc *field_name, IrInstSrc *field_ptr)
{
    IrInstSrcFieldParentPtr *inst = ir_build_instruction<IrInstSrcFieldParentPtr>(
            irb, scope, source_node);
    inst->type_value = type_value;
    inst->field_name = field_name;
    inst->field_ptr = field_ptr;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(field_name, irb->current_basic_block);
    ir_ref_instruction(field_ptr, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_field_parent_ptr_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *field_ptr, TypeStructField *field, ZigType *result_type)
{
    IrInstGenFieldParentPtr *inst = ir_build_inst_gen<IrInstGenFieldParentPtr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->base.value->type = result_type;
    inst->field_ptr = field_ptr;
    inst->field = field;

    ir_ref_inst_gen(field_ptr);

    return &inst->base;
}

static IrInstSrc *ir_build_byte_offset_of(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value, IrInstSrc *field_name)
{
    IrInstSrcByteOffsetOf *instruction = ir_build_instruction<IrInstSrcByteOffsetOf>(irb, scope, source_node);
    instruction->type_value = type_value;
    instruction->field_name = field_name;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(field_name, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_bit_offset_of(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *type_value, IrInstSrc *field_name)
{
    IrInstSrcBitOffsetOf *instruction = ir_build_instruction<IrInstSrcBitOffsetOf>(irb, scope, source_node);
    instruction->type_value = type_value;
    instruction->field_name = field_name;

    ir_ref_instruction(type_value, irb->current_basic_block);
    ir_ref_instruction(field_name, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_type_info(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type_value) {
    IrInstSrcTypeInfo *instruction = ir_build_instruction<IrInstSrcTypeInfo>(irb, scope, source_node);
    instruction->type_value = type_value;

    ir_ref_instruction(type_value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *type_info) {
    IrInstSrcType *instruction = ir_build_instruction<IrInstSrcType>(irb, scope, source_node);
    instruction->type_info = type_info;

    ir_ref_instruction(type_info, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_set_eval_branch_quota(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *new_quota)
{
    IrInstSrcSetEvalBranchQuota *instruction = ir_build_instruction<IrInstSrcSetEvalBranchQuota>(irb, scope, source_node);
    instruction->new_quota = new_quota;

    ir_ref_instruction(new_quota, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_align_cast_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *align_bytes, IrInstSrc *target)
{
    IrInstSrcAlignCast *instruction = ir_build_instruction<IrInstSrcAlignCast>(irb, scope, source_node);
    instruction->align_bytes = align_bytes;
    instruction->target = target;

    ir_ref_instruction(align_bytes, irb->current_basic_block);
    ir_ref_instruction(target, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_align_cast_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node, IrInstGen *target,
        ZigType *result_type)
{
    IrInstGenAlignCast *instruction = ir_build_inst_gen<IrInstGenAlignCast>(&ira->new_irb, scope, source_node);
    instruction->base.value->type = result_type;
    instruction->target = target;

    ir_ref_inst_gen(target);

    return &instruction->base;
}

static IrInstSrc *ir_build_resolve_result(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ResultLoc *result_loc, IrInstSrc *ty)
{
    IrInstSrcResolveResult *instruction = ir_build_instruction<IrInstSrcResolveResult>(irb, scope, source_node);
    instruction->result_loc = result_loc;
    instruction->ty = ty;

    if (ty != nullptr) ir_ref_instruction(ty, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_reset_result(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        ResultLoc *result_loc)
{
    IrInstSrcResetResult *instruction = ir_build_instruction<IrInstSrcResetResult>(irb, scope, source_node);
    instruction->result_loc = result_loc;
    instruction->base.is_gen = true;

    return &instruction->base;
}

static IrInstSrc *ir_build_opaque_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcOpaqueType *instruction = ir_build_instruction<IrInstSrcOpaqueType>(irb, scope, source_node);

    return &instruction->base;
}

static IrInstSrc *ir_build_set_align_stack(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *align_bytes)
{
    IrInstSrcSetAlignStack *instruction = ir_build_instruction<IrInstSrcSetAlignStack>(irb, scope, source_node);
    instruction->align_bytes = align_bytes;

    ir_ref_instruction(align_bytes, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_arg_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *fn_type, IrInstSrc *arg_index, bool allow_var)
{
    IrInstSrcArgType *instruction = ir_build_instruction<IrInstSrcArgType>(irb, scope, source_node);
    instruction->fn_type = fn_type;
    instruction->arg_index = arg_index;
    instruction->allow_var = allow_var;

    ir_ref_instruction(fn_type, irb->current_basic_block);
    ir_ref_instruction(arg_index, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_error_return_trace_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstErrorReturnTraceOptional optional)
{
    IrInstSrcErrorReturnTrace *inst = ir_build_instruction<IrInstSrcErrorReturnTrace>(irb, scope, source_node);
    inst->optional = optional;

    return &inst->base;
}

static IrInstGen *ir_build_error_return_trace_gen(IrAnalyze *ira, Scope *scope, AstNode *source_node,
        IrInstErrorReturnTraceOptional optional, ZigType *result_type)
{
    IrInstGenErrorReturnTrace *inst = ir_build_inst_gen<IrInstGenErrorReturnTrace>(&ira->new_irb, scope, source_node);
    inst->base.value->type = result_type;
    inst->optional = optional;

    return &inst->base;
}

static IrInstSrc *ir_build_error_union(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *err_set, IrInstSrc *payload)
{
    IrInstSrcErrorUnion *instruction = ir_build_instruction<IrInstSrcErrorUnion>(irb, scope, source_node);
    instruction->err_set = err_set;
    instruction->payload = payload;

    ir_ref_instruction(err_set, irb->current_basic_block);
    ir_ref_instruction(payload, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_atomic_rmw_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand_type, IrInstSrc *ptr, IrInstSrc *op, IrInstSrc *operand,
        IrInstSrc *ordering)
{
    IrInstSrcAtomicRmw *instruction = ir_build_instruction<IrInstSrcAtomicRmw>(irb, scope, source_node);
    instruction->operand_type = operand_type;
    instruction->ptr = ptr;
    instruction->op = op;
    instruction->operand = operand;
    instruction->ordering = ordering;

    ir_ref_instruction(operand_type, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(op, irb->current_basic_block);
    ir_ref_instruction(operand, irb->current_basic_block);
    ir_ref_instruction(ordering, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_atomic_rmw_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *ptr, IrInstGen *operand, AtomicRmwOp op, AtomicOrder ordering, ZigType *operand_type)
{
    IrInstGenAtomicRmw *instruction = ir_build_inst_gen<IrInstGenAtomicRmw>(&ira->new_irb, source_instr->scope, source_instr->source_node);
    instruction->base.value->type = operand_type;
    instruction->ptr = ptr;
    instruction->op = op;
    instruction->operand = operand;
    instruction->ordering = ordering;

    ir_ref_inst_gen(ptr);
    ir_ref_inst_gen(operand);

    return &instruction->base;
}

static IrInstSrc *ir_build_atomic_load_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand_type, IrInstSrc *ptr, IrInstSrc *ordering)
{
    IrInstSrcAtomicLoad *instruction = ir_build_instruction<IrInstSrcAtomicLoad>(irb, scope, source_node);
    instruction->operand_type = operand_type;
    instruction->ptr = ptr;
    instruction->ordering = ordering;

    ir_ref_instruction(operand_type, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(ordering, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_atomic_load_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *ptr, AtomicOrder ordering, ZigType *operand_type)
{
    IrInstGenAtomicLoad *instruction = ir_build_inst_gen<IrInstGenAtomicLoad>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = operand_type;
    instruction->ptr = ptr;
    instruction->ordering = ordering;

    ir_ref_inst_gen(ptr);

    return &instruction->base;
}

static IrInstSrc *ir_build_atomic_store_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand_type, IrInstSrc *ptr, IrInstSrc *value, IrInstSrc *ordering)
{
    IrInstSrcAtomicStore *instruction = ir_build_instruction<IrInstSrcAtomicStore>(irb, scope, source_node);
    instruction->operand_type = operand_type;
    instruction->ptr = ptr;
    instruction->value = value;
    instruction->ordering = ordering;

    ir_ref_instruction(operand_type, irb->current_basic_block);
    ir_ref_instruction(ptr, irb->current_basic_block);
    ir_ref_instruction(value, irb->current_basic_block);
    ir_ref_instruction(ordering, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_atomic_store_gen(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *ptr, IrInstGen *value, AtomicOrder ordering)
{
    IrInstGenAtomicStore *instruction = ir_build_inst_void<IrInstGenAtomicStore>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->ptr = ptr;
    instruction->value = value;
    instruction->ordering = ordering;

    ir_ref_inst_gen(ptr);
    ir_ref_inst_gen(value);

    return &instruction->base;
}

static IrInstSrc *ir_build_save_err_ret_addr_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcSaveErrRetAddr *inst = ir_build_instruction<IrInstSrcSaveErrRetAddr>(irb, scope, source_node);
    return &inst->base;
}

static IrInstGen *ir_build_save_err_ret_addr_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenSaveErrRetAddr *inst = ir_build_inst_void<IrInstGenSaveErrRetAddr>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    return &inst->base;
}

static IrInstSrc *ir_build_add_implicit_return_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *value, ResultLocReturn *result_loc_ret)
{
    IrInstSrcAddImplicitReturnType *inst = ir_build_instruction<IrInstSrcAddImplicitReturnType>(irb, scope, source_node);
    inst->value = value;
    inst->result_loc_ret = result_loc_ret;

    ir_ref_instruction(value, irb->current_basic_block);

    return &inst->base;
}

static IrInstSrc *ir_build_has_decl(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *container, IrInstSrc *name)
{
    IrInstSrcHasDecl *instruction = ir_build_instruction<IrInstSrcHasDecl>(irb, scope, source_node);
    instruction->container = container;
    instruction->name = name;

    ir_ref_instruction(container, irb->current_basic_block);
    ir_ref_instruction(name, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_undeclared_identifier(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, Buf *name) {
    IrInstSrcUndeclaredIdent *instruction = ir_build_instruction<IrInstSrcUndeclaredIdent>(irb, scope, source_node);
    instruction->name = name;

    return &instruction->base;
}

static IrInstSrc *ir_build_check_runtime_scope(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *scope_is_comptime, IrInstSrc *is_comptime)
{
    IrInstSrcCheckRuntimeScope *instruction = ir_build_instruction<IrInstSrcCheckRuntimeScope>(irb, scope, source_node);
    instruction->scope_is_comptime = scope_is_comptime;
    instruction->is_comptime = is_comptime;

    ir_ref_instruction(scope_is_comptime, irb->current_basic_block);
    ir_ref_instruction(is_comptime, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrc *ir_build_union_init_named_field(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *union_type, IrInstSrc *field_name, IrInstSrc *field_result_loc, IrInstSrc *result_loc)
{
    IrInstSrcUnionInitNamedField *instruction = ir_build_instruction<IrInstSrcUnionInitNamedField>(irb, scope, source_node);
    instruction->union_type = union_type;
    instruction->field_name = field_name;
    instruction->field_result_loc = field_result_loc;
    instruction->result_loc = result_loc;

    ir_ref_instruction(union_type, irb->current_basic_block);
    ir_ref_instruction(field_name, irb->current_basic_block);
    ir_ref_instruction(field_result_loc, irb->current_basic_block);
    if (result_loc != nullptr) ir_ref_instruction(result_loc, irb->current_basic_block);

    return &instruction->base;
}


static IrInstGen *ir_build_vector_to_array(IrAnalyze *ira, IrInst *source_instruction,
        ZigType *result_type, IrInstGen *vector, IrInstGen *result_loc)
{
    IrInstGenVectorToArray *instruction = ir_build_inst_gen<IrInstGenVectorToArray>(&ira->new_irb,
        source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = result_type;
    instruction->vector = vector;
    instruction->result_loc = result_loc;

    ir_ref_inst_gen(vector);
    ir_ref_inst_gen(result_loc);

    return &instruction->base;
}

static IrInstGen *ir_build_ptr_of_array_to_slice(IrAnalyze *ira, IrInst *source_instruction,
        ZigType *result_type, IrInstGen *operand, IrInstGen *result_loc)
{
    IrInstGenPtrOfArrayToSlice *instruction = ir_build_inst_gen<IrInstGenPtrOfArrayToSlice>(&ira->new_irb,
        source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = result_type;
    instruction->operand = operand;
    instruction->result_loc = result_loc;

    ir_ref_inst_gen(operand);
    ir_ref_inst_gen(result_loc);

    return &instruction->base;
}

static IrInstGen *ir_build_array_to_vector(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *array, ZigType *result_type)
{
    IrInstGenArrayToVector *instruction = ir_build_inst_gen<IrInstGenArrayToVector>(&ira->new_irb,
        source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = result_type;
    instruction->array = array;

    ir_ref_inst_gen(array);

    return &instruction->base;
}

static IrInstGen *ir_build_assert_zero(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *target)
{
    IrInstGenAssertZero *instruction = ir_build_inst_gen<IrInstGenAssertZero>(&ira->new_irb,
        source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = ira->codegen->builtin_types.entry_void;
    instruction->target = target;

    ir_ref_inst_gen(target);

    return &instruction->base;
}

static IrInstGen *ir_build_assert_non_null(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *target)
{
    IrInstGenAssertNonNull *instruction = ir_build_inst_gen<IrInstGenAssertNonNull>(&ira->new_irb,
        source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = ira->codegen->builtin_types.entry_void;
    instruction->target = target;

    ir_ref_inst_gen(target);

    return &instruction->base;
}

static IrInstSrc *ir_build_alloca_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *align, const char *name_hint, IrInstSrc *is_comptime)
{
    IrInstSrcAlloca *instruction = ir_build_instruction<IrInstSrcAlloca>(irb, scope, source_node);
    instruction->base.is_gen = true;
    instruction->align = align;
    instruction->name_hint = name_hint;
    instruction->is_comptime = is_comptime;

    if (align != nullptr) ir_ref_instruction(align, irb->current_basic_block);
    if (is_comptime != nullptr) ir_ref_instruction(is_comptime, irb->current_basic_block);

    return &instruction->base;
}
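
// Note that ir_build_alloca_gen below goes through ir_create_inst_gen rather
// than ir_build_inst_gen, so the instruction is created without being
// appended to the current basic block; presumably the caller decides where
// the alloca itself lands (e.g. hoisting it into an entry block).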

static IrInstGenAlloca *ir_build_alloca_gen(IrAnalyze *ira, IrInst *source_instruction,
        uint32_t align, const char *name_hint)
{
    IrInstGenAlloca *instruction = ir_create_inst_gen<IrInstGenAlloca>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    instruction->align = align;
    instruction->name_hint = name_hint;

    return instruction;
}

static IrInstSrc *ir_build_end_expr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *value, ResultLoc *result_loc)
{
    IrInstSrcEndExpr *instruction = ir_build_instruction<IrInstSrcEndExpr>(irb, scope, source_node);
    instruction->base.is_gen = true;
    instruction->value = value;
    instruction->result_loc = result_loc;

    ir_ref_instruction(value, irb->current_basic_block);

    return &instruction->base;
}

static IrInstSrcSuspendBegin *ir_build_suspend_begin_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    return ir_build_instruction<IrInstSrcSuspendBegin>(irb, scope, source_node);
}

static IrInstGen *ir_build_suspend_begin_gen(IrAnalyze *ira, IrInst *source_instr) {
    IrInstGenSuspendBegin *inst = ir_build_inst_void<IrInstGenSuspendBegin>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    return &inst->base;
}

static IrInstSrc *ir_build_suspend_finish_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrcSuspendBegin *begin)
{
    IrInstSrcSuspendFinish *inst = ir_build_instruction<IrInstSrcSuspendFinish>(irb, scope, source_node);
    inst->begin = begin;

    ir_ref_instruction(&begin->base, irb->current_basic_block);

    return &inst->base;
}

static IrInstGen *ir_build_suspend_finish_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGenSuspendBegin *begin) {
    IrInstGenSuspendFinish *inst = ir_build_inst_void<IrInstGenSuspendFinish>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    inst->begin = begin;

    ir_ref_inst_gen(&begin->base);

    return &inst->base;
}

static IrInstSrc *ir_build_await_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *frame, ResultLoc *result_loc, bool is_nosuspend)
{
    IrInstSrcAwait *instruction = ir_build_instruction<IrInstSrcAwait>(irb, scope, source_node);
    instruction->frame = frame;
    instruction->result_loc = result_loc;
    instruction->is_nosuspend = is_nosuspend;

    ir_ref_instruction(frame, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGenAwait *ir_build_await_gen(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *frame, ZigType *result_type, IrInstGen *result_loc, bool is_nosuspend)
{
    IrInstGenAwait *instruction = ir_build_inst_gen<IrInstGenAwait>(&ira->new_irb,
            source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = result_type;
    instruction->frame = frame;
    instruction->result_loc = result_loc;
    instruction->is_nosuspend = is_nosuspend;

    ir_ref_inst_gen(frame);
    if (result_loc != nullptr) ir_ref_inst_gen(result_loc);

    return instruction;
}

static IrInstSrc *ir_build_resume_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *frame) {
    IrInstSrcResume *instruction = ir_build_instruction<IrInstSrcResume>(irb, scope, source_node);
    instruction->frame = frame;

    ir_ref_instruction(frame, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_resume_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *frame) {
    IrInstGenResume *instruction = ir_build_inst_void<IrInstGenResume>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->frame = frame;

    ir_ref_inst_gen(frame);

    return &instruction->base;
}

static IrInstSrcSpillBegin *ir_build_spill_begin_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrc *operand, SpillId spill_id)
{
    IrInstSrcSpillBegin *instruction = ir_build_instruction<IrInstSrcSpillBegin>(irb, scope, source_node);
    instruction->operand = operand;
    instruction->spill_id = spill_id;

    ir_ref_instruction(operand, irb->current_basic_block);

    return instruction;
}

static IrInstGen *ir_build_spill_begin_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *operand,
        SpillId spill_id)
{
    IrInstGenSpillBegin *instruction = ir_build_inst_void<IrInstGenSpillBegin>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->operand = operand;
    instruction->spill_id = spill_id;

    ir_ref_inst_gen(operand);

    return &instruction->base;
}

static IrInstSrc *ir_build_spill_end_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        IrInstSrcSpillBegin *begin)
{
    IrInstSrcSpillEnd *instruction = ir_build_instruction<IrInstSrcSpillEnd>(irb, scope, source_node);
    instruction->begin = begin;

    ir_ref_instruction(&begin->base, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_spill_end_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGenSpillBegin *begin,
        ZigType *result_type)
{
    IrInstGenSpillEnd *instruction = ir_build_inst_gen<IrInstGenSpillEnd>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = result_type;
    instruction->begin = begin;

    ir_ref_inst_gen(&begin->base);

    return &instruction->base;
}

static IrInstGen *ir_build_vector_extract_elem(IrAnalyze *ira, IrInst *source_instruction,
        IrInstGen *vector, IrInstGen *index)
{
    IrInstGenVectorExtractElem *instruction = ir_build_inst_gen<IrInstGenVectorExtractElem>(
            &ira->new_irb, source_instruction->scope, source_instruction->source_node);
    instruction->base.value->type = vector->value->type->data.vector.elem_type;
    instruction->vector = vector;
    instruction->index = index;

    ir_ref_inst_gen(vector);
    ir_ref_inst_gen(index);

    return &instruction->base;
}

static IrInstSrc *ir_build_wasm_memory_size_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *index) {
    IrInstSrcWasmMemorySize *instruction = ir_build_instruction<IrInstSrcWasmMemorySize>(irb, scope, source_node);
    instruction->index = index;

    ir_ref_instruction(index, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_wasm_memory_size_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *index) {
    IrInstGenWasmMemorySize *instruction = ir_build_inst_gen<IrInstGenWasmMemorySize>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = ira->codegen->builtin_types.entry_u32;
    instruction->index = index;

    ir_ref_inst_gen(index);

    return &instruction->base;
}

static IrInstSrc *ir_build_wasm_memory_grow_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *index, IrInstSrc *delta) {
    IrInstSrcWasmMemoryGrow *instruction = ir_build_instruction<IrInstSrcWasmMemoryGrow>(irb, scope, source_node);
    instruction->index = index;
    instruction->delta = delta;

    ir_ref_instruction(index, irb->current_basic_block);
    ir_ref_instruction(delta, irb->current_basic_block);

    return &instruction->base;
}

static IrInstGen *ir_build_wasm_memory_grow_gen(IrAnalyze *ira, IrInst *source_instr, IrInstGen *index, IrInstGen *delta) {
    IrInstGenWasmMemoryGrow *instruction = ir_build_inst_gen<IrInstGenWasmMemoryGrow>(&ira->new_irb,
            source_instr->scope, source_instr->source_node);
    instruction->base.value->type = ira->codegen->builtin_types.entry_i32;
    instruction->index = index;
    instruction->delta = delta;

    ir_ref_inst_gen(index);
    ir_ref_inst_gen(delta);

    return &instruction->base;
}

static IrInstSrc *ir_build_src(IrBuilderSrc *irb, Scope *scope, AstNode *source_node) {
    IrInstSrcSrc *instruction = ir_build_instruction<IrInstSrcSrc>(irb, scope, source_node);

    return &instruction->base;
}
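
// ir_count_defers tallies the defer scopes between inner_scope and
// outer_scope, split by kind: results[ReturnKindUnconditional] counts plain
// `defer` and results[ReturnKindError] counts `errdefer`. ir_gen_return uses
// these counts to decide whether a plain "run defers, then return" suffices
// or whether it needs separate error / non-error paths.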

static void ir_count_defers(IrBuilderSrc *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
    results[ReturnKindUnconditional] = 0;
    results[ReturnKindError] = 0;

    Scope *scope = inner_scope;

    while (scope != outer_scope) {
        assert(scope);
        switch (scope->id) {
            case ScopeIdDefer: {
                AstNode *defer_node = scope->source_node;
                assert(defer_node->type == NodeTypeDefer);
                ReturnKind defer_kind = defer_node->data.defer.kind;
                results[defer_kind] += 1;
                scope = scope->parent;
                continue;
            }
            case ScopeIdDecls:
            case ScopeIdFnDef:
                return;
            case ScopeIdBlock:
            case ScopeIdVarDecl:
            case ScopeIdLoop:
            case ScopeIdSuspend:
            case ScopeIdCompTime:
            case ScopeIdNoSuspend:
            case ScopeIdRuntime:
            case ScopeIdTypeOf:
            case ScopeIdExpr:
                scope = scope->parent;
                continue;
            case ScopeIdDeferExpr:
            case ScopeIdCImport:
                zig_unreachable();
        }
    }
}

static IrInstSrc *ir_mark_gen(IrInstSrc *instruction) {
    instruction->is_gen = true;
    return instruction;
}
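
// ir_gen_defers_for_block walks the scope chain from inner_scope up to
// outer_scope and emits the body of every pending defer along the way. When
// err_value is null, errdefer scopes are skipped (the path being generated
// carries no error); when an errdefer declares an error payload capture, a
// const variable bound to err_value is created for the defer expression's
// scope. It returns false if generating a defer expression failed, and sets
// *is_noreturn when a generated defer expression is noreturn.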

static bool ir_gen_defers_for_block(IrBuilderSrc *irb, Scope *inner_scope, Scope *outer_scope,
        bool *is_noreturn, IrInstSrc *err_value)
{
    Scope *scope = inner_scope;
    if (is_noreturn != nullptr) *is_noreturn = false;
    while (scope != outer_scope) {
        if (!scope)
            return true;

        switch (scope->id) {
            case ScopeIdDefer: {
                AstNode *defer_node = scope->source_node;
                assert(defer_node->type == NodeTypeDefer);
                ReturnKind defer_kind = defer_node->data.defer.kind;
                AstNode *defer_expr_node = defer_node->data.defer.expr;
                AstNode *defer_var_node = defer_node->data.defer.err_payload;

                if (defer_kind == ReturnKindError && err_value == nullptr) {
                    // This is an `errdefer`, but we're generating code for a
                    // `return` that doesn't return an error; skip it.
                    scope = scope->parent;
                    continue;
                }

                Scope *defer_expr_scope = defer_node->data.defer.expr_scope;
                if (defer_var_node != nullptr) {
                    assert(defer_kind == ReturnKindError);
                    assert(defer_var_node->type == NodeTypeSymbol);
                    Buf *var_name = defer_var_node->data.symbol_expr.symbol;

                    if (defer_expr_node->type == NodeTypeUnreachable) {
                        add_node_error(irb->codegen, defer_var_node,
                            buf_sprintf("unused variable: '%s'", buf_ptr(var_name)));
                        return false;
                    }

                    IrInstSrc *is_comptime;
                    if (ir_should_inline(irb->exec, defer_expr_scope)) {
                        is_comptime = ir_build_const_bool(irb, defer_expr_scope,
                            defer_expr_node, true);
                    } else {
                        is_comptime = ir_build_test_comptime(irb, defer_expr_scope,
                            defer_expr_node, err_value);
                    }

                    ZigVar *err_var = ir_create_var(irb, defer_var_node, defer_expr_scope,
                        var_name, true, true, false, is_comptime);
                    build_decl_var_and_init(irb, defer_expr_scope, defer_var_node, err_var, err_value,
                        buf_ptr(var_name), is_comptime);

                    defer_expr_scope = err_var->child_scope;
                }

                IrInstSrc *defer_expr_value = ir_gen_node(irb, defer_expr_node, defer_expr_scope);
                if (defer_expr_value == irb->codegen->invalid_inst_src)
                    return false; // propagate the failure to the caller

                if (defer_expr_value->is_noreturn) {
                    if (is_noreturn != nullptr) *is_noreturn = true;
                } else {
                    ir_mark_gen(ir_build_check_statement_is_void(irb, defer_expr_scope, defer_expr_node,
                                defer_expr_value));
                }
                scope = scope->parent;
                continue;
            }
            case ScopeIdDecls:
            case ScopeIdFnDef:
                return true;
            case ScopeIdBlock:
            case ScopeIdVarDecl:
            case ScopeIdLoop:
            case ScopeIdSuspend:
            case ScopeIdCompTime:
            case ScopeIdNoSuspend:
            case ScopeIdRuntime:
            case ScopeIdTypeOf:
            case ScopeIdExpr:
                scope = scope->parent;
                continue;
            case ScopeIdDeferExpr:
            case ScopeIdCImport:
                zig_unreachable();
        }
    }
    return true;
}

static void ir_set_cursor_at_end_gen(IrBuilderGen *irb, IrBasicBlockGen *basic_block) {
    assert(basic_block);
    irb->current_basic_block = basic_block;
}

static void ir_set_cursor_at_end(IrBuilderSrc *irb, IrBasicBlockSrc *basic_block) {
    assert(basic_block);
    irb->current_basic_block = basic_block;
}

static void ir_append_basic_block_gen(IrBuilderGen *irb, IrBasicBlockGen *bb) {
    assert(!bb->already_appended);
    bb->already_appended = true;
    irb->exec->basic_block_list.append(bb);
}

static void ir_set_cursor_at_end_and_append_block_gen(IrBuilderGen *irb, IrBasicBlockGen *basic_block) {
    ir_append_basic_block_gen(irb, basic_block);
    ir_set_cursor_at_end_gen(irb, basic_block);
}

static void ir_set_cursor_at_end_and_append_block(IrBuilderSrc *irb, IrBasicBlockSrc *basic_block) {
    basic_block->index = irb->exec->basic_block_list.length;
    irb->exec->basic_block_list.append(basic_block);
    ir_set_cursor_at_end(irb, basic_block);
}

static ScopeSuspend *get_scope_suspend(Scope *scope) {
    while (scope) {
        if (scope->id == ScopeIdSuspend)
            return (ScopeSuspend *)scope;
        if (scope->id == ScopeIdFnDef)
            return nullptr;

        scope = scope->parent;
    }
    return nullptr;
}

static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) {
    while (scope) {
        if (scope->id == ScopeIdDeferExpr)
            return (ScopeDeferExpr *)scope;
        if (scope->id == ScopeIdFnDef)
            return nullptr;

        scope = scope->parent;
    }
    return nullptr;
}
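
// ir_gen_return lowers NodeTypeReturnExpr. ReturnKindUnconditional handles
// `return expr`: when errdefers or error-return tracing are involved, the
// generated IR tests whether the returned value is an error and branches so
// that errdefers only run on the error path. ReturnKindError is the
// return-if-error form (`try expr`, judging by the payload unwrap on the
// fall-through path): it tests the error union behind a pointer, returns the
// error code on the error path (running defers and errdefers, and saving the
// error return address when tracing is on), and otherwise unwraps the
// payload and continues. A rough sketch of the unconditional case with
// errdefers, using the block names created below:
//
//   %is_err = test_err %ret_val
//   cond_br %is_err, ErrRetErr, ErrRetOk
//   ErrRetErr: defers + errdefers ; save_err_ret_addr ; br RetStmt
//   ErrRetOk:  defers             ; br RetStmt
//   RetStmt:   return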

static IrInstSrc *ir_gen_return(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
    assert(node->type == NodeTypeReturnExpr);

    ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(scope);
    if (scope_defer_expr) {
        if (!scope_defer_expr->reported_err) {
            add_node_error(irb->codegen, node, buf_sprintf("cannot return from defer expression"));
            scope_defer_expr->reported_err = true;
        }
        return irb->codegen->invalid_inst_src;
    }

    Scope *outer_scope = irb->exec->begin_scope;

    AstNode *expr_node = node->data.return_expr.expr;
    switch (node->data.return_expr.kind) {
        case ReturnKindUnconditional:
            {
                ResultLocReturn *result_loc_ret = heap::c_allocator.create<ResultLocReturn>();
                result_loc_ret->base.id = ResultLocIdReturn;
                ir_build_reset_result(irb, scope, node, &result_loc_ret->base);

                IrInstSrc *return_value;
                if (expr_node) {
                    // Temporarily set this so that if we return a type it gets the name of the function
                    ZigFn *prev_name_fn = irb->exec->name_fn;
                    irb->exec->name_fn = exec_fn_entry(irb->exec);
                    return_value = ir_gen_node_extra(irb, expr_node, scope, LValNone, &result_loc_ret->base);
                    irb->exec->name_fn = prev_name_fn;
                    if (return_value == irb->codegen->invalid_inst_src)
                        return irb->codegen->invalid_inst_src;
                } else {
                    return_value = ir_build_const_void(irb, scope, node);
                    ir_build_end_expr(irb, scope, node, return_value, &result_loc_ret->base);
                }

                ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value, result_loc_ret));

                size_t defer_counts[2];
                ir_count_defers(irb, scope, outer_scope, defer_counts);
                bool have_err_defers = defer_counts[ReturnKindError] > 0;
                if (!have_err_defers && !irb->codegen->have_err_ret_tracing) {
                    // only generate unconditional defers
                    if (!ir_gen_defers_for_block(irb, scope, outer_scope, nullptr, nullptr))
                        return irb->codegen->invalid_inst_src;
                    IrInstSrc *result = ir_build_return_src(irb, scope, node, nullptr);
                    result_loc_ret->base.source_instruction = result;
                    return result;
                }
                bool should_inline = ir_should_inline(irb->exec, scope);

                IrBasicBlockSrc *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
                IrBasicBlockSrc *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");

                IrInstSrc *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);

                IrInstSrc *is_comptime;
                if (should_inline) {
                    is_comptime = ir_build_const_bool(irb, scope, node, should_inline);
                } else {
                    is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
                }

                ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime));
                IrBasicBlockSrc *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");

                ir_set_cursor_at_end_and_append_block(irb, err_block);
                if (!ir_gen_defers_for_block(irb, scope, outer_scope, nullptr, return_value))
                    return irb->codegen->invalid_inst_src;
                if (irb->codegen->have_err_ret_tracing && !should_inline) {
                    ir_build_save_err_ret_addr_src(irb, scope, node);
                }
                ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);

                ir_set_cursor_at_end_and_append_block(irb, ok_block);
                if (!ir_gen_defers_for_block(irb, scope, outer_scope, nullptr, nullptr))
                    return irb->codegen->invalid_inst_src;
                ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);

                ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
                IrInstSrc *result = ir_build_return_src(irb, scope, node, nullptr);
                result_loc_ret->base.source_instruction = result;
                return result;
            }
        case ReturnKindError:
            {
                assert(expr_node);
                IrInstSrc *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
                if (err_union_ptr == irb->codegen->invalid_inst_src)
                    return irb->codegen->invalid_inst_src;
                IrInstSrc *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true, false);

                IrBasicBlockSrc *return_block = ir_create_basic_block(irb, scope, "ErrRetReturn");
                IrBasicBlockSrc *continue_block = ir_create_basic_block(irb, scope, "ErrRetContinue");
                IrInstSrc *is_comptime;
                bool should_inline = ir_should_inline(irb->exec, scope);
                if (should_inline) {
                    is_comptime = ir_build_const_bool(irb, scope, node, true);
                } else {
                    is_comptime = ir_build_test_comptime(irb, scope, node, is_err_val);
                }
                ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err_val, return_block, continue_block, is_comptime));

                ir_set_cursor_at_end_and_append_block(irb, return_block);
                IrInstSrc *err_val_ptr = ir_build_unwrap_err_code_src(irb, scope, node, err_union_ptr);
                IrInstSrc *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
                ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val, nullptr));
                IrInstSrcSpillBegin *spill_begin = ir_build_spill_begin_src(irb, scope, node, err_val,
                        SpillIdRetErrCode);
                ResultLocReturn *result_loc_ret = heap::c_allocator.create<ResultLocReturn>();
                result_loc_ret->base.id = ResultLocIdReturn;
                ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
                ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base);

                bool is_noreturn = false;
                if (!ir_gen_defers_for_block(irb, scope, outer_scope, &is_noreturn, err_val)) {
                    return irb->codegen->invalid_inst_src;
                }
                if (!is_noreturn) {
                    if (irb->codegen->have_err_ret_tracing && !should_inline) {
                        ir_build_save_err_ret_addr_src(irb, scope, node);
                    }
                    err_val = ir_build_spill_end_src(irb, scope, node, spill_begin);
                    IrInstSrc *ret_inst = ir_build_return_src(irb, scope, node, err_val);
                    result_loc_ret->base.source_instruction = ret_inst;
                }

                ir_set_cursor_at_end_and_append_block(irb, continue_block);
                IrInstSrc *unwrapped_ptr = ir_build_unwrap_err_payload_src(irb, scope, node, err_union_ptr, false, false);
                if (lval == LValPtr)
                    return unwrapped_ptr;
                else
                    return ir_expr_wrap(irb, scope, ir_build_load_ptr(irb, scope, node, unwrapped_ptr), result_loc);
            }
    }
    zig_unreachable();
}
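
// create_local_var allocates the ZigVar and reports the usual declaration
// errors: redeclaring a non-shadowable variable, shadowing a primitive type
// name, or colliding with a top-level declaration. Passing a null name
// yields the anonymous "_anon" variable used for compiler-introduced
// temporaries (see ir_create_var below, which also maps `_` to an anonymous,
// shadowable variable).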

static ZigVar *create_local_var(CodeGen *codegen, AstNode *node, Scope *parent_scope,
        Buf *name, bool src_is_const, bool gen_is_const, bool is_shadowable, IrInstSrc *is_comptime,
        bool skip_name_check)
{
    ZigVar *variable_entry = heap::c_allocator.create<ZigVar>();
    variable_entry->parent_scope = parent_scope;
    variable_entry->shadowable = is_shadowable;
    variable_entry->is_comptime = is_comptime;
    variable_entry->src_arg_index = SIZE_MAX;
    variable_entry->const_value = codegen->pass1_arena->create<ZigValue>();

    if (is_comptime != nullptr) {
        is_comptime->base.ref_count += 1;
    }

    if (name) {
        variable_entry->name = strdup(buf_ptr(name));

        if (!skip_name_check) {
            ZigVar *existing_var = find_variable(codegen, parent_scope, name, nullptr);
            if (existing_var && !existing_var->shadowable) {
                if (existing_var->var_type == nullptr || !type_is_invalid(existing_var->var_type)) {
                    ErrorMsg *msg = add_node_error(codegen, node,
                            buf_sprintf("redeclaration of variable '%s'", buf_ptr(name)));
                    add_error_note(codegen, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
                }
                variable_entry->var_type = codegen->builtin_types.entry_invalid;
            } else {
                ZigType *type;
                if (get_primitive_type(codegen, name, &type) != ErrorPrimitiveTypeNotFound) {
                    add_node_error(codegen, node,
                            buf_sprintf("variable shadows primitive type '%s'", buf_ptr(name)));
                    variable_entry->var_type = codegen->builtin_types.entry_invalid;
                } else {
                    Tld *tld = find_decl(codegen, parent_scope, name);
                    if (tld != nullptr) {
                        bool want_err_msg = true;
                        if (tld->id == TldIdVar) {
                            ZigVar *var = reinterpret_cast<TldVar *>(tld)->var;
                            if (var != nullptr && var->var_type != nullptr && type_is_invalid(var->var_type)) {
                                want_err_msg = false;
                            }
                        }
                        if (want_err_msg) {
                            ErrorMsg *msg = add_node_error(codegen, node,
                                    buf_sprintf("redefinition of '%s'", buf_ptr(name)));
                            add_error_note(codegen, msg, tld->source_node, buf_sprintf("previous definition is here"));
                        }
                        variable_entry->var_type = codegen->builtin_types.entry_invalid;
                    }
                }
            }
        }
    } else {
        assert(is_shadowable);
        // TODO: make this name not actually be in scope; the user should be able to declare a variable called "_anon".
        // This might already be solved; make sure it has test coverage.
        // Consider prefixing this name so the debug info doesn't clobber user debug info for variables of the same name.
        variable_entry->name = "_anon";
    }

    variable_entry->src_is_const = src_is_const;
    variable_entry->gen_is_const = gen_is_const;
    variable_entry->decl_node = node;
    variable_entry->child_scope = create_var_scope(codegen, node, parent_scope, variable_entry);

    return variable_entry;
}

// Set name to nullptr to make the variable anonymous (not visible to programmer).
// After you call this function, var->child_scope has the variable in scope.
static ZigVar *ir_create_var(IrBuilderSrc *irb, AstNode *node, Scope *scope, Buf *name,
        bool src_is_const, bool gen_is_const, bool is_shadowable, IrInstSrc *is_comptime)
{
    bool is_underscored = name ? buf_eql_str(name, "_") : false;
    ZigVar *var = create_local_var(irb->codegen, node, scope,
            (is_underscored ? nullptr : name), src_is_const, gen_is_const,
            (is_underscored ? true : is_shadowable), is_comptime, false);
    assert(var->child_scope);
    return var;
}

static ResultLocPeer *create_peer_result(ResultLocPeerParent *peer_parent) {
    ResultLocPeer *result = heap::c_allocator.create<ResultLocPeer>();
    result->base.id = ResultLocIdPeer;
    result->base.source_instruction = peer_parent->base.source_instruction;
    result->parent = peer_parent;
    result->base.allow_write_through_const = peer_parent->parent->allow_write_through_const;
    return result;
}
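
// ir_gen_block lowers a block expression. Statements are generated in order,
// with each `defer` and each variable declaration pushing a new child scope.
// A labeled block additionally gets an end basic block plus a
// ResultLocPeerParent, so that `break` results and the implicit fall-through
// value can meet in a single phi that shares one result location. The
// function's main block (irb->main_block_node) is special-cased at the end
// to emit the implicit return together with its unconditional defers.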

static IrInstSrc *ir_gen_block(IrBuilderSrc *irb, Scope *parent_scope, AstNode *block_node, LVal lval,
        ResultLoc *result_loc)
{
    assert(block_node->type == NodeTypeBlock);

    ZigList<IrInstSrc *> incoming_values = {0};
    ZigList<IrBasicBlockSrc *> incoming_blocks = {0};

    ScopeBlock *scope_block = create_block_scope(irb->codegen, block_node, parent_scope);

    Scope *outer_block_scope = &scope_block->base;
    Scope *child_scope = outer_block_scope;

    ZigFn *fn_entry = scope_fn_entry(parent_scope);
    if (fn_entry && fn_entry->child_scope == parent_scope) {
        fn_entry->def_scope = scope_block;
    }

    if (block_node->data.block.statements.length == 0) {
        // {}
        return ir_lval_wrap(irb, parent_scope, ir_build_const_void(irb, child_scope, block_node), lval, result_loc);
    }

    if (block_node->data.block.name != nullptr) {
        scope_block->lval = lval;
        scope_block->incoming_blocks = &incoming_blocks;
        scope_block->incoming_values = &incoming_values;
        scope_block->end_block = ir_create_basic_block(irb, parent_scope, "BlockEnd");
        scope_block->is_comptime = ir_build_const_bool(irb, parent_scope, block_node,
                ir_should_inline(irb->exec, parent_scope));

        scope_block->peer_parent = heap::c_allocator.create<ResultLocPeerParent>();
        scope_block->peer_parent->base.id = ResultLocIdPeerParent;
        scope_block->peer_parent->base.source_instruction = scope_block->is_comptime;
        scope_block->peer_parent->base.allow_write_through_const = result_loc->allow_write_through_const;
        scope_block->peer_parent->end_bb = scope_block->end_block;
        scope_block->peer_parent->is_comptime = scope_block->is_comptime;
        scope_block->peer_parent->parent = result_loc;
        ir_build_reset_result(irb, parent_scope, block_node, &scope_block->peer_parent->base);
    }

    bool is_continuation_unreachable = false;
    bool found_invalid_inst = false;
    IrInstSrc *noreturn_return_value = nullptr;
    for (size_t i = 0; i < block_node->data.block.statements.length; i += 1) {
        AstNode *statement_node = block_node->data.block.statements.at(i);

        IrInstSrc *statement_value = ir_gen_node(irb, statement_node, child_scope);
        if (statement_value == irb->codegen->invalid_inst_src) {
            // keep generating all the elements of the block in case of error,
            // so we can collect other compile errors
            found_invalid_inst = true;
            continue;
        }

        is_continuation_unreachable = instr_is_unreachable(statement_value);
        if (is_continuation_unreachable) {
            // keep the last noreturn statement value around in case we need to return it
            noreturn_return_value = statement_value;
        }
        // This logic must be kept in sync with
        // [STMT_EXPR_TEST_THING] <--- (search this token)
        if (statement_node->type == NodeTypeDefer) {
            // defer starts a new scope
            child_scope = statement_node->data.defer.child_scope;
            assert(child_scope);
        } else if (statement_value->id == IrInstSrcIdDeclVar) {
            // variable declarations start a new scope
            IrInstSrcDeclVar *decl_var_instruction = (IrInstSrcDeclVar *)statement_value;
            child_scope = decl_var_instruction->var->child_scope;
        } else if (!is_continuation_unreachable) {
            // this statement's value must be void
            ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, statement_node, statement_value));
        }
    }

    if (found_invalid_inst)
        return irb->codegen->invalid_inst_src;

    if (is_continuation_unreachable) {
        assert(noreturn_return_value != nullptr);
        if (block_node->data.block.name == nullptr || incoming_blocks.length == 0) {
            return noreturn_return_value;
        }

        if (scope_block->peer_parent != nullptr && scope_block->peer_parent->peers.length != 0) {
            scope_block->peer_parent->peers.last()->next_bb = scope_block->end_block;
        }
        ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block);
        IrInstSrc *phi = ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length,
                incoming_blocks.items, incoming_values.items, scope_block->peer_parent);
        return ir_expr_wrap(irb, parent_scope, phi, result_loc);
    } else {
        incoming_blocks.append(irb->current_basic_block);
        IrInstSrc *else_expr_result = ir_mark_gen(ir_build_const_void(irb, parent_scope, block_node));

        if (scope_block->peer_parent != nullptr) {
            ResultLocPeer *peer_result = create_peer_result(scope_block->peer_parent);
            scope_block->peer_parent->peers.append(peer_result);
            ir_build_end_expr(irb, parent_scope, block_node, else_expr_result, &peer_result->base);

            if (scope_block->peer_parent->peers.length != 0) {
                scope_block->peer_parent->peers.last()->next_bb = scope_block->end_block;
            }
        }

        incoming_values.append(else_expr_result);
    }

    bool is_return_from_fn = block_node == irb->main_block_node;
    if (!is_return_from_fn) {
        if (!ir_gen_defers_for_block(irb, child_scope, outer_block_scope, nullptr, nullptr))
            return irb->codegen->invalid_inst_src;
    }

    IrInstSrc *result;
    if (block_node->data.block.name != nullptr) {
        ir_mark_gen(ir_build_br(irb, parent_scope, block_node, scope_block->end_block, scope_block->is_comptime));
        ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block);
        IrInstSrc *phi = ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length,
                incoming_blocks.items, incoming_values.items, scope_block->peer_parent);
        result = ir_expr_wrap(irb, parent_scope, phi, result_loc);
    } else {
        IrInstSrc *void_inst = ir_mark_gen(ir_build_const_void(irb, child_scope, block_node));
        result = ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc);
    }
    if (!is_return_from_fn)
        return result;

    // no need for save_err_ret_addr because this cannot return an error
    // only generate unconditional defers

    ir_mark_gen(ir_build_add_implicit_return_type(irb, child_scope, block_node, result, nullptr));
    ResultLocReturn *result_loc_ret = heap::c_allocator.create<ResultLocReturn>();
    result_loc_ret->base.id = ResultLocIdReturn;
    ir_build_reset_result(irb, parent_scope, block_node, &result_loc_ret->base);
    ir_mark_gen(ir_build_end_expr(irb, parent_scope, block_node, result, &result_loc_ret->base));
    if (!ir_gen_defers_for_block(irb, child_scope, outer_block_scope, nullptr, nullptr))
        return irb->codegen->invalid_inst_src;
    return ir_mark_gen(ir_build_return_src(irb, child_scope, result->base.source_node, result));
}
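
// ir_gen_bin_op_id is the generic binary operator lowering. For array
// concatenation and array multiplication the operands are generated in a
// comptime scope, presumably because those operators are evaluated entirely
// at compile time in this front end.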

static IrInstSrc *ir_gen_bin_op_id(IrBuilderSrc *irb, Scope *scope, AstNode *node, IrBinOp op_id) {
    Scope *inner_scope = scope;
    if (op_id == IrBinOpArrayCat || op_id == IrBinOpArrayMult) {
        inner_scope = create_comptime_scope(irb->codegen, node, scope);
    }

    IrInstSrc *op1 = ir_gen_node(irb, node->data.bin_op_expr.op1, inner_scope);
    IrInstSrc *op2 = ir_gen_node(irb, node->data.bin_op_expr.op2, inner_scope);

    if (op1 == irb->codegen->invalid_inst_src || op2 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    return ir_build_bin_op(irb, scope, node, op_id, op1, op2, true);
}

static IrInstSrc *ir_gen_merge_err_sets(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    IrInstSrc *op1 = ir_gen_node(irb, node->data.bin_op_expr.op1, scope);
    IrInstSrc *op2 = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);

    if (op1 == irb->codegen->invalid_inst_src || op2 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    // TODO only pass type_name when the || operator is the top level AST node in the var decl expr
    Buf bare_name = BUF_INIT;
    Buf *type_name = get_anon_type_name(irb->codegen, irb->exec, "error", scope, node, &bare_name);

    return ir_build_merge_err_sets(irb, scope, node, op1, op2, type_name);
}

static IrInstSrc *ir_gen_assign(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    IrInstSrc *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValAssign, nullptr);
    if (lvalue == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    ResultLocInstruction *result_loc_inst = heap::c_allocator.create<ResultLocInstruction>();
    result_loc_inst->base.id = ResultLocIdInstruction;
    result_loc_inst->base.source_instruction = lvalue;
    ir_ref_instruction(lvalue, irb->current_basic_block);
    ir_build_reset_result(irb, scope, node, &result_loc_inst->base);

    IrInstSrc *rvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op2, scope, LValNone,
            &result_loc_inst->base);
    if (rvalue == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    return ir_build_const_void(irb, scope, node);
}

static IrInstSrc *ir_gen_assign_merge_err_sets(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    IrInstSrc *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValAssign, nullptr);
    if (lvalue == irb->codegen->invalid_inst_src)
        return lvalue;
    IrInstSrc *op1 = ir_build_load_ptr(irb, scope, node->data.bin_op_expr.op1, lvalue);
    IrInstSrc *op2 = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
    if (op2 == irb->codegen->invalid_inst_src)
        return op2;
    IrInstSrc *result = ir_build_merge_err_sets(irb, scope, node, op1, op2, nullptr);
    ir_build_store_ptr(irb, scope, node, lvalue, result);
    return ir_build_const_void(irb, scope, node);
}

static IrInstSrc *ir_gen_assign_op(IrBuilderSrc *irb, Scope *scope, AstNode *node, IrBinOp op_id) {
    IrInstSrc *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValAssign, nullptr);
    if (lvalue == irb->codegen->invalid_inst_src)
        return lvalue;
    IrInstSrc *op1 = ir_build_load_ptr(irb, scope, node->data.bin_op_expr.op1, lvalue);
    IrInstSrc *op2 = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
    if (op2 == irb->codegen->invalid_inst_src)
        return op2;
    IrInstSrc *result = ir_build_bin_op(irb, scope, node, op_id, op1, op2, true);
    ir_build_store_ptr(irb, scope, node, lvalue, result);
    return ir_build_const_void(irb, scope, node);
}
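
// ir_gen_bool_or / ir_gen_bool_and lower the short-circuit boolean
// operators: evaluate the left operand, branch on it, evaluate the right
// operand only on the edge that still needs it, and merge the two values
// with a phi. Roughly, for `or` (block names as created below):
//
//   cond_br %val1, BoolOrTrue, BoolOrFalse
//   BoolOrFalse: %val2 = ... ; br BoolOrTrue
//   BoolOrTrue:  result = phi [ %val1, %val2 ]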

static IrInstSrc *ir_gen_bool_or(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeBinOpExpr);

    IrInstSrc *val1 = ir_gen_node(irb, node->data.bin_op_expr.op1, scope);
    if (val1 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *post_val1_block = irb->current_basic_block;

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, scope)) {
        is_comptime = ir_build_const_bool(irb, scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, scope, node, val1);
    }

    // block for when val1 == false
    IrBasicBlockSrc *false_block = ir_create_basic_block(irb, scope, "BoolOrFalse");
    // block for when val1 == true (don't even evaluate the second part)
    IrBasicBlockSrc *true_block = ir_create_basic_block(irb, scope, "BoolOrTrue");

    ir_build_cond_br(irb, scope, node, val1, true_block, false_block, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, false_block);
    IrInstSrc *val2 = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
    if (val2 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *post_val2_block = irb->current_basic_block;

    ir_build_br(irb, scope, node, true_block, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, true_block);

    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = val1;
    incoming_values[1] = val2;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = post_val1_block;
    incoming_blocks[1] = post_val2_block;

    return ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr);
}

static IrInstSrc *ir_gen_bool_and(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeBinOpExpr);

    IrInstSrc *val1 = ir_gen_node(irb, node->data.bin_op_expr.op1, scope);
    if (val1 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *post_val1_block = irb->current_basic_block;

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, scope)) {
        is_comptime = ir_build_const_bool(irb, scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, scope, node, val1);
    }

    // block for when val1 == true
    IrBasicBlockSrc *true_block = ir_create_basic_block(irb, scope, "BoolAndTrue");
    // block for when val1 == false (don't even evaluate the second part)
    IrBasicBlockSrc *false_block = ir_create_basic_block(irb, scope, "BoolAndFalse");

    ir_build_cond_br(irb, scope, node, val1, true_block, false_block, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, true_block);
    IrInstSrc *val2 = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
    if (val2 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *post_val2_block = irb->current_basic_block;

    ir_build_br(irb, scope, node, false_block, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, false_block);

    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = val1;
    incoming_values[1] = val2;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = post_val1_block;
    incoming_blocks[1] = post_val2_block;

    return ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr);
}

static ResultLocPeerParent *ir_build_result_peers(IrBuilderSrc *irb, IrInstSrc *cond_br_inst,
        IrBasicBlockSrc *end_block, ResultLoc *parent, IrInstSrc *is_comptime)
{
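    // Set up a ResultLocPeerParent: a result location shared by peer branches
    // that merge at end_block. The caller has already appended cond_br_inst to
    // the current block, so it is popped below, a reset_result instruction is
    // emitted in its place, and the branch is re-appended as the terminator.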
    ResultLocPeerParent *peer_parent = heap::c_allocator.create<ResultLocPeerParent>();
    peer_parent->base.id = ResultLocIdPeerParent;
    peer_parent->base.source_instruction = cond_br_inst;
    peer_parent->base.allow_write_through_const = parent->allow_write_through_const;
    peer_parent->end_bb = end_block;
    peer_parent->is_comptime = is_comptime;
    peer_parent->parent = parent;

    IrInstSrc *popped_inst = irb->current_basic_block->instruction_list.pop();
    ir_assert(popped_inst == cond_br_inst, &cond_br_inst->base);

    ir_build_reset_result(irb, cond_br_inst->base.scope, cond_br_inst->base.source_node, &peer_parent->base);
    irb->current_basic_block->instruction_list.append(popped_inst);

    return peer_parent;
}

static ResultLocPeerParent *ir_build_binary_result_peers(IrBuilderSrc *irb, IrInstSrc *cond_br_inst,
        IrBasicBlockSrc *else_block, IrBasicBlockSrc *end_block, ResultLoc *parent, IrInstSrc *is_comptime)
{
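    // Common two-way case: the first peer's next_bb is else_block, the
    // second's is end_block.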
    ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, parent, is_comptime);

    peer_parent->peers.append(create_peer_result(peer_parent));
    peer_parent->peers.last()->next_bb = else_block;

    peer_parent->peers.append(create_peer_result(peer_parent));
    peer_parent->peers.last()->next_bb = end_block;

    return peer_parent;
}

static IrInstSrc *ir_gen_orelse(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeBinOpExpr);
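    // Lower `opt orelse fallback`: load the optional through a pointer, test it
    // for null, and branch. The null arm evaluates the fallback expression; the
    // non-null arm unwraps the payload in place. A phi merges the two values at
    // OptionalEnd, with peer result locations so either arm can write through
    // the caller-provided result location.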

    AstNode *op1_node = node->data.bin_op_expr.op1;
    AstNode *op2_node = node->data.bin_op_expr.op2;

    IrInstSrc *maybe_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr, nullptr);
    if (maybe_ptr == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *maybe_val = ir_build_load_ptr(irb, parent_scope, node, maybe_ptr);
    IrInstSrc *is_non_null = ir_build_test_non_null_src(irb, parent_scope, node, maybe_val);

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, parent_scope)) {
        is_comptime = ir_build_const_bool(irb, parent_scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, parent_scope, node, is_non_null);
    }

    IrBasicBlockSrc *ok_block = ir_create_basic_block(irb, parent_scope, "OptionalNonNull");
    IrBasicBlockSrc *null_block = ir_create_basic_block(irb, parent_scope, "OptionalNull");
    IrBasicBlockSrc *end_block = ir_create_basic_block(irb, parent_scope, "OptionalEnd");
    IrInstSrc *cond_br_inst = ir_build_cond_br(irb, parent_scope, node, is_non_null, ok_block, null_block, is_comptime);

    ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, ok_block, end_block,
            result_loc, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, null_block);
    IrInstSrc *null_result = ir_gen_node_extra(irb, op2_node, parent_scope, LValNone,
            &peer_parent->peers.at(0)->base);
    if (null_result == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *after_null_block = irb->current_basic_block;
    if (!instr_is_unreachable(null_result))
        ir_mark_gen(ir_build_br(irb, parent_scope, node, end_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, ok_block);
    IrInstSrc *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, parent_scope, node, maybe_ptr, false);
    IrInstSrc *unwrapped_payload = ir_build_load_ptr(irb, parent_scope, node, unwrapped_ptr);
    ir_build_end_expr(irb, parent_scope, node, unwrapped_payload, &peer_parent->peers.at(1)->base);
    IrBasicBlockSrc *after_ok_block = irb->current_basic_block;
    ir_build_br(irb, parent_scope, node, end_block, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, end_block);
    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = null_result;
    incoming_values[1] = unwrapped_payload;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = after_null_block;
    incoming_blocks[1] = after_ok_block;
    IrInstSrc *phi = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values, peer_parent);
    return ir_lval_wrap(irb, parent_scope, phi, lval, result_loc);
}

static IrInstSrc *ir_gen_error_union(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
    assert(node->type == NodeTypeBinOpExpr);
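    // The `ErrSet!Payload` type expression: generate both operand types and
    // combine them into an error union type instruction.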

    AstNode *op1_node = node->data.bin_op_expr.op1;
    AstNode *op2_node = node->data.bin_op_expr.op2;

    IrInstSrc *err_set = ir_gen_node(irb, op1_node, parent_scope);
    if (err_set == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *payload = ir_gen_node(irb, op2_node, parent_scope);
    if (payload == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    return ir_build_error_union(irb, parent_scope, node, err_set, payload);
}

static IrInstSrc *ir_gen_bin_op(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
    assert(node->type == NodeTypeBinOpExpr);

    BinOpType bin_op_type = node->data.bin_op_expr.bin_op;
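    // Dispatch on the operator: assignments go through ir_gen_assign /
    // ir_gen_assign_op, `and`/`or` get short-circuit lowering, `orelse` manages
    // its own result location, and most remaining operators map directly onto
    // an IrBinOp id.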
    switch (bin_op_type) {
        case BinOpTypeInvalid:
            zig_unreachable();
        case BinOpTypeAssign:
            return ir_lval_wrap(irb, scope, ir_gen_assign(irb, scope, node), lval, result_loc);
        case BinOpTypeAssignTimes:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpMult), lval, result_loc);
        case BinOpTypeAssignTimesWrap:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpMultWrap), lval, result_loc);
        case BinOpTypeAssignDiv:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpDivUnspecified), lval, result_loc);
        case BinOpTypeAssignMod:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpRemUnspecified), lval, result_loc);
        case BinOpTypeAssignPlus:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpAdd), lval, result_loc);
        case BinOpTypeAssignPlusWrap:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpAddWrap), lval, result_loc);
        case BinOpTypeAssignMinus:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpSub), lval, result_loc);
        case BinOpTypeAssignMinusWrap:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpSubWrap), lval, result_loc);
        case BinOpTypeAssignBitShiftLeft:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBitShiftLeftLossy), lval, result_loc);
        case BinOpTypeAssignBitShiftRight:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBitShiftRightLossy), lval, result_loc);
        case BinOpTypeAssignBitAnd:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBinAnd), lval, result_loc);
        case BinOpTypeAssignBitXor:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBinXor), lval, result_loc);
        case BinOpTypeAssignBitOr:
            return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBinOr), lval, result_loc);
        case BinOpTypeAssignMergeErrorSets:
            return ir_lval_wrap(irb, scope, ir_gen_assign_merge_err_sets(irb, scope, node), lval, result_loc);
        case BinOpTypeBoolOr:
            return ir_lval_wrap(irb, scope, ir_gen_bool_or(irb, scope, node), lval, result_loc);
        case BinOpTypeBoolAnd:
            return ir_lval_wrap(irb, scope, ir_gen_bool_and(irb, scope, node), lval, result_loc);
        case BinOpTypeCmpEq:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpEq), lval, result_loc);
        case BinOpTypeCmpNotEq:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpNotEq), lval, result_loc);
        case BinOpTypeCmpLessThan:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpLessThan), lval, result_loc);
        case BinOpTypeCmpGreaterThan:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpGreaterThan), lval, result_loc);
        case BinOpTypeCmpLessOrEq:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpLessOrEq), lval, result_loc);
        case BinOpTypeCmpGreaterOrEq:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpGreaterOrEq), lval, result_loc);
        case BinOpTypeBinOr:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBinOr), lval, result_loc);
        case BinOpTypeBinXor:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBinXor), lval, result_loc);
        case BinOpTypeBinAnd:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBinAnd), lval, result_loc);
        case BinOpTypeBitShiftLeft:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBitShiftLeftLossy), lval, result_loc);
        case BinOpTypeBitShiftRight:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBitShiftRightLossy), lval, result_loc);
        case BinOpTypeAdd:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpAdd), lval, result_loc);
        case BinOpTypeAddWrap:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpAddWrap), lval, result_loc);
        case BinOpTypeSub:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpSub), lval, result_loc);
        case BinOpTypeSubWrap:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpSubWrap), lval, result_loc);
        case BinOpTypeMult:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpMult), lval, result_loc);
        case BinOpTypeMultWrap:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpMultWrap), lval, result_loc);
        case BinOpTypeDiv:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpDivUnspecified), lval, result_loc);
        case BinOpTypeMod:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpRemUnspecified), lval, result_loc);
        case BinOpTypeArrayCat:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayCat), lval, result_loc);
        case BinOpTypeArrayMult:
            return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayMult), lval, result_loc);
        case BinOpTypeMergeErrorSets:
            return ir_lval_wrap(irb, scope, ir_gen_merge_err_sets(irb, scope, node), lval, result_loc);
        case BinOpTypeUnwrapOptional:
            return ir_gen_orelse(irb, scope, node, lval, result_loc);
        case BinOpTypeErrorUnion:
            return ir_lval_wrap(irb, scope, ir_gen_error_union(irb, scope, node), lval, result_loc);
    }
    zig_unreachable();
}

static IrInstSrc *ir_gen_int_lit(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeIntLiteral);

    return ir_build_const_bigint(irb, scope, node, node->data.int_literal.bigint);
}

static IrInstSrc *ir_gen_float_lit(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeFloatLiteral);

    if (node->data.float_literal.overflow) {
        add_node_error(irb->codegen, node, buf_sprintf("float literal out of range of any type"));
        return irb->codegen->invalid_inst_src;
    }

    return ir_build_const_bigfloat(irb, scope, node, node->data.float_literal.bigfloat);
}

static IrInstSrc *ir_gen_char_lit(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeCharLiteral);

    return ir_build_const_uint(irb, scope, node, node->data.char_literal.value);
}

static IrInstSrc *ir_gen_null_literal(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeNullLiteral);

    return ir_build_const_null(irb, scope, node);
}

static void populate_invalid_variable_in_scope(CodeGen *g, Scope *scope, AstNode *node, Buf *var_name) {
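    // Register a poisoned variable in the outermost decls scope (the file-level
    // scope) so that later lookups of this name find a declaration marked
    // TldResolutionInvalid instead of producing further "undeclared identifier"
    // errors.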
    ScopeDecls *scope_decls = nullptr;
    while (scope != nullptr) {
        if (scope->id == ScopeIdDecls) {
            scope_decls = reinterpret_cast<ScopeDecls *>(scope);
        }
        scope = scope->parent;
    }
    TldVar *tld_var = heap::c_allocator.create<TldVar>();
    init_tld(&tld_var->base, TldIdVar, var_name, VisibModPub, node, &scope_decls->base);
    tld_var->base.resolution = TldResolutionInvalid;
    tld_var->var = add_variable(g, node, &scope_decls->base, var_name, false,
            g->invalid_inst_gen->value, &tld_var->base, g->builtin_types.entry_invalid);
    scope_decls->decl_table.put(var_name, &tld_var->base);
}

static IrInstSrc *ir_gen_symbol(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
    Error err;
    assert(node->type == NodeTypeSymbol);

    Buf *variable_name = node->data.symbol_expr.symbol;
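    // Resolution order: the `_` discard pseudo-variable, primitive type names,
    // local variables (find_variable), then container-level declarations
    // (find_decl); anything else is an undeclared identifier, suppressed when
    // an import in this file already failed.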

    if (buf_eql_str(variable_name, "_")) {
        if (lval == LValAssign) {
            IrInstSrcConst *const_instruction = ir_build_instruction<IrInstSrcConst>(irb, scope, node);
            const_instruction->value = irb->codegen->pass1_arena->create<ZigValue>();
            const_instruction->value->type = get_pointer_to_type(irb->codegen,
                    irb->codegen->builtin_types.entry_void, false);
            const_instruction->value->special = ConstValSpecialStatic;
            const_instruction->value->data.x_ptr.special = ConstPtrSpecialDiscard;
            return &const_instruction->base;
        } else {
            add_node_error(irb->codegen, node, buf_sprintf("`_` may only be used to assign things to"));
            return irb->codegen->invalid_inst_src;
        }
    }

    ZigType *primitive_type;
    if ((err = get_primitive_type(irb->codegen, variable_name, &primitive_type))) {
        if (err == ErrorOverflow) {
            add_node_error(irb->codegen, node,
                buf_sprintf("primitive integer type '%s' exceeds maximum bit width of 65535",
                    buf_ptr(variable_name)));
            return irb->codegen->invalid_inst_src;
        }
        assert(err == ErrorPrimitiveTypeNotFound);
    } else {
        IrInstSrc *value = ir_build_const_type(irb, scope, node, primitive_type);
        if (lval == LValPtr || lval == LValAssign) {
            return ir_build_ref_src(irb, scope, node, value);
        } else {
            return ir_expr_wrap(irb, scope, value, result_loc);
        }
    }

    ScopeFnDef *crossed_fndef_scope;
    ZigVar *var = find_variable(irb->codegen, scope, variable_name, &crossed_fndef_scope);
    if (var) {
        IrInstSrc *var_ptr = ir_build_var_ptr_x(irb, scope, node, var, crossed_fndef_scope);
        if (lval == LValPtr || lval == LValAssign) {
            return var_ptr;
        } else {
            return ir_expr_wrap(irb, scope, ir_build_load_ptr(irb, scope, node, var_ptr), result_loc);
        }
    }

    Tld *tld = find_decl(irb->codegen, scope, variable_name);
    if (tld) {
        IrInstSrc *decl_ref = ir_build_decl_ref(irb, scope, node, tld, lval);
        if (lval == LValPtr || lval == LValAssign) {
            return decl_ref;
        } else {
            return ir_expr_wrap(irb, scope, decl_ref, result_loc);
        }
    }

    if (get_container_scope(node->owner)->any_imports_failed) {
        // Skip the error message: an import in this file already failed, and a
        // broken import makes "undeclared identifier" errors here redundant.
        return irb->codegen->invalid_inst_src;
    }

    return ir_build_undeclared_identifier(irb, scope, node, variable_name);
}

static IrInstSrc *ir_gen_array_access(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeArrayAccessExpr);
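    // Lower `lhs[index]`: take lhs as a pointer, coerce the subscript to usize,
    // and compute an element pointer; load through it unless a pointer result
    // (LValPtr/LValAssign) was requested.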

    AstNode *array_ref_node = node->data.array_access_expr.array_ref_expr;
    IrInstSrc *array_ref_instruction = ir_gen_node_extra(irb, array_ref_node, scope, LValPtr, nullptr);
    if (array_ref_instruction == irb->codegen->invalid_inst_src)
        return array_ref_instruction;

    // Create a usize-typed result location to hold the subscript value; this
    // makes it possible for the compiler to infer the subscript expression type
    // if needed.
    IrInstSrc *usize_type_inst = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
    ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, usize_type_inst, no_result_loc());

    AstNode *subscript_node = node->data.array_access_expr.subscript;
    IrInstSrc *subscript_value = ir_gen_node_extra(irb, subscript_node, scope, LValNone, &result_loc_cast->base);
    if (subscript_value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *subscript_instruction = ir_build_implicit_cast(irb, scope, subscript_node, subscript_value, result_loc_cast);

    IrInstSrc *ptr_instruction = ir_build_elem_ptr(irb, scope, node, array_ref_instruction,
            subscript_instruction, true, PtrLenSingle, nullptr);
    if (lval == LValPtr || lval == LValAssign)
        return ptr_instruction;

    IrInstSrc *load_ptr = ir_build_load_ptr(irb, scope, node, ptr_instruction);
    return ir_expr_wrap(irb, scope, load_ptr, result_loc);
}

static IrInstSrc *ir_gen_field_access(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeFieldAccessExpr);
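    // Lower `lhs.field`: take lhs as a pointer and emit a field_ptr instruction;
    // callers decide whether to load through it.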

    AstNode *container_ref_node = node->data.field_access_expr.struct_expr;
    Buf *field_name = node->data.field_access_expr.field_name;

    IrInstSrc *container_ref_instruction = ir_gen_node_extra(irb, container_ref_node, scope, LValPtr, nullptr);
    if (container_ref_instruction == irb->codegen->invalid_inst_src)
        return container_ref_instruction;

    return ir_build_field_ptr(irb, scope, node, container_ref_instruction, field_name, false);
}

static IrInstSrc *ir_gen_overflow_op(IrBuilderSrc *irb, Scope *scope, AstNode *node, IrOverflowOp op) {
    assert(node->type == NodeTypeFnCallExpr);
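    // Shared lowering for the overflow builtins (e.g. @addWithOverflow,
    // @subWithOverflow): parameters are (T, a, b, result_ptr).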

    AstNode *type_node = node->data.fn_call_expr.params.at(0);
    AstNode *op1_node = node->data.fn_call_expr.params.at(1);
    AstNode *op2_node = node->data.fn_call_expr.params.at(2);
    AstNode *result_ptr_node = node->data.fn_call_expr.params.at(3);

    IrInstSrc *type_value = ir_gen_node(irb, type_node, scope);
    if (type_value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *op1 = ir_gen_node(irb, op1_node, scope);
    if (op1 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *op2 = ir_gen_node(irb, op2_node, scope);
    if (op2 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *result_ptr = ir_gen_node(irb, result_ptr_node, scope);
    if (result_ptr == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    return ir_build_overflow_op_src(irb, scope, node, op, type_value, op1, op2, result_ptr);
}

static IrInstSrc *ir_gen_mul_add(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeFnCallExpr);
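    // Fused multiply-add builtin (@mulAdd): parameters are (T, a, b, c).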

    AstNode *type_node = node->data.fn_call_expr.params.at(0);
    AstNode *op1_node = node->data.fn_call_expr.params.at(1);
    AstNode *op2_node = node->data.fn_call_expr.params.at(2);
    AstNode *op3_node = node->data.fn_call_expr.params.at(3);

    IrInstSrc *type_value = ir_gen_node(irb, type_node, scope);
    if (type_value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *op1 = ir_gen_node(irb, op1_node, scope);
    if (op1 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *op2 = ir_gen_node(irb, op2_node, scope);
    if (op2 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *op3 = ir_gen_node(irb, op3_node, scope);
    if (op3 == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    return ir_build_mul_add_src(irb, scope, node, type_value, op1, op2, op3);
}

static IrInstSrc *ir_gen_this(IrBuilderSrc *irb, Scope *orig_scope, AstNode *node) {
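    // Walk outward to the innermost ScopeIdDecls: if it belongs to a container,
    // yield that container type; otherwise yield the enclosing import itself.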
    for (Scope *it_scope = orig_scope; it_scope != nullptr; it_scope = it_scope->parent) {
        if (it_scope->id == ScopeIdDecls) {
            ScopeDecls *decls_scope = (ScopeDecls *)it_scope;
            ZigType *container_type = decls_scope->container_type;
            if (container_type != nullptr) {
                return ir_build_const_type(irb, orig_scope, node, container_type);
            } else {
                return ir_build_const_import(irb, orig_scope, node, decls_scope->import);
            }
        }
    }
    zig_unreachable();
}

static IrInstSrc *ir_gen_async_call(IrBuilderSrc *irb, Scope *scope, AstNode *await_node, AstNode *call_node,
        LVal lval, ResultLoc *result_loc)
{
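    // Lower @asyncCall(frame_buffer, ret_ptr, fn_ref, args). await_node is
    // non-null when the call is wrapped in an await, in which case the call is
    // built with CallModifierNone; a bare @asyncCall uses CallModifierAsync.
    // If `args` is written as an array (or empty) literal, its elements are
    // generated individually and passed to a regular call instruction; anon
    // struct literals are not supported yet, and any other expression is
    // forwarded whole via ir_build_async_call_extra.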
    if (call_node->data.fn_call_expr.params.length != 4) {
        add_node_error(irb->codegen, call_node,
            buf_sprintf("expected 4 arguments, found %" ZIG_PRI_usize,
                call_node->data.fn_call_expr.params.length));
        return irb->codegen->invalid_inst_src;
    }

    AstNode *bytes_node = call_node->data.fn_call_expr.params.at(0);
    IrInstSrc *bytes = ir_gen_node(irb, bytes_node, scope);
    if (bytes == irb->codegen->invalid_inst_src)
        return bytes;

    AstNode *ret_ptr_node = call_node->data.fn_call_expr.params.at(1);
    IrInstSrc *ret_ptr = ir_gen_node(irb, ret_ptr_node, scope);
    if (ret_ptr == irb->codegen->invalid_inst_src)
        return ret_ptr;

    AstNode *fn_ref_node = call_node->data.fn_call_expr.params.at(2);
    IrInstSrc *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
    if (fn_ref == irb->codegen->invalid_inst_src)
        return fn_ref;

    CallModifier modifier = (await_node == nullptr) ? CallModifierAsync : CallModifierNone;
    bool is_async_call_builtin = true;
    AstNode *args_node = call_node->data.fn_call_expr.params.at(3);
    if (args_node->type == NodeTypeContainerInitExpr) {
        if (args_node->data.container_init_expr.kind == ContainerInitKindArray ||
            args_node->data.container_init_expr.entries.length == 0)
        {
            size_t arg_count = args_node->data.container_init_expr.entries.length;
            IrInstSrc **args = heap::c_allocator.allocate<IrInstSrc*>(arg_count);
            for (size_t i = 0; i < arg_count; i += 1) {
                AstNode *arg_node = args_node->data.container_init_expr.entries.at(i);
                IrInstSrc *arg = ir_gen_node(irb, arg_node, scope);
                if (arg == irb->codegen->invalid_inst_src)
                    return arg;
                args[i] = arg;
            }

            IrInstSrc *call = ir_build_call_src(irb, scope, call_node, nullptr, fn_ref, arg_count, args,
                ret_ptr, modifier, is_async_call_builtin, bytes, result_loc);
            return ir_lval_wrap(irb, scope, call, lval, result_loc);
        } else {
            exec_add_error_node(irb->codegen, irb->exec, args_node,
                    buf_sprintf("TODO: @asyncCall with anon struct literal"));
            return irb->codegen->invalid_inst_src;
        }
    }
    IrInstSrc *args = ir_gen_node(irb, args_node, scope);
    if (args == irb->codegen->invalid_inst_src)
        return args;

    IrInstSrc *call = ir_build_async_call_extra(irb, scope, call_node, modifier, fn_ref, ret_ptr, bytes, args, result_loc);
    return ir_lval_wrap(irb, scope, call, lval, result_loc);
}

static IrInstSrc *ir_gen_fn_call_with_args(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        AstNode *fn_ref_node, CallModifier modifier, IrInstSrc *options,
        AstNode **args_ptr, size_t args_len, LVal lval, ResultLoc *result_loc)
{
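    // Each argument gets a cast result location whose target is the callee's
    // parameter type at that index (queried via arg_type on the function's
    // type), so arguments can coerce in place. When `options` is non-null it is
    // forwarded through ir_build_call_args; otherwise a plain call_src is built
    // with the given modifier.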
    IrInstSrc *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
    if (fn_ref == irb->codegen->invalid_inst_src)
        return fn_ref;

    IrInstSrc *fn_type = ir_build_typeof_1(irb, scope, source_node, fn_ref);

    IrInstSrc **args = heap::c_allocator.allocate<IrInstSrc*>(args_len);
    for (size_t i = 0; i < args_len; i += 1) {
        AstNode *arg_node = args_ptr[i];

        IrInstSrc *arg_index = ir_build_const_usize(irb, scope, arg_node, i);
        IrInstSrc *arg_type = ir_build_arg_type(irb, scope, source_node, fn_type, arg_index, true);
        ResultLoc *no_result = no_result_loc();
        ir_build_reset_result(irb, scope, source_node, no_result);
        ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, arg_type, no_result);

        IrInstSrc *arg = ir_gen_node_extra(irb, arg_node, scope, LValNone, &result_loc_cast->base);
        if (arg == irb->codegen->invalid_inst_src)
            return arg;

        args[i] = ir_build_implicit_cast(irb, scope, arg_node, arg, result_loc_cast);
    }

    IrInstSrc *fn_call;
    if (options != nullptr) {
        fn_call = ir_build_call_args(irb, scope, source_node, options, fn_ref, args, args_len, result_loc);
    } else {
        fn_call = ir_build_call_src(irb, scope, source_node, nullptr, fn_ref, args_len, args, nullptr,
                modifier, false, nullptr, result_loc);
    }
    return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
}

static IrInstSrc *ir_gen_builtin_fn_call(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeFnCallExpr);
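    // Look up the builtin by name and check its arity (SIZE_MAX means the
    // builtin accepts any number of arguments), then dispatch. Most cases simply
    // generate each argument node and emit the matching instruction.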

    AstNode *fn_ref_expr = node->data.fn_call_expr.fn_ref_expr;
    Buf *name = fn_ref_expr->data.symbol_expr.symbol;
    auto entry = irb->codegen->builtin_fn_table.maybe_get(name);

    if (!entry) {
        add_node_error(irb->codegen, node,
                buf_sprintf("invalid builtin function: '%s'", buf_ptr(name)));
        return irb->codegen->invalid_inst_src;
    }

    BuiltinFnEntry *builtin_fn = entry->value;
    size_t actual_param_count = node->data.fn_call_expr.params.length;

    if (builtin_fn->param_count != SIZE_MAX && builtin_fn->param_count != actual_param_count) {
        add_node_error(irb->codegen, node,
                buf_sprintf("expected %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize,
                    builtin_fn->param_count, actual_param_count));
        return irb->codegen->invalid_inst_src;
    }

    switch (builtin_fn->id) {
        case BuiltinFnIdInvalid:
            zig_unreachable();
        case BuiltinFnIdTypeof:
            {
                Scope *sub_scope = create_typeof_scope(irb->codegen, node, scope);

                size_t arg_count = node->data.fn_call_expr.params.length;

                IrInstSrc *type_of;

                if (arg_count == 0) {
                    add_node_error(irb->codegen, node,
                        buf_sprintf("expected at least 1 argument, found 0"));
                    return irb->codegen->invalid_inst_src;
                } else if (arg_count == 1) {
                    AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                    IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, sub_scope);
                    if (arg0_value == irb->codegen->invalid_inst_src)
                        return arg0_value;

                    type_of = ir_build_typeof_1(irb, scope, node, arg0_value);
                } else {
                    IrInstSrc **args = heap::c_allocator.allocate<IrInstSrc*>(arg_count);
                    for (size_t i = 0; i < arg_count; i += 1) {
                        AstNode *arg_node = node->data.fn_call_expr.params.at(i);
                        IrInstSrc *arg = ir_gen_node(irb, arg_node, sub_scope);
                        if (arg == irb->codegen->invalid_inst_src)
                            return irb->codegen->invalid_inst_src;
                        args[i] = arg;
                    }

                    type_of = ir_build_typeof_n(irb, scope, node, args, arg_count);
                }
                return ir_lval_wrap(irb, scope, type_of, lval, result_loc);
            }
        case BuiltinFnIdSetCold:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *set_cold = ir_build_set_cold(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, set_cold, lval, result_loc);
            }
        case BuiltinFnIdSetRuntimeSafety:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *set_safety = ir_build_set_runtime_safety(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, set_safety, lval, result_loc);
            }
        case BuiltinFnIdSetFloatMode:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *set_float_mode = ir_build_set_float_mode(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, set_float_mode, lval, result_loc);
            }
        case BuiltinFnIdSizeof:
        case BuiltinFnIdBitSizeof:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *size_of = ir_build_size_of(irb, scope, node, arg0_value, builtin_fn->id == BuiltinFnIdBitSizeof);
                return ir_lval_wrap(irb, scope, size_of, lval, result_loc);
            }
        case BuiltinFnIdImport:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *import = ir_build_import(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, import, lval, result_loc);
            }
        case BuiltinFnIdCImport:
            {
                IrInstSrc *c_import = ir_build_c_import(irb, scope, node);
                return ir_lval_wrap(irb, scope, c_import, lval, result_loc);
            }
        case BuiltinFnIdCInclude:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                if (!exec_c_import_buf(irb->exec)) {
                    add_node_error(irb->codegen, node, buf_sprintf("C include valid only inside C import block"));
                    return irb->codegen->invalid_inst_src;
                }

                IrInstSrc *c_include = ir_build_c_include(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, c_include, lval, result_loc);
            }
        case BuiltinFnIdCDefine:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                if (!exec_c_import_buf(irb->exec)) {
                    add_node_error(irb->codegen, node, buf_sprintf("C define valid only inside C import block"));
                    return irb->codegen->invalid_inst_src;
                }

                IrInstSrc *c_define = ir_build_c_define(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, c_define, lval, result_loc);
            }
        case BuiltinFnIdCUndef:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                if (!exec_c_import_buf(irb->exec)) {
                    add_node_error(irb->codegen, node, buf_sprintf("C undef valid only inside C import block"));
                    return irb->codegen->invalid_inst_src;
                }

                IrInstSrc *c_undef = ir_build_c_undef(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, c_undef, lval, result_loc);
            }
        case BuiltinFnIdCompileErr:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *compile_err = ir_build_compile_err(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, compile_err, lval, result_loc);
            }
        case BuiltinFnIdCompileLog:
            {
                IrInstSrc **args = heap::c_allocator.allocate<IrInstSrc*>(actual_param_count);

                for (size_t i = 0; i < actual_param_count; i += 1) {
                    AstNode *arg_node = node->data.fn_call_expr.params.at(i);
                    args[i] = ir_gen_node(irb, arg_node, scope);
                    if (args[i] == irb->codegen->invalid_inst_src)
                        return irb->codegen->invalid_inst_src;
                }

                IrInstSrc *compile_log = ir_build_compile_log(irb, scope, node, actual_param_count, args);
                return ir_lval_wrap(irb, scope, compile_log, lval, result_loc);
            }
        case BuiltinFnIdErrName:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *err_name = ir_build_err_name(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, err_name, lval, result_loc);
            }
        case BuiltinFnIdEmbedFile:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *embed_file = ir_build_embed_file(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, embed_file, lval, result_loc);
            }
        case BuiltinFnIdCmpxchgWeak:
        case BuiltinFnIdCmpxchgStrong:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
                IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
                if (arg2_value == irb->codegen->invalid_inst_src)
                    return arg2_value;

                AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
                IrInstSrc *arg3_value = ir_gen_node(irb, arg3_node, scope);
                if (arg3_value == irb->codegen->invalid_inst_src)
                    return arg3_value;

                AstNode *arg4_node = node->data.fn_call_expr.params.at(4);
                IrInstSrc *arg4_value = ir_gen_node(irb, arg4_node, scope);
                if (arg4_value == irb->codegen->invalid_inst_src)
                    return arg4_value;

                AstNode *arg5_node = node->data.fn_call_expr.params.at(5);
                IrInstSrc *arg5_value = ir_gen_node(irb, arg5_node, scope);
                if (arg5_value == irb->codegen->invalid_inst_src)
                    return arg5_value;

                IrInstSrc *cmpxchg = ir_build_cmpxchg_src(irb, scope, node, arg0_value, arg1_value,
                    arg2_value, arg3_value, arg4_value, arg5_value, (builtin_fn->id == BuiltinFnIdCmpxchgWeak),
                    result_loc);
                return ir_lval_wrap(irb, scope, cmpxchg, lval, result_loc);
            }
        case BuiltinFnIdFence:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *fence = ir_build_fence(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, fence, lval, result_loc);
            }
        case BuiltinFnIdDivExact:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivExact, arg0_value, arg1_value, true);
                return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
            }
        case BuiltinFnIdDivTrunc:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivTrunc, arg0_value, arg1_value, true);
                return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
            }
        case BuiltinFnIdDivFloor:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivFloor, arg0_value, arg1_value, true);
                return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
            }
        case BuiltinFnIdRem:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpRemRem, arg0_value, arg1_value, true);
                return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
            }
        case BuiltinFnIdMod:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpRemMod, arg0_value, arg1_value, true);
                return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
            }
        case BuiltinFnIdSqrt:
        case BuiltinFnIdSin:
        case BuiltinFnIdCos:
        case BuiltinFnIdExp:
        case BuiltinFnIdExp2:
        case BuiltinFnIdLog:
        case BuiltinFnIdLog2:
        case BuiltinFnIdLog10:
        case BuiltinFnIdFabs:
        case BuiltinFnIdFloor:
        case BuiltinFnIdCeil:
        case BuiltinFnIdTrunc:
        case BuiltinFnIdNearbyInt:
        case BuiltinFnIdRound:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *inst = ir_build_float_op_src(irb, scope, node, arg0_value, builtin_fn->id);
                return ir_lval_wrap(irb, scope, inst, lval, result_loc);
            }
        case BuiltinFnIdTruncate:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *truncate = ir_build_truncate(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, truncate, lval, result_loc);
            }
        case BuiltinFnIdIntCast:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *result = ir_build_int_cast(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdFloatCast:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *result = ir_build_float_cast(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdErrSetCast:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *result = ir_build_err_set_cast(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdIntToFloat:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *result = ir_build_int_to_float(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdFloatToInt:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *result = ir_build_float_to_int(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdErrToInt:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *result = ir_build_err_to_int_src(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdIntToErr:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *result = ir_build_int_to_err_src(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdBoolToInt:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *result = ir_build_bool_to_int(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdVectorType:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *vector_type = ir_build_vector_type(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, vector_type, lval, result_loc);
            }
        case BuiltinFnIdShuffle:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
                IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
                if (arg2_value == irb->codegen->invalid_inst_src)
                    return arg2_value;

                AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
                IrInstSrc *arg3_value = ir_gen_node(irb, arg3_node, scope);
                if (arg3_value == irb->codegen->invalid_inst_src)
                    return arg3_value;

                IrInstSrc *shuffle_vector = ir_build_shuffle_vector(irb, scope, node,
                    arg0_value, arg1_value, arg2_value, arg3_value);
                return ir_lval_wrap(irb, scope, shuffle_vector, lval, result_loc);
            }
        case BuiltinFnIdSplat:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *splat = ir_build_splat_src(irb, scope, node,
                    arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, splat, lval, result_loc);
            }
        case BuiltinFnIdMemcpy:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
                IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
                if (arg2_value == irb->codegen->invalid_inst_src)
                    return arg2_value;

                IrInstSrc *ir_memcpy = ir_build_memcpy_src(irb, scope, node, arg0_value, arg1_value, arg2_value);
                return ir_lval_wrap(irb, scope, ir_memcpy, lval, result_loc);
            }
        case BuiltinFnIdMemset:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
                IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
                if (arg2_value == irb->codegen->invalid_inst_src)
                    return arg2_value;

                IrInstSrc *ir_memset = ir_build_memset_src(irb, scope, node, arg0_value, arg1_value, arg2_value);
                return ir_lval_wrap(irb, scope, ir_memset, lval, result_loc);
            }
        case BuiltinFnIdWasmMemorySize:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *ir_wasm_memory_size = ir_build_wasm_memory_size_src(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, ir_wasm_memory_size, lval, result_loc);
            }
        case BuiltinFnIdWasmMemoryGrow:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *ir_wasm_memory_grow = ir_build_wasm_memory_grow_src(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, ir_wasm_memory_grow, lval, result_loc);
            }
        case BuiltinFnIdField:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node_extra(irb, arg0_node, scope, LValPtr, nullptr);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *ptr_instruction = ir_build_field_ptr_instruction(irb, scope, node,
                        arg0_value, arg1_value, false);

                if (lval == LValPtr || lval == LValAssign)
                    return ptr_instruction;

                IrInstSrc *load_ptr = ir_build_load_ptr(irb, scope, node, ptr_instruction);
                return ir_expr_wrap(irb, scope, load_ptr, result_loc);
            }
        case BuiltinFnIdHasField:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *type_info = ir_build_has_field(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, type_info, lval, result_loc);
            }
        case BuiltinFnIdTypeInfo:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *type_info = ir_build_type_info(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, type_info, lval, result_loc);
            }
        case BuiltinFnIdType:
            {
                AstNode *arg_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg = ir_gen_node(irb, arg_node, scope);
                if (arg == irb->codegen->invalid_inst_src)
                    return arg;

                IrInstSrc *type = ir_build_type(irb, scope, node, arg);
                return ir_lval_wrap(irb, scope, type, lval, result_loc);
            }
        case BuiltinFnIdBreakpoint:
            return ir_lval_wrap(irb, scope, ir_build_breakpoint(irb, scope, node), lval, result_loc);
        case BuiltinFnIdReturnAddress:
            return ir_lval_wrap(irb, scope, ir_build_return_address_src(irb, scope, node), lval, result_loc);
        case BuiltinFnIdFrameAddress:
            return ir_lval_wrap(irb, scope, ir_build_frame_address_src(irb, scope, node), lval, result_loc);
        case BuiltinFnIdFrameHandle:
            if (!irb->exec->fn_entry) {
                add_node_error(irb->codegen, node, buf_sprintf("@frame() called outside of function definition"));
                return irb->codegen->invalid_inst_src;
            }
            return ir_lval_wrap(irb, scope, ir_build_handle_src(irb, scope, node), lval, result_loc);
        case BuiltinFnIdFrameType: {
            AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
            IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
            if (arg0_value == irb->codegen->invalid_inst_src)
                return arg0_value;

            IrInstSrc *frame_type = ir_build_frame_type(irb, scope, node, arg0_value);
            return ir_lval_wrap(irb, scope, frame_type, lval, result_loc);
        }
        case BuiltinFnIdFrameSize: {
            AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
            IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
            if (arg0_value == irb->codegen->invalid_inst_src)
                return arg0_value;

            IrInstSrc *frame_size = ir_build_frame_size_src(irb, scope, node, arg0_value);
            return ir_lval_wrap(irb, scope, frame_size, lval, result_loc);
        }
        case BuiltinFnIdAlignOf:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *align_of = ir_build_align_of(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, align_of, lval, result_loc);
            }
        case BuiltinFnIdAddWithOverflow:
            return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpAdd), lval, result_loc);
        case BuiltinFnIdSubWithOverflow:
            return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpSub), lval, result_loc);
        case BuiltinFnIdMulWithOverflow:
            return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpMul), lval, result_loc);
        case BuiltinFnIdShlWithOverflow:
            return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpShl), lval, result_loc);
        case BuiltinFnIdMulAdd:
            return ir_lval_wrap(irb, scope, ir_gen_mul_add(irb, scope, node), lval, result_loc);
        case BuiltinFnIdTypeName:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *type_name = ir_build_type_name(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, type_name, lval, result_loc);
            }
        case BuiltinFnIdPanic:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *panic = ir_build_panic_src(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, panic, lval, result_loc);
            }
        case BuiltinFnIdPtrCast:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *ptr_cast = ir_build_ptr_cast_src(irb, scope, node, arg0_value, arg1_value, true);
                return ir_lval_wrap(irb, scope, ptr_cast, lval, result_loc);
            }
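        // @bitCast: the destination type is wrapped in a ResultLocBitCast whose
        // parent is the caller's result location, so the operand expression can
        // write directly into the final destination rather than through a
        // temporary.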
        case BuiltinFnIdBitCast:
            {
                AstNode *dest_type_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *dest_type = ir_gen_node(irb, dest_type_node, scope);
                if (dest_type == irb->codegen->invalid_inst_src)
                    return dest_type;

                ResultLocBitCast *result_loc_bit_cast = heap::c_allocator.create<ResultLocBitCast>();
                result_loc_bit_cast->base.id = ResultLocIdBitCast;
                result_loc_bit_cast->base.source_instruction = dest_type;
                result_loc_bit_cast->base.allow_write_through_const = result_loc->allow_write_through_const;
                ir_ref_instruction(dest_type, irb->current_basic_block);
                result_loc_bit_cast->parent = result_loc;

                ir_build_reset_result(irb, scope, node, &result_loc_bit_cast->base);

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node_extra(irb, arg1_node, scope, LValNone,
                        &result_loc_bit_cast->base);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *bitcast = ir_build_bit_cast_src(irb, scope, arg1_node, arg1_value, result_loc_bit_cast);
                return ir_lval_wrap(irb, scope, bitcast, lval, result_loc);
            }
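        // @as(T, expr): T becomes a cast result location so that expr is
        // generated with the destination type known, and an implicit_cast
        // instruction ties the result back to it.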
        case BuiltinFnIdAs:
            {
                AstNode *dest_type_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *dest_type = ir_gen_node(irb, dest_type_node, scope);
                if (dest_type == irb->codegen->invalid_inst_src)
                    return dest_type;

                ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, dest_type, result_loc);

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node_extra(irb, arg1_node, scope, LValNone,
                        &result_loc_cast->base);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *result = ir_build_implicit_cast(irb, scope, node, arg1_value, result_loc_cast);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdIntToPtr:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *int_to_ptr = ir_build_int_to_ptr_src(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, int_to_ptr, lval, result_loc);
            }
        case BuiltinFnIdPtrToInt:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *ptr_to_int = ir_build_ptr_to_int_src(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, ptr_to_int, lval, result_loc);
            }
        case BuiltinFnIdTagName:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *tag_name = ir_build_tag_name_src(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, tag_name, lval, result_loc);
            }
        case BuiltinFnIdTagType:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *tag_type = ir_build_tag_type(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, tag_type, lval, result_loc);
            }
        case BuiltinFnIdFieldParentPtr:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
                IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
                if (arg2_value == irb->codegen->invalid_inst_src)
                    return arg2_value;

                IrInstSrc *field_parent_ptr = ir_build_field_parent_ptr_src(irb, scope, node,
                        arg0_value, arg1_value, arg2_value);
                return ir_lval_wrap(irb, scope, field_parent_ptr, lval, result_loc);
            }
        case BuiltinFnIdByteOffsetOf:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *offset_of = ir_build_byte_offset_of(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, offset_of, lval, result_loc);
            }
        case BuiltinFnIdBitOffsetOf:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *offset_of = ir_build_bit_offset_of(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, offset_of, lval, result_loc);
            }
        case BuiltinFnIdCall: {
            // Cast the options parameter to the options type
            ZigType *options_type = get_builtin_type(irb->codegen, "CallOptions");
            IrInstSrc *options_type_inst = ir_build_const_type(irb, scope, node, options_type);
            ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, options_type_inst, no_result_loc());

            AstNode *options_node = node->data.fn_call_expr.params.at(0);
            IrInstSrc *options_inner = ir_gen_node_extra(irb, options_node, scope,
                    LValNone, &result_loc_cast->base);
            if (options_inner == irb->codegen->invalid_inst_src)
                return options_inner;
            IrInstSrc *options = ir_build_implicit_cast(irb, scope, options_node, options_inner, result_loc_cast);

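            // The args parameter is special-cased: an array-style (or empty)
            // tuple literal is lowered into direct call arguments via
            // ir_gen_fn_call_with_args, a non-empty struct-style literal is not
            // supported yet, and any other expression goes through
            // ir_build_call_extra with the args value evaluated at runtime.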
            AstNode *fn_ref_node = node->data.fn_call_expr.params.at(1);
            AstNode *args_node = node->data.fn_call_expr.params.at(2);
            if (args_node->type == NodeTypeContainerInitExpr) {
                if (args_node->data.container_init_expr.kind == ContainerInitKindArray ||
                    args_node->data.container_init_expr.entries.length == 0)
                {
                    return ir_gen_fn_call_with_args(irb, scope, node,
                            fn_ref_node, CallModifierNone, options,
                            args_node->data.container_init_expr.entries.items,
                            args_node->data.container_init_expr.entries.length,
                            lval, result_loc);
                } else {
                    exec_add_error_node(irb->codegen, irb->exec, args_node,
                            buf_sprintf("TODO: @call with anon struct literal"));
                    return irb->codegen->invalid_inst_src;
                }
            } else {
                IrInstSrc *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
                if (fn_ref == irb->codegen->invalid_inst_src)
                    return fn_ref;

                IrInstSrc *args = ir_gen_node(irb, args_node, scope);
                if (args == irb->codegen->invalid_inst_src)
                    return args;

                IrInstSrc *call = ir_build_call_extra(irb, scope, node, options, fn_ref, args, result_loc);
                return ir_lval_wrap(irb, scope, call, lval, result_loc);
            }
        }
        case BuiltinFnIdAsyncCall:
            return ir_gen_async_call(irb, scope, nullptr, node, lval, result_loc);
        case BuiltinFnIdShlExact:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpBitShiftLeftExact, arg0_value, arg1_value, true);
                return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
            }
        case BuiltinFnIdShrExact:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpBitShiftRightExact, arg0_value, arg1_value, true);
                return ir_lval_wrap(irb, scope, bin_op, lval, result_loc);
            }
        case BuiltinFnIdSetEvalBranchQuota:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *set_eval_branch_quota = ir_build_set_eval_branch_quota(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, set_eval_branch_quota, lval, result_loc);
            }
        case BuiltinFnIdAlignCast:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *align_cast = ir_build_align_cast_src(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, align_cast, lval, result_loc);
            }
        case BuiltinFnIdOpaqueType:
            {
                IrInstSrc *opaque_type = ir_build_opaque_type(irb, scope, node);
                return ir_lval_wrap(irb, scope, opaque_type, lval, result_loc);
            }
        case BuiltinFnIdThis:
            {
                IrInstSrc *this_inst = ir_gen_this(irb, scope, node);
                return ir_lval_wrap(irb, scope, this_inst, lval, result_loc);
            }
        case BuiltinFnIdSetAlignStack:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *set_align_stack = ir_build_set_align_stack(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, set_align_stack, lval, result_loc);
            }
        case BuiltinFnIdExport:
            {
                // Cast the options parameter to the options type
                ZigType *options_type = get_builtin_type(irb->codegen, "ExportOptions");
                IrInstSrc *options_type_inst = ir_build_const_type(irb, scope, node, options_type);
                ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, options_type_inst, no_result_loc());

                AstNode *target_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *target_value = ir_gen_node(irb, target_node, scope);
                if (target_value == irb->codegen->invalid_inst_src)
                    return target_value;

                AstNode *options_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *options_value = ir_gen_node_extra(irb, options_node,
                    scope, LValNone, &result_loc_cast->base);
                if (options_value == irb->codegen->invalid_inst_src)
                    return options_value;

                IrInstSrc *casted_options_value = ir_build_implicit_cast(
                    irb, scope, options_node, options_value, result_loc_cast);

                IrInstSrc *ir_export = ir_build_export(irb, scope, node, target_value, casted_options_value);
                return ir_lval_wrap(irb, scope, ir_export, lval, result_loc);
            }
        case BuiltinFnIdErrorReturnTrace:
            {
                IrInstSrc *error_return_trace = ir_build_error_return_trace_src(irb, scope, node,
                        IrInstErrorReturnTraceNull);
                return ir_lval_wrap(irb, scope, error_return_trace, lval, result_loc);
            }
        case BuiltinFnIdAtomicRmw:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
                IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
                if (arg2_value == irb->codegen->invalid_inst_src)
                    return arg2_value;

                AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
                IrInstSrc *arg3_value = ir_gen_node(irb, arg3_node, scope);
                if (arg3_value == irb->codegen->invalid_inst_src)
                    return arg3_value;

                AstNode *arg4_node = node->data.fn_call_expr.params.at(4);
                IrInstSrc *arg4_value = ir_gen_node(irb, arg4_node, scope);
                if (arg4_value == irb->codegen->invalid_inst_src)
                    return arg4_value;

                IrInstSrc *inst = ir_build_atomic_rmw_src(irb, scope, node,
                        arg0_value, arg1_value, arg2_value, arg3_value, arg4_value);
                return ir_lval_wrap(irb, scope, inst, lval, result_loc);
            }
        case BuiltinFnIdAtomicLoad:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
                IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
                if (arg2_value == irb->codegen->invalid_inst_src)
                    return arg2_value;

                IrInstSrc *inst = ir_build_atomic_load_src(irb, scope, node, arg0_value, arg1_value, arg2_value);
                return ir_lval_wrap(irb, scope, inst, lval, result_loc);
            }
        case BuiltinFnIdAtomicStore:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
                IrInstSrc *arg2_value = ir_gen_node(irb, arg2_node, scope);
                if (arg2_value == irb->codegen->invalid_inst_src)
                    return arg2_value;

                AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
                IrInstSrc *arg3_value = ir_gen_node(irb, arg3_node, scope);
                if (arg3_value == irb->codegen->invalid_inst_src)
                    return arg3_value;

                IrInstSrc *inst = ir_build_atomic_store_src(irb, scope, node, arg0_value, arg1_value,
                        arg2_value, arg3_value);
                return ir_lval_wrap(irb, scope, inst, lval, result_loc);
            }
        case BuiltinFnIdIntToEnum:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *result = ir_build_int_to_enum_src(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdEnumToInt:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                IrInstSrc *result = ir_build_enum_to_int(irb, scope, node, arg0_value);
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdCtz:
        case BuiltinFnIdPopCount:
        case BuiltinFnIdClz:
        case BuiltinFnIdBswap:
        case BuiltinFnIdBitReverse:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *result;
                switch (builtin_fn->id) {
                case BuiltinFnIdCtz:
                    result = ir_build_ctz(irb, scope, node, arg0_value, arg1_value);
                    break;
                case BuiltinFnIdPopCount:
                    result = ir_build_pop_count(irb, scope, node, arg0_value, arg1_value);
                    break;
                case BuiltinFnIdClz:
                    result = ir_build_clz(irb, scope, node, arg0_value, arg1_value);
                    break;
                case BuiltinFnIdBswap:
                    result = ir_build_bswap(irb, scope, node, arg0_value, arg1_value);
                    break;
                case BuiltinFnIdBitReverse:
                    result = ir_build_bit_reverse(irb, scope, node, arg0_value, arg1_value);
                    break;
                default:
                    zig_unreachable();
                }
                return ir_lval_wrap(irb, scope, result, lval, result_loc);
            }
        case BuiltinFnIdHasDecl:
            {
                AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
                if (arg0_value == irb->codegen->invalid_inst_src)
                    return arg0_value;

                AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
                if (arg1_value == irb->codegen->invalid_inst_src)
                    return arg1_value;

                IrInstSrc *has_decl = ir_build_has_decl(irb, scope, node, arg0_value, arg1_value);
                return ir_lval_wrap(irb, scope, has_decl, lval, result_loc);
            }
        case BuiltinFnIdUnionInit:
            {
                AstNode *union_type_node = node->data.fn_call_expr.params.at(0);
                IrInstSrc *union_type_inst = ir_gen_node(irb, union_type_node, scope);
                if (union_type_inst == irb->codegen->invalid_inst_src)
                    return union_type_inst;

                AstNode *name_node = node->data.fn_call_expr.params.at(1);
                IrInstSrc *name_inst = ir_gen_node(irb, name_node, scope);
                if (name_inst == irb->codegen->invalid_inst_src)
                    return name_inst;

                AstNode *init_node = node->data.fn_call_expr.params.at(2);

                return ir_gen_union_init_expr(irb, scope, node, union_type_inst, name_inst, init_node,
                        lval, result_loc);
            }
        case BuiltinFnIdSrc:
            {
                IrInstSrc *src_inst = ir_build_src(irb, scope, node);
                return ir_lval_wrap(irb, scope, src_inst, lval, result_loc);
            }
    }
    zig_unreachable();
}

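// Walks the scope chain looking for an enclosing nosuspend scope, stopping at
// the function boundary. Returns null when the scope is not inside nosuspend.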
static ScopeNoSuspend *get_scope_nosuspend(Scope *scope) {
    while (scope) {
        if (scope->id == ScopeIdNoSuspend)
            return (ScopeNoSuspend *)scope;
        if (scope->id == ScopeIdFnDef)
            return nullptr;

        scope = scope->parent;
    }
    return nullptr;
}

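// Entry point for NodeTypeFnCallExpr. Builtin calls are dispatched to
// ir_gen_builtin_fn_call; otherwise the call modifier is adjusted for an
// enclosing nosuspend scope (an async call inside nosuspend is an error) and
// the call is generated together with its argument list.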
static IrInstSrc *ir_gen_fn_call(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeFnCallExpr);

    if (node->data.fn_call_expr.modifier == CallModifierBuiltin)
        return ir_gen_builtin_fn_call(irb, scope, node, lval, result_loc);

    bool is_nosuspend = get_scope_nosuspend(scope) != nullptr;
    CallModifier modifier = node->data.fn_call_expr.modifier;
    if (is_nosuspend) {
        if (modifier == CallModifierAsync) {
            add_node_error(irb->codegen, node,
                    buf_sprintf("async call in nosuspend scope"));
            return irb->codegen->invalid_inst_src;
        }
        modifier = CallModifierNoSuspend;
    }

    AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
    return ir_gen_fn_call_with_args(irb, scope, node, fn_ref_node, modifier,
        nullptr, node->data.fn_call_expr.params.items, node->data.fn_call_expr.params.length, lval, result_loc);
}

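// Lowers an `if` over a plain bool condition into Then/Else/EndIf basic blocks
// joined by a two-way phi; the cond_br carries an is_comptime operand so the
// whole construct can fold at compile time. Illustrative Zig shape (a sketch):
//     const x = if (cond) a else b;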
static IrInstSrc *ir_gen_if_bool_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeIfBoolExpr);

    IrInstSrc *condition = ir_gen_node(irb, node->data.if_bool_expr.condition, scope);
    if (condition == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, scope)) {
        is_comptime = ir_build_const_bool(irb, scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, scope, node, condition);
    }

    AstNode *then_node = node->data.if_bool_expr.then_block;
    AstNode *else_node = node->data.if_bool_expr.else_node;

    IrBasicBlockSrc *then_block = ir_create_basic_block(irb, scope, "Then");
    IrBasicBlockSrc *else_block = ir_create_basic_block(irb, scope, "Else");
    IrBasicBlockSrc *endif_block = ir_create_basic_block(irb, scope, "EndIf");

    IrInstSrc *cond_br_inst = ir_build_cond_br(irb, scope, node, condition,
            then_block, else_block, is_comptime);
    ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, else_block, endif_block,
            result_loc, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, then_block);

    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
    IrInstSrc *then_expr_result = ir_gen_node_extra(irb, then_node, subexpr_scope, lval,
            &peer_parent->peers.at(0)->base);
    if (then_expr_result == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *after_then_block = irb->current_basic_block;
    if (!instr_is_unreachable(then_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, else_block);
    IrInstSrc *else_expr_result;
    if (else_node) {
        else_expr_result = ir_gen_node_extra(irb, else_node, subexpr_scope, lval, &peer_parent->peers.at(1)->base);
        if (else_expr_result == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    } else {
        else_expr_result = ir_build_const_void(irb, scope, node);
        ir_build_end_expr(irb, scope, node, else_expr_result, &peer_parent->peers.at(1)->base);
    }
    IrBasicBlockSrc *after_else_block = irb->current_basic_block;
    if (!instr_is_unreachable(else_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, endif_block);
    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = then_expr_result;
    incoming_values[1] = else_expr_result;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = after_then_block;
    incoming_blocks[1] = after_else_block;

    IrInstSrc *phi = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, peer_parent);
    return ir_expr_wrap(irb, scope, phi, result_loc);
}

static IrInstSrc *ir_gen_prefix_op_id_lval(IrBuilderSrc *irb, Scope *scope, AstNode *node, IrUnOp op_id, LVal lval) {
    assert(node->type == NodeTypePrefixOpExpr);
    AstNode *expr_node = node->data.prefix_op_expr.primary_expr;

    IrInstSrc *value = ir_gen_node_extra(irb, expr_node, scope, lval, nullptr);
    if (value == irb->codegen->invalid_inst_src)
        return value;

    return ir_build_un_op(irb, scope, node, op_id, value);
}

static IrInstSrc *ir_gen_prefix_op_id(IrBuilderSrc *irb, Scope *scope, AstNode *node, IrUnOp op_id) {
    return ir_gen_prefix_op_id_lval(irb, scope, node, op_id, LValNone);
}

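// Finishes an expression by emitting an end_expr instruction that ties the
// value to its result location, unless the value is already invalid.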
static IrInstSrc *ir_expr_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *inst, ResultLoc *result_loc) {
    if (inst == irb->codegen->invalid_inst_src) return inst;
    ir_build_end_expr(irb, scope, inst->base.source_node, inst, result_loc);
    return inst;
}

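// Adapts a generated value to the l-value context requested by the caller:
// invalid or unreachable values, defer expressions, and variable declarations
// pass through untouched; LValPtr gets a ref instruction so that a pointer can
// be handed back; otherwise the value is wrapped with its result location.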
static IrInstSrc *ir_lval_wrap(IrBuilderSrc *irb, Scope *scope, IrInstSrc *value, LVal lval,
        ResultLoc *result_loc)
{
    // This logic must be kept in sync with
    // [STMT_EXPR_TEST_THING] <--- (search this token)
    if (value == irb->codegen->invalid_inst_src ||
        instr_is_unreachable(value) ||
        value->base.source_node->type == NodeTypeDefer ||
        value->id == IrInstSrcIdDeclVar)
    {
        return value;
    }

    assert(lval != LValAssign);
    if (lval == LValPtr) {
        // We needed a pointer to a value, but we got a value. So we create
        // an instruction which just makes a pointer of it.
        return ir_build_ref_src(irb, scope, value->base.source_node, value);
    } else if (result_loc != nullptr) {
        return ir_expr_wrap(irb, scope, value, result_loc);
    } else {
        return value;
    }
}

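// Maps the token that introduced a pointer type to its length kind: `*` and
// `**` give single-item pointers, `[` begins a `[*]`-style unknown-length
// pointer, and the symbol token (`c`) marks the `[*c]` C pointer form.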
static PtrLen star_token_to_ptr_len(TokenId token_id) {
    switch (token_id) {
        case TokenIdStar:
        case TokenIdStarStar:
            return PtrLenSingle;
        case TokenIdLBracket:
            return PtrLenUnknown;
        case TokenIdSymbol:
            return PtrLenC;
        default:
            zig_unreachable();
    }
}

static IrInstSrc *ir_gen_pointer_type(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypePointerType);

    PtrLen ptr_len = star_token_to_ptr_len(node->data.pointer_type.star_token->id);

    bool is_const = node->data.pointer_type.is_const;
    bool is_volatile = node->data.pointer_type.is_volatile;
    bool is_allow_zero = node->data.pointer_type.allow_zero_token != nullptr;
    AstNode *sentinel_expr = node->data.pointer_type.sentinel;
    AstNode *expr_node = node->data.pointer_type.op_expr;
    AstNode *align_expr = node->data.pointer_type.align_expr;

    IrInstSrc *sentinel;
    if (sentinel_expr != nullptr) {
        sentinel = ir_gen_node(irb, sentinel_expr, scope);
        if (sentinel == irb->codegen->invalid_inst_src)
            return sentinel;
    } else {
        sentinel = nullptr;
    }

    IrInstSrc *align_value;
    if (align_expr != nullptr) {
        align_value = ir_gen_node(irb, align_expr, scope);
        if (align_value == irb->codegen->invalid_inst_src)
            return align_value;
    } else {
        align_value = nullptr;
    }

    IrInstSrc *child_type = ir_gen_node(irb, expr_node, scope);
    if (child_type == irb->codegen->invalid_inst_src)
        return child_type;

    uint32_t bit_offset_start = 0;
    if (node->data.pointer_type.bit_offset_start != nullptr) {
        if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_start, 32, false)) {
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_start, 10);
            exec_add_error_node(irb->codegen, irb->exec, node,
                    buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
            return irb->codegen->invalid_inst_src;
        }
        bit_offset_start = bigint_as_u32(node->data.pointer_type.bit_offset_start);
    }

    uint32_t host_int_bytes = 0;
    if (node->data.pointer_type.host_int_bytes != nullptr) {
        if (!bigint_fits_in_bits(node->data.pointer_type.host_int_bytes, 32, false)) {
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, node->data.pointer_type.host_int_bytes, 10);
            exec_add_error_node(irb->codegen, irb->exec, node,
                    buf_sprintf("value %s too large for u32 byte count", buf_ptr(val_buf)));
            return irb->codegen->invalid_inst_src;
        }
        host_int_bytes = bigint_as_u32(node->data.pointer_type.host_int_bytes);
    }

    if (host_int_bytes != 0 && bit_offset_start >= host_int_bytes * 8) {
        exec_add_error_node(irb->codegen, irb->exec, node,
                buf_sprintf("bit offset starts after end of host integer"));
        return irb->codegen->invalid_inst_src;
    }

    return ir_build_ptr_type(irb, scope, node, child_type, is_const, is_volatile,
            ptr_len, sentinel, align_value, bit_offset_start, host_int_bytes, is_allow_zero);
}

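// Takes an error-union expression by pointer, extracts its payload pointer,
// and either returns that pointer (LValPtr) or loads through it and wraps the
// value in the caller's result location. As the name suggests, this is the
// lowering used for the `catch unreachable` style of unwrap.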
static IrInstSrc *ir_gen_catch_unreachable(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
        AstNode *expr_node, LVal lval, ResultLoc *result_loc)
{
    IrInstSrc *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
    if (err_union_ptr == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *payload_ptr = ir_build_unwrap_err_payload_src(irb, scope, source_node, err_union_ptr, true, false);
    if (payload_ptr == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    if (lval == LValPtr)
        return payload_ptr;

    IrInstSrc *load_ptr = ir_build_load_ptr(irb, scope, source_node, payload_ptr);
    return ir_expr_wrap(irb, scope, load_ptr, result_loc);
}

static IrInstSrc *ir_gen_bool_not(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypePrefixOpExpr);
    AstNode *expr_node = node->data.prefix_op_expr.primary_expr;

    IrInstSrc *value = ir_gen_node(irb, expr_node, scope);
    if (value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    return ir_build_bool_not(irb, scope, node, value);
}

static IrInstSrc *ir_gen_prefix_op_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypePrefixOpExpr);

    PrefixOp prefix_op = node->data.prefix_op_expr.prefix_op;

    switch (prefix_op) {
        case PrefixOpInvalid:
            zig_unreachable();
        case PrefixOpBoolNot:
            return ir_lval_wrap(irb, scope, ir_gen_bool_not(irb, scope, node), lval, result_loc);
        case PrefixOpBinNot:
            return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpBinNot), lval, result_loc);
        case PrefixOpNegation:
            return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegation), lval, result_loc);
        case PrefixOpNegationWrap:
            return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval, result_loc);
        case PrefixOpOptional:
            return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval, result_loc);
        case PrefixOpAddrOf: {
            AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
            return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr), lval, result_loc);
        }
    }
    zig_unreachable();
}

static IrInstSrc *ir_gen_union_init_expr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
    IrInstSrc *union_type, IrInstSrc *field_name, AstNode *expr_node,
    LVal lval, ResultLoc *parent_result_loc)
{
    IrInstSrc *container_ptr = ir_build_resolve_result(irb, scope, source_node, parent_result_loc, union_type);
    IrInstSrc *field_ptr = ir_build_field_ptr_instruction(irb, scope, source_node, container_ptr,
            field_name, true);

    ResultLocInstruction *result_loc_inst = heap::c_allocator.create<ResultLocInstruction>();
    result_loc_inst->base.id = ResultLocIdInstruction;
    result_loc_inst->base.source_instruction = field_ptr;
    ir_ref_instruction(field_ptr, irb->current_basic_block);
    ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base);

    IrInstSrc *expr_value = ir_gen_node_extra(irb, expr_node, scope, LValNone,
            &result_loc_inst->base);
    if (expr_value == irb->codegen->invalid_inst_src)
        return expr_value;

    IrInstSrc *init_union = ir_build_union_init_named_field(irb, scope, source_node, union_type,
            field_name, field_ptr, container_ptr);

    return ir_lval_wrap(irb, scope, init_union, lval, parent_result_loc);
}

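// Lowers container initializers such as `T{ .a = x }` and `[_]T{ a, b }`. An
// explicit (or inferred-array) type becomes a cast result location; each field
// or element then gets its own result-location instruction so the initializer
// writes in place, and the whole expression is finished with a
// container_init_fields or container_init_list instruction.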
static IrInstSrc *ir_gen_container_init_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *parent_result_loc)
{
    assert(node->type == NodeTypeContainerInitExpr);

    AstNodeContainerInitExpr *container_init_expr = &node->data.container_init_expr;
    ContainerInitKind kind = container_init_expr->kind;

    ResultLocCast *result_loc_cast = nullptr;
    ResultLoc *child_result_loc;
    AstNode *init_array_type_source_node;
    if (container_init_expr->type != nullptr) {
        IrInstSrc *container_type;
        if (container_init_expr->type->type == NodeTypeInferredArrayType) {
            if (kind == ContainerInitKindStruct) {
                add_node_error(irb->codegen, container_init_expr->type,
                        buf_sprintf("initializing array with struct syntax"));
                return irb->codegen->invalid_inst_src;
            }
            IrInstSrc *sentinel;
            if (container_init_expr->type->data.inferred_array_type.sentinel != nullptr) {
                sentinel = ir_gen_node(irb, container_init_expr->type->data.inferred_array_type.sentinel, scope);
                if (sentinel == irb->codegen->invalid_inst_src)
                    return sentinel;
            } else {
                sentinel = nullptr;
            }

            IrInstSrc *elem_type = ir_gen_node(irb,
                    container_init_expr->type->data.inferred_array_type.child_type, scope);
            if (elem_type == irb->codegen->invalid_inst_src)
                return elem_type;
            size_t item_count = container_init_expr->entries.length;
            IrInstSrc *item_count_inst = ir_build_const_usize(irb, scope, node, item_count);
            container_type = ir_build_array_type(irb, scope, node, item_count_inst, sentinel, elem_type);
        } else {
            container_type = ir_gen_node(irb, container_init_expr->type, scope);
            if (container_type == irb->codegen->invalid_inst_src)
                return container_type;
        }

        result_loc_cast = ir_build_cast_result_loc(irb, container_type, parent_result_loc);
        child_result_loc = &result_loc_cast->base;
        init_array_type_source_node = container_type->base.source_node;
    } else {
        child_result_loc = parent_result_loc;
        if (parent_result_loc->source_instruction != nullptr) {
            init_array_type_source_node = parent_result_loc->source_instruction->base.source_node;
        } else {
            init_array_type_source_node = node;
        }
    }

    switch (kind) {
        case ContainerInitKindStruct: {
            IrInstSrc *container_ptr = ir_build_resolve_result(irb, scope, node, child_result_loc,
                    nullptr);

            size_t field_count = container_init_expr->entries.length;
            IrInstSrcContainerInitFieldsField *fields = heap::c_allocator.allocate<IrInstSrcContainerInitFieldsField>(field_count);
            for (size_t i = 0; i < field_count; i += 1) {
                AstNode *entry_node = container_init_expr->entries.at(i);
                assert(entry_node->type == NodeTypeStructValueField);

                Buf *name = entry_node->data.struct_val_field.name;
                AstNode *expr_node = entry_node->data.struct_val_field.expr;

                IrInstSrc *field_ptr = ir_build_field_ptr(irb, scope, entry_node, container_ptr, name, true);
                ResultLocInstruction *result_loc_inst = heap::c_allocator.create<ResultLocInstruction>();
                result_loc_inst->base.id = ResultLocIdInstruction;
                result_loc_inst->base.source_instruction = field_ptr;
                result_loc_inst->base.allow_write_through_const = true;
                ir_ref_instruction(field_ptr, irb->current_basic_block);
                ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base);

                IrInstSrc *expr_value = ir_gen_node_extra(irb, expr_node, scope, LValNone,
                        &result_loc_inst->base);
                if (expr_value == irb->codegen->invalid_inst_src)
                    return expr_value;

                fields[i].name = name;
                fields[i].source_node = entry_node;
                fields[i].result_loc = field_ptr;
            }
            IrInstSrc *result = ir_build_container_init_fields(irb, scope, node, field_count,
                    fields, container_ptr);

            if (result_loc_cast != nullptr) {
                result = ir_build_implicit_cast(irb, scope, node, result, result_loc_cast);
            }
            return ir_lval_wrap(irb, scope, result, lval, parent_result_loc);
        }
        case ContainerInitKindArray: {
            size_t item_count = container_init_expr->entries.length;

            IrInstSrc *container_ptr = ir_build_resolve_result(irb, scope, node, child_result_loc,
                    nullptr);

            IrInstSrc **result_locs = heap::c_allocator.allocate<IrInstSrc *>(item_count);
            for (size_t i = 0; i < item_count; i += 1) {
                AstNode *expr_node = container_init_expr->entries.at(i);

                IrInstSrc *elem_index = ir_build_const_usize(irb, scope, expr_node, i);
                IrInstSrc *elem_ptr = ir_build_elem_ptr(irb, scope, expr_node, container_ptr,
                        elem_index, false, PtrLenSingle, init_array_type_source_node);
                ResultLocInstruction *result_loc_inst = heap::c_allocator.create<ResultLocInstruction>();
                result_loc_inst->base.id = ResultLocIdInstruction;
                result_loc_inst->base.source_instruction = elem_ptr;
                result_loc_inst->base.allow_write_through_const = true;
                ir_ref_instruction(elem_ptr, irb->current_basic_block);
                ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base);

                IrInstSrc *expr_value = ir_gen_node_extra(irb, expr_node, scope, LValNone,
                        &result_loc_inst->base);
                if (expr_value == irb->codegen->invalid_inst_src)
                    return expr_value;

                result_locs[i] = elem_ptr;
            }
            IrInstSrc *result = ir_build_container_init_list(irb, scope, node, item_count,
                    result_locs, container_ptr, init_array_type_source_node);
            if (result_loc_cast != nullptr) {
                result = ir_build_implicit_cast(irb, scope, node, result, result_loc_cast);
            }
            return ir_lval_wrap(irb, scope, result, lval, parent_result_loc);
        }
    }
    zig_unreachable();
}

static ResultLocVar *ir_build_var_result_loc(IrBuilderSrc *irb, IrInstSrc *alloca, ZigVar *var) {
    ResultLocVar *result_loc_var = heap::c_allocator.create<ResultLocVar>();
    result_loc_var->base.id = ResultLocIdVar;
    result_loc_var->base.source_instruction = alloca;
    result_loc_var->base.allow_write_through_const = true;
    result_loc_var->var = var;

    ir_build_reset_result(irb, alloca->base.scope, alloca->base.source_node, &result_loc_var->base);

    return result_loc_var;
}

static ResultLocCast *ir_build_cast_result_loc(IrBuilderSrc *irb, IrInstSrc *dest_type,
        ResultLoc *parent_result_loc)
{
    ResultLocCast *result_loc_cast = heap::c_allocator.create<ResultLocCast>();
    result_loc_cast->base.id = ResultLocIdCast;
    result_loc_cast->base.source_instruction = dest_type;
    result_loc_cast->base.allow_write_through_const = parent_result_loc->allow_write_through_const;
    ir_ref_instruction(dest_type, irb->current_basic_block);
    result_loc_cast->parent = parent_result_loc;

    ir_build_reset_result(irb, dest_type->base.scope, dest_type->base.source_node, &result_loc_cast->base);

    return result_loc_cast;
}

static void build_decl_var_and_init(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, ZigVar *var,
        IrInstSrc *init, const char *name_hint, IrInstSrc *is_comptime)
{
    IrInstSrc *alloca = ir_build_alloca_src(irb, scope, source_node, nullptr, name_hint, is_comptime);
    ResultLocVar *var_result_loc = ir_build_var_result_loc(irb, alloca, var);
    ir_build_end_expr(irb, scope, source_node, init, &var_result_loc->base);
    ir_build_var_decl_src(irb, scope, source_node, var, nullptr, alloca);
}

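// Lowers a local variable declaration: an alloca provides the variable's
// result location, an optional type annotation wraps it in a cast result
// location, and the initializer is generated directly into that location
// before the var_decl instruction is emitted. The executable is temporarily
// renamed to the variable's symbol so that a struct or enum created by the
// initializer inherits the name.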
static IrInstSrc *ir_gen_var_decl(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeVariableDeclaration);

    AstNodeVariableDeclaration *variable_declaration = &node->data.variable_declaration;

    if (buf_eql_str(variable_declaration->symbol, "_")) {
        add_node_error(irb->codegen, node, buf_sprintf("`_` is not a declarable symbol"));
        return irb->codegen->invalid_inst_src;
    }

    // Used for the type expr and the align expr
    Scope *comptime_scope = create_comptime_scope(irb->codegen, node, scope);

    IrInstSrc *type_instruction;
    if (variable_declaration->type != nullptr) {
        type_instruction = ir_gen_node(irb, variable_declaration->type, comptime_scope);
        if (type_instruction == irb->codegen->invalid_inst_src)
            return type_instruction;
    } else {
        type_instruction = nullptr;
    }

    bool is_shadowable = false;
    bool is_const = variable_declaration->is_const;
    bool is_extern = variable_declaration->is_extern;

    bool is_comptime_scalar = ir_should_inline(irb->exec, scope) || variable_declaration->is_comptime;
    IrInstSrc *is_comptime = ir_build_const_bool(irb, scope, node, is_comptime_scalar);
    ZigVar *var = ir_create_var(irb, node, scope, variable_declaration->symbol,
        is_const, is_const, is_shadowable, is_comptime);
    // gen_block detects IrInstSrcDeclVar so that the statements that follow this
    // declaration are generated inside var->child_scope

    if (!is_extern && !variable_declaration->expr) {
        var->var_type = irb->codegen->builtin_types.entry_invalid;
        add_node_error(irb->codegen, node, buf_sprintf("variables must be initialized"));
        return irb->codegen->invalid_inst_src;
    }

    IrInstSrc *align_value = nullptr;
    if (variable_declaration->align_expr != nullptr) {
        align_value = ir_gen_node(irb, variable_declaration->align_expr, comptime_scope);
        if (align_value == irb->codegen->invalid_inst_src)
            return align_value;
    }

    if (variable_declaration->section_expr != nullptr) {
        add_node_error(irb->codegen, variable_declaration->section_expr,
            buf_sprintf("cannot set section of local variable '%s'", buf_ptr(variable_declaration->symbol)));
    }

    // Parser should ensure that this never happens
    assert(variable_declaration->threadlocal_tok == nullptr);

    IrInstSrc *alloca = ir_build_alloca_src(irb, scope, node, align_value,
            buf_ptr(variable_declaration->symbol), is_comptime);

    // Create a result location for the initialization expression.
    ResultLocVar *result_loc_var = ir_build_var_result_loc(irb, alloca, var);
    ResultLoc *init_result_loc;
    ResultLocCast *result_loc_cast;
    if (type_instruction != nullptr) {
        result_loc_cast = ir_build_cast_result_loc(irb, type_instruction, &result_loc_var->base);
        init_result_loc = &result_loc_cast->base;
    } else {
        result_loc_cast = nullptr;
        init_result_loc = &result_loc_var->base;
    }

    Scope *init_scope = is_comptime_scalar ?
        create_comptime_scope(irb->codegen, variable_declaration->expr, scope) : scope;

    // Temporarily set the name of the IrExecutableSrc to the VariableDeclaration
    // so that the struct or enum from the init expression inherits the name.
    Buf *old_exec_name = irb->exec->name;
    irb->exec->name = variable_declaration->symbol;
    IrInstSrc *init_value = ir_gen_node_extra(irb, variable_declaration->expr, init_scope,
            LValNone, init_result_loc);
    irb->exec->name = old_exec_name;

    if (init_value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    if (result_loc_cast != nullptr) {
        IrInstSrc *implicit_cast = ir_build_implicit_cast(irb, scope, init_value->base.source_node,
                init_value, result_loc_cast);
        ir_build_end_expr(irb, scope, node, implicit_cast, &result_loc_var->base);
    }

    return ir_build_var_decl_src(irb, scope, node, var, align_value, alloca);
}

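// Lowers while loops. Basic blocks WhileCond, WhileBody, WhileContinue (only
// when a continue expression is present), WhileEnd, and WhileElse (only when
// an else branch is present) are created up front; break and continue targets,
// the comptime flag, and the peer result locations are threaded through a
// ScopeLoop so that break statements can produce the loop's value. The branch
// below handles the error-union payload form,
// `while (cond) |payload| ... else |err| ...`.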
static IrInstSrc *ir_gen_while_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeWhileExpr);

    AstNode *continue_expr_node = node->data.while_expr.continue_expr;
    AstNode *else_node = node->data.while_expr.else_node;

    IrBasicBlockSrc *cond_block = ir_create_basic_block(irb, scope, "WhileCond");
    IrBasicBlockSrc *body_block = ir_create_basic_block(irb, scope, "WhileBody");
    IrBasicBlockSrc *continue_block = continue_expr_node ?
        ir_create_basic_block(irb, scope, "WhileContinue") : cond_block;
    IrBasicBlockSrc *end_block = ir_create_basic_block(irb, scope, "WhileEnd");
    IrBasicBlockSrc *else_block = else_node ?
        ir_create_basic_block(irb, scope, "WhileElse") : end_block;

    IrInstSrc *is_comptime = ir_build_const_bool(irb, scope, node,
        ir_should_inline(irb->exec, scope) || node->data.while_expr.is_inline);
    ir_build_br(irb, scope, node, cond_block, is_comptime);

    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
    Buf *var_symbol = node->data.while_expr.var_symbol;
    Buf *err_symbol = node->data.while_expr.err_symbol;
    if (err_symbol != nullptr) {
        ir_set_cursor_at_end_and_append_block(irb, cond_block);

        Scope *payload_scope;
        AstNode *symbol_node = node; // TODO make more accurate
        ZigVar *payload_var;
        if (var_symbol) {
            // TODO make it an error to write to payload variable
            payload_var = ir_create_var(irb, symbol_node, subexpr_scope, var_symbol,
                    true, false, false, is_comptime);
            payload_scope = payload_var->child_scope;
        } else {
            payload_scope = subexpr_scope;
        }
        ScopeExpr *spill_scope = create_expr_scope(irb->codegen, node, payload_scope);
        IrInstSrc *err_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, subexpr_scope,
                LValPtr, nullptr);
        if (err_val_ptr == irb->codegen->invalid_inst_src)
            return err_val_ptr;
        IrInstSrc *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr,
                true, false);
        IrBasicBlockSrc *after_cond_block = irb->current_basic_block;
        IrInstSrc *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node));
        IrInstSrc *cond_br_inst;
        if (!instr_is_unreachable(is_err)) {
            cond_br_inst = ir_build_cond_br(irb, scope, node->data.while_expr.condition, is_err,
                        else_block, body_block, is_comptime);
            cond_br_inst->is_gen = true;
        } else {
            // used only as the source instruction for ir_build_result_peers below
            cond_br_inst = irb->current_basic_block->instruction_list.last();
        }

        ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc,
                is_comptime);

        ir_set_cursor_at_end_and_append_block(irb, body_block);
        if (var_symbol) {
            IrInstSrc *payload_ptr = ir_build_unwrap_err_payload_src(irb, &spill_scope->base, symbol_node,
                    err_val_ptr, false, false);
            IrInstSrc *var_value = node->data.while_expr.var_is_ptr ?
                payload_ptr : ir_build_load_ptr(irb, &spill_scope->base, symbol_node, payload_ptr);
            build_decl_var_and_init(irb, payload_scope, symbol_node, payload_var, var_value, buf_ptr(var_symbol), is_comptime);
        }

        ZigList<IrInstSrc *> incoming_values = {0};
        ZigList<IrBasicBlockSrc *> incoming_blocks = {0};

        ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, payload_scope);
        loop_scope->break_block = end_block;
        loop_scope->continue_block = continue_block;
        loop_scope->is_comptime = is_comptime;
        loop_scope->incoming_blocks = &incoming_blocks;
        loop_scope->incoming_values = &incoming_values;
        loop_scope->lval = lval;
        loop_scope->peer_parent = peer_parent;
        loop_scope->spill_scope = spill_scope;

        // Note the body block of the loop is not the place that lval and result_loc are used -
        // it's actually in break statements, handled similarly to return statements.
        // That is why we set those values in loop_scope above and not in this ir_gen_node call.
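        // (For example, in `const x = while (cond) { if (p) break v; } else d;`
        // it is `break v` and the else expression that store to the result
        // location, not the body expression itself.)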
        IrInstSrc *body_result = ir_gen_node(irb, node->data.while_expr.body, &loop_scope->base);
        if (body_result == irb->codegen->invalid_inst_src)
            return body_result;

        if (!instr_is_unreachable(body_result)) {
            ir_mark_gen(ir_build_check_statement_is_void(irb, payload_scope, node->data.while_expr.body, body_result));
            ir_mark_gen(ir_build_br(irb, payload_scope, node, continue_block, is_comptime));
        }

        if (continue_expr_node) {
            ir_set_cursor_at_end_and_append_block(irb, continue_block);
            IrInstSrc *expr_result = ir_gen_node(irb, continue_expr_node, payload_scope);
            if (expr_result == irb->codegen->invalid_inst_src)
                return expr_result;
            if (!instr_is_unreachable(expr_result)) {
                ir_mark_gen(ir_build_check_statement_is_void(irb, payload_scope, continue_expr_node, expr_result));
                ir_mark_gen(ir_build_br(irb, payload_scope, node, cond_block, is_comptime));
            }
        }

        ir_set_cursor_at_end_and_append_block(irb, else_block);
        assert(else_node != nullptr);

        // TODO make it an error to write to error variable
        AstNode *err_symbol_node = else_node; // TODO make more accurate
        ZigVar *err_var = ir_create_var(irb, err_symbol_node, scope, err_symbol,
                true, false, false, is_comptime);
        Scope *err_scope = err_var->child_scope;
        IrInstSrc *err_ptr = ir_build_unwrap_err_code_src(irb, err_scope, err_symbol_node, err_val_ptr);
        IrInstSrc *err_value = ir_build_load_ptr(irb, err_scope, err_symbol_node, err_ptr);
        build_decl_var_and_init(irb, err_scope, err_symbol_node, err_var, err_value, buf_ptr(err_symbol), is_comptime);

        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = else_block;
        }
        ResultLocPeer *peer_result = create_peer_result(peer_parent);
        peer_parent->peers.append(peer_result);
        IrInstSrc *else_result = ir_gen_node_extra(irb, else_node, err_scope, lval, &peer_result->base);
        if (else_result == irb->codegen->invalid_inst_src)
            return else_result;
        if (!instr_is_unreachable(else_result))
            ir_mark_gen(ir_build_br(irb, scope, node, end_block, is_comptime));
        IrBasicBlockSrc *after_else_block = irb->current_basic_block;
        ir_set_cursor_at_end_and_append_block(irb, end_block);
        if (else_result) {
            incoming_blocks.append(after_else_block);
            incoming_values.append(else_result);
        } else {
            incoming_blocks.append(after_cond_block);
            incoming_values.append(void_else_result);
        }
        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = end_block;
        }

        IrInstSrc *phi = ir_build_phi(irb, scope, node, incoming_blocks.length,
                incoming_blocks.items, incoming_values.items, peer_parent);
        return ir_expr_wrap(irb, scope, phi, result_loc);
    } else if (var_symbol != nullptr) {
        ir_set_cursor_at_end_and_append_block(irb, cond_block);
        Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
        // TODO make it an error to write to payload variable
        AstNode *symbol_node = node; // TODO make more accurate

        ZigVar *payload_var = ir_create_var(irb, symbol_node, subexpr_scope, var_symbol,
                true, false, false, is_comptime);
        Scope *child_scope = payload_var->child_scope;
        ScopeExpr *spill_scope = create_expr_scope(irb->codegen, node, child_scope);
        IrInstSrc *maybe_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, subexpr_scope,
                LValPtr, nullptr);
        if (maybe_val_ptr == irb->codegen->invalid_inst_src)
            return maybe_val_ptr;
        IrInstSrc *maybe_val = ir_build_load_ptr(irb, scope, node->data.while_expr.condition, maybe_val_ptr);
        IrInstSrc *is_non_null = ir_build_test_non_null_src(irb, scope, node->data.while_expr.condition, maybe_val);
        IrBasicBlockSrc *after_cond_block = irb->current_basic_block;
        IrInstSrc *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node));
        IrInstSrc *cond_br_inst;
        if (!instr_is_unreachable(is_non_null)) {
            cond_br_inst = ir_build_cond_br(irb, scope, node->data.while_expr.condition, is_non_null,
                        body_block, else_block, is_comptime);
            cond_br_inst->is_gen = true;
        } else {
            // used only as the source instruction for ir_build_result_peers below
            cond_br_inst = irb->current_basic_block->instruction_list.last();
        }

        ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc,
                is_comptime);

        ir_set_cursor_at_end_and_append_block(irb, body_block);
        IrInstSrc *payload_ptr = ir_build_optional_unwrap_ptr(irb, &spill_scope->base, symbol_node, maybe_val_ptr, false);
        IrInstSrc *var_value = node->data.while_expr.var_is_ptr ?
            payload_ptr : ir_build_load_ptr(irb, &spill_scope->base, symbol_node, payload_ptr);
        build_decl_var_and_init(irb, child_scope, symbol_node, payload_var, var_value, buf_ptr(var_symbol), is_comptime);

        ZigList<IrInstSrc *> incoming_values = {0};
        ZigList<IrBasicBlockSrc *> incoming_blocks = {0};

        ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, child_scope);
        loop_scope->break_block = end_block;
        loop_scope->continue_block = continue_block;
        loop_scope->is_comptime = is_comptime;
        loop_scope->incoming_blocks = &incoming_blocks;
        loop_scope->incoming_values = &incoming_values;
        loop_scope->lval = lval;
        loop_scope->peer_parent = peer_parent;
        loop_scope->spill_scope = spill_scope;

        // Note the body block of the loop is not the place that lval and result_loc are used -
        // it's actually in break statements, handled similarly to return statements.
        // That is why we set those values in loop_scope above and not in this ir_gen_node call.
        IrInstSrc *body_result = ir_gen_node(irb, node->data.while_expr.body, &loop_scope->base);
        if (body_result == irb->codegen->invalid_inst_src)
            return body_result;

        if (!instr_is_unreachable(body_result)) {
            ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.while_expr.body, body_result));
            ir_mark_gen(ir_build_br(irb, child_scope, node, continue_block, is_comptime));
        }

        if (continue_expr_node) {
            ir_set_cursor_at_end_and_append_block(irb, continue_block);
            IrInstSrc *expr_result = ir_gen_node(irb, continue_expr_node, child_scope);
            if (expr_result == irb->codegen->invalid_inst_src)
                return expr_result;
            if (!instr_is_unreachable(expr_result)) {
                ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, continue_expr_node, expr_result));
                ir_mark_gen(ir_build_br(irb, child_scope, node, cond_block, is_comptime));
            }
        }

        IrInstSrc *else_result = nullptr;
        if (else_node) {
            ir_set_cursor_at_end_and_append_block(irb, else_block);

            if (peer_parent->peers.length != 0) {
                peer_parent->peers.last()->next_bb = else_block;
            }
            ResultLocPeer *peer_result = create_peer_result(peer_parent);
            peer_parent->peers.append(peer_result);
            else_result = ir_gen_node_extra(irb, else_node, scope, lval, &peer_result->base);
            if (else_result == irb->codegen->invalid_inst_src)
                return else_result;
            if (!instr_is_unreachable(else_result))
                ir_mark_gen(ir_build_br(irb, scope, node, end_block, is_comptime));
        }
        IrBasicBlockSrc *after_else_block = irb->current_basic_block;
        ir_set_cursor_at_end_and_append_block(irb, end_block);
        if (else_result) {
            incoming_blocks.append(after_else_block);
            incoming_values.append(else_result);
        } else {
            incoming_blocks.append(after_cond_block);
            incoming_values.append(void_else_result);
        }
        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = end_block;
        }

        IrInstSrc *phi = ir_build_phi(irb, scope, node, incoming_blocks.length,
                incoming_blocks.items, incoming_values.items, peer_parent);
        return ir_expr_wrap(irb, scope, phi, result_loc);
    } else {
        ir_set_cursor_at_end_and_append_block(irb, cond_block);
        IrInstSrc *cond_val = ir_gen_node(irb, node->data.while_expr.condition, scope);
        if (cond_val == irb->codegen->invalid_inst_src)
            return cond_val;
        IrBasicBlockSrc *after_cond_block = irb->current_basic_block;
        IrInstSrc *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node));
        IrInstSrc *cond_br_inst;
        if (!instr_is_unreachable(cond_val)) {
            cond_br_inst = ir_build_cond_br(irb, scope, node->data.while_expr.condition, cond_val,
                        body_block, else_block, is_comptime);
            cond_br_inst->is_gen = true;
        } else {
            // used only as the source instruction for ir_build_result_peers below
            cond_br_inst = irb->current_basic_block->instruction_list.last();
        }

        ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc,
                is_comptime);
        ir_set_cursor_at_end_and_append_block(irb, body_block);

        ZigList<IrInstSrc *> incoming_values = {0};
        ZigList<IrBasicBlockSrc *> incoming_blocks = {0};

        Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);

        ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, subexpr_scope);
        loop_scope->break_block = end_block;
        loop_scope->continue_block = continue_block;
        loop_scope->is_comptime = is_comptime;
        loop_scope->incoming_blocks = &incoming_blocks;
        loop_scope->incoming_values = &incoming_values;
        loop_scope->lval = lval;
        loop_scope->peer_parent = peer_parent;

        // Note the body block of the loop is not the place that lval and result_loc are used -
        // it's actually in break statements, handled similarly to return statements.
        // That is why we set those values in loop_scope above and not in this ir_gen_node call.
        IrInstSrc *body_result = ir_gen_node(irb, node->data.while_expr.body, &loop_scope->base);
        if (body_result == irb->codegen->invalid_inst_src)
            return body_result;

        if (!instr_is_unreachable(body_result)) {
            ir_mark_gen(ir_build_check_statement_is_void(irb, scope, node->data.while_expr.body, body_result));
            ir_mark_gen(ir_build_br(irb, scope, node, continue_block, is_comptime));
        }

        if (continue_expr_node) {
            ir_set_cursor_at_end_and_append_block(irb, continue_block);
            IrInstSrc *expr_result = ir_gen_node(irb, continue_expr_node, subexpr_scope);
            if (expr_result == irb->codegen->invalid_inst_src)
                return expr_result;
            if (!instr_is_unreachable(expr_result)) {
                ir_mark_gen(ir_build_check_statement_is_void(irb, scope, continue_expr_node, expr_result));
                ir_mark_gen(ir_build_br(irb, scope, node, cond_block, is_comptime));
            }
        }

        IrInstSrc *else_result = nullptr;
        if (else_node) {
            ir_set_cursor_at_end_and_append_block(irb, else_block);

            if (peer_parent->peers.length != 0) {
                peer_parent->peers.last()->next_bb = else_block;
            }
            ResultLocPeer *peer_result = create_peer_result(peer_parent);
            peer_parent->peers.append(peer_result);

            else_result = ir_gen_node_extra(irb, else_node, subexpr_scope, lval, &peer_result->base);
            if (else_result == irb->codegen->invalid_inst_src)
                return else_result;
            if (!instr_is_unreachable(else_result))
                ir_mark_gen(ir_build_br(irb, scope, node, end_block, is_comptime));
        }
        IrBasicBlockSrc *after_else_block = irb->current_basic_block;
        ir_set_cursor_at_end_and_append_block(irb, end_block);
        if (else_result) {
            incoming_blocks.append(after_else_block);
            incoming_values.append(else_result);
        } else {
            incoming_blocks.append(after_cond_block);
            incoming_values.append(void_else_result);
        }
        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = end_block;
        }

        IrInstSrc *phi = ir_build_phi(irb, scope, node, incoming_blocks.length,
                incoming_blocks.items, incoming_values.items, peer_parent);
        return ir_expr_wrap(irb, scope, phi, result_loc);
    }
}

static IrInstSrc *ir_gen_for_expr(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeForExpr);
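    // Lowers `for (array) |elem, i| { ... } else { ... }` (index capture and
    // else optional) into an explicit index loop: ForCond compares a usize
    // index (the user's `i`, or a hidden one) against `array.len`, ForBody
    // loads the element, ForContinue increments the index, and the arms meet
    // in a phi at ForEnd.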

    AstNode *array_node = node->data.for_expr.array_expr;
    AstNode *elem_node = node->data.for_expr.elem_node;
    AstNode *index_node = node->data.for_expr.index_node;
    AstNode *body_node = node->data.for_expr.body;
    AstNode *else_node = node->data.for_expr.else_node;

    if (!elem_node) {
        add_node_error(irb->codegen, node, buf_sprintf("for loop expression missing element parameter"));
        return irb->codegen->invalid_inst_src;
    }
    assert(elem_node->type == NodeTypeSymbol);

    ScopeExpr *spill_scope = create_expr_scope(irb->codegen, node, parent_scope);

    IrInstSrc *array_val_ptr = ir_gen_node_extra(irb, array_node, &spill_scope->base, LValPtr, nullptr);
    if (array_val_ptr == irb->codegen->invalid_inst_src)
        return array_val_ptr;

    IrInstSrc *is_comptime = ir_build_const_bool(irb, parent_scope, node,
        ir_should_inline(irb->exec, parent_scope) || node->data.for_expr.is_inline);

    AstNode *index_var_source_node;
    ZigVar *index_var;
    const char *index_var_name;
    if (index_node) {
        index_var_source_node = index_node;
        Buf *index_var_name_buf = index_node->data.symbol_expr.symbol;
        index_var = ir_create_var(irb, index_node, parent_scope, index_var_name_buf, true, false, false, is_comptime);
        index_var_name = buf_ptr(index_var_name_buf);
    } else {
        index_var_source_node = node;
        index_var = ir_create_var(irb, node, parent_scope, nullptr, true, false, true, is_comptime);
        index_var_name = "i";
    }

    IrInstSrc *zero = ir_build_const_usize(irb, parent_scope, node, 0);
    build_decl_var_and_init(irb, parent_scope, index_var_source_node, index_var, zero, index_var_name, is_comptime);
    parent_scope = index_var->child_scope;

    IrInstSrc *one = ir_build_const_usize(irb, parent_scope, node, 1);
    IrInstSrc *index_ptr = ir_build_var_ptr(irb, parent_scope, node, index_var);


    IrBasicBlockSrc *cond_block = ir_create_basic_block(irb, parent_scope, "ForCond");
    IrBasicBlockSrc *body_block = ir_create_basic_block(irb, parent_scope, "ForBody");
    IrBasicBlockSrc *end_block = ir_create_basic_block(irb, parent_scope, "ForEnd");
    IrBasicBlockSrc *else_block = else_node ? ir_create_basic_block(irb, parent_scope, "ForElse") : end_block;
    IrBasicBlockSrc *continue_block = ir_create_basic_block(irb, parent_scope, "ForContinue");

    Buf *len_field_name = buf_create_from_str("len");
    IrInstSrc *len_ref = ir_build_field_ptr(irb, parent_scope, node, array_val_ptr, len_field_name, false);
    IrInstSrc *len_val = ir_build_load_ptr(irb, &spill_scope->base, node, len_ref);
    ir_build_br(irb, parent_scope, node, cond_block, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, cond_block);
    IrInstSrc *index_val = ir_build_load_ptr(irb, &spill_scope->base, node, index_ptr);
    IrInstSrc *cond = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpLessThan, index_val, len_val, false);
    IrBasicBlockSrc *after_cond_block = irb->current_basic_block;
    IrInstSrc *void_else_value = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, parent_scope, node));
    IrInstSrc *cond_br_inst = ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, cond,
                body_block, else_block, is_comptime));

    ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, body_block);
    IrInstSrc *elem_ptr = ir_build_elem_ptr(irb, &spill_scope->base, node, array_val_ptr, index_val,
            false, PtrLenSingle, nullptr);
    // TODO make it an error to write to element variable or i variable.
    Buf *elem_var_name = elem_node->data.symbol_expr.symbol;
    ZigVar *elem_var = ir_create_var(irb, elem_node, parent_scope, elem_var_name, true, false, false, is_comptime);
    Scope *child_scope = elem_var->child_scope;

    IrInstSrc *elem_value = node->data.for_expr.elem_is_ptr ?
        elem_ptr : ir_build_load_ptr(irb, &spill_scope->base, elem_node, elem_ptr);
    build_decl_var_and_init(irb, parent_scope, elem_node, elem_var, elem_value, buf_ptr(elem_var_name), is_comptime);

    ZigList<IrInstSrc *> incoming_values = {0};
    ZigList<IrBasicBlockSrc *> incoming_blocks = {0};
    ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, child_scope);
    loop_scope->break_block = end_block;
    loop_scope->continue_block = continue_block;
    loop_scope->is_comptime = is_comptime;
    loop_scope->incoming_blocks = &incoming_blocks;
    loop_scope->incoming_values = &incoming_values;
    loop_scope->lval = LValNone;
    loop_scope->peer_parent = peer_parent;
    loop_scope->spill_scope = spill_scope;

    // Note the body block of the loop is not the place that lval and result_loc are used -
    // it's actually in break statements, handled similarly to return statements.
    // That is why we set those values in loop_scope above and not in this ir_gen_node call.
    IrInstSrc *body_result = ir_gen_node(irb, body_node, &loop_scope->base);
    if (body_result == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    if (!instr_is_unreachable(body_result)) {
        ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.for_expr.body, body_result));
        ir_mark_gen(ir_build_br(irb, child_scope, node, continue_block, is_comptime));
    }

    ir_set_cursor_at_end_and_append_block(irb, continue_block);
    IrInstSrc *new_index_val = ir_build_bin_op(irb, child_scope, node, IrBinOpAdd, index_val, one, false);
    ir_build_store_ptr(irb, child_scope, node, index_ptr, new_index_val)->allow_write_through_const = true;
    ir_build_br(irb, child_scope, node, cond_block, is_comptime);

    IrInstSrc *else_result = nullptr;
    if (else_node) {
        ir_set_cursor_at_end_and_append_block(irb, else_block);

        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = else_block;
        }
        ResultLocPeer *peer_result = create_peer_result(peer_parent);
        peer_parent->peers.append(peer_result);
        else_result = ir_gen_node_extra(irb, else_node, parent_scope, LValNone, &peer_result->base);
        if (else_result == irb->codegen->invalid_inst_src)
            return else_result;
        if (!instr_is_unreachable(else_result))
            ir_mark_gen(ir_build_br(irb, parent_scope, node, end_block, is_comptime));
    }
    IrBasicBlockSrc *after_else_block = irb->current_basic_block;
    ir_set_cursor_at_end_and_append_block(irb, end_block);

    if (else_result) {
        incoming_blocks.append(after_else_block);
        incoming_values.append(else_result);
    } else {
        incoming_blocks.append(after_cond_block);
        incoming_values.append(void_else_value);
    }
    if (peer_parent->peers.length != 0) {
        peer_parent->peers.last()->next_bb = end_block;
    }

    IrInstSrc *phi = ir_build_phi(irb, parent_scope, node, incoming_blocks.length,
            incoming_blocks.items, incoming_values.items, peer_parent);
    return ir_lval_wrap(irb, parent_scope, phi, lval, result_loc);
}

static IrInstSrc *ir_gen_bool_literal(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeBoolLiteral);
    return ir_build_const_bool(irb, scope, node, node->data.bool_literal.value);
}

static IrInstSrc *ir_gen_enum_literal(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeEnumLiteral);
    Buf *name = &node->data.enum_literal.identifier->data.str_lit.str;
    return ir_build_const_enum_literal(irb, scope, node, name);
}

static IrInstSrc *ir_gen_string_literal(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeStringLiteral);

    return ir_build_const_str_lit(irb, scope, node, node->data.string_literal.buf);
}

static IrInstSrc *ir_gen_array_type(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeArrayType);
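    // With a size expression this is an array type such as `[N]T` or `[N:s]T`;
    // without one it is slice syntax such as `[]align(A) const T`, where the
    // const/volatile/allowzero/align qualifiers and sentinel are permitted.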

    AstNode *size_node = node->data.array_type.size;
    AstNode *child_type_node = node->data.array_type.child_type;
    bool is_const = node->data.array_type.is_const;
    bool is_volatile = node->data.array_type.is_volatile;
    bool is_allow_zero = node->data.array_type.allow_zero_token != nullptr;
    AstNode *sentinel_expr = node->data.array_type.sentinel;
    AstNode *align_expr = node->data.array_type.align_expr;

    Scope *comptime_scope = create_comptime_scope(irb->codegen, node, scope);

    IrInstSrc *sentinel;
    if (sentinel_expr != nullptr) {
        sentinel = ir_gen_node(irb, sentinel_expr, comptime_scope);
        if (sentinel == irb->codegen->invalid_inst_src)
            return sentinel;
    } else {
        sentinel = nullptr;
    }

    if (size_node) {
        if (is_const) {
            add_node_error(irb->codegen, node, buf_create_from_str("const qualifier invalid on array type"));
            return irb->codegen->invalid_inst_src;
        }
        if (is_volatile) {
            add_node_error(irb->codegen, node, buf_create_from_str("volatile qualifier invalid on array type"));
            return irb->codegen->invalid_inst_src;
        }
        if (is_allow_zero) {
            add_node_error(irb->codegen, node, buf_create_from_str("allowzero qualifier invalid on array type"));
            return irb->codegen->invalid_inst_src;
        }
        if (align_expr != nullptr) {
            add_node_error(irb->codegen, node, buf_create_from_str("align qualifier invalid on array type"));
            return irb->codegen->invalid_inst_src;
        }

        IrInstSrc *size_value = ir_gen_node(irb, size_node, comptime_scope);
        if (size_value == irb->codegen->invalid_inst_src)
            return size_value;

        IrInstSrc *child_type = ir_gen_node(irb, child_type_node, comptime_scope);
        if (child_type == irb->codegen->invalid_inst_src)
            return child_type;

        return ir_build_array_type(irb, scope, node, size_value, sentinel, child_type);
    } else {
        IrInstSrc *align_value;
        if (align_expr != nullptr) {
            align_value = ir_gen_node(irb, align_expr, comptime_scope);
            if (align_value == irb->codegen->invalid_inst_src)
                return align_value;
        } else {
            align_value = nullptr;
        }

        IrInstSrc *child_type = ir_gen_node(irb, child_type_node, comptime_scope);
        if (child_type == irb->codegen->invalid_inst_src)
            return child_type;

        return ir_build_slice_type(irb, scope, node, child_type, is_const, is_volatile, sentinel,
                align_value, is_allow_zero);
    }
}

static IrInstSrc *ir_gen_anyframe_type(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeAnyFrameType);

    AstNode *payload_type_node = node->data.anyframe_type.payload_type;
    IrInstSrc *payload_type_value = nullptr;

    if (payload_type_node != nullptr) {
        payload_type_value = ir_gen_node(irb, payload_type_node, scope);
        if (payload_type_value == irb->codegen->invalid_inst_src)
            return payload_type_value;

    }

    return ir_build_anyframe_type(irb, scope, node, payload_type_value);
}

static IrInstSrc *ir_gen_undefined_literal(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeUndefinedLiteral);
    return ir_build_const_undefined(irb, scope, node);
}

static Error parse_asm_template(IrAnalyze *ira, AstNode *source_node, Buf *asm_template,
        ZigList<AsmToken> *tok_list)
{
    // TODO Connect the errors in this function back up to the actual source location
    // rather than just the token. https://github.com/ziglang/zig/issues/2080
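    // Roughly, a template like "mov %[arg], %%eax" tokenizes as
    //   Template("mov "), Var("%[arg]"), Template(", "), Percent("%%"), Template("eax")
    // and "%=" produces a UniqueId token.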
    enum State {
        StateStart,
        StatePercent,
        StateTemplate,
        StateVar,
    };

    assert(tok_list->length == 0);

    AsmToken *cur_tok = nullptr;

    enum State state = StateStart;

    for (size_t i = 0; i < buf_len(asm_template); i += 1) {
        uint8_t c = *((uint8_t*)buf_ptr(asm_template) + i);
        switch (state) {
            case StateStart:
                if (c == '%') {
                    tok_list->add_one();
                    cur_tok = &tok_list->last();
                    cur_tok->id = AsmTokenIdPercent;
                    cur_tok->start = i;
                    state = StatePercent;
                } else {
                    tok_list->add_one();
                    cur_tok = &tok_list->last();
                    cur_tok->id = AsmTokenIdTemplate;
                    cur_tok->start = i;
                    state = StateTemplate;
                }
                break;
            case StatePercent:
                if (c == '%') {
                    cur_tok->end = i;
                    state = StateStart;
                } else if (c == '[') {
                    cur_tok->id = AsmTokenIdVar;
                    state = StateVar;
                } else if (c == '=') {
                    cur_tok->id = AsmTokenIdUniqueId;
                    cur_tok->end = i;
                    state = StateStart;
                } else {
                    add_node_error(ira->codegen, source_node,
                        buf_create_from_str("expected a '%' or '['"));
                    return ErrorSemanticAnalyzeFail;
                }
                break;
            case StateTemplate:
                if (c == '%') {
                    cur_tok->end = i;
                    i -= 1;
                    cur_tok = nullptr;
                    state = StateStart;
                }
                break;
            case StateVar:
                if (c == ']') {
                    cur_tok->end = i;
                    state = StateStart;
                } else if ((c >= 'a' && c <= 'z') ||
                        (c >= '0' && c <= '9') ||
                        (c == '_'))
                {
                    // do nothing
                } else {
                    add_node_error(ira->codegen, source_node,
                        buf_sprintf("invalid substitution character: '%c'", c));
                    return ErrorSemanticAnalyzeFail;
                }
                break;
        }
    }

    switch (state) {
        case StateStart:
            break;
        case StatePercent:
        case StateVar:
            add_node_error(ira->codegen, source_node, buf_sprintf("unexpected end of assembly template"));
            return ErrorSemanticAnalyzeFail;
        case StateTemplate:
            cur_tok->end = buf_len(asm_template);
            break;
    }
    return ErrorNone;
}

static size_t find_asm_index(CodeGen *g, AstNode *node, AsmToken *tok, Buf *src_template) {
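    // Maps a Var token ("%[name]") back to its operand index: outputs are
    // numbered before inputs, and SIZE_MAX is returned when the symbolic
    // name does not match any operand.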
    const char *ptr = buf_ptr(src_template) + tok->start + 2;
    size_t len = tok->end - tok->start - 2;
    size_t result = 0;
    for (size_t i = 0; i < node->data.asm_expr.output_list.length; i += 1, result += 1) {
        AsmOutput *asm_output = node->data.asm_expr.output_list.at(i);
        if (buf_eql_mem(asm_output->asm_symbolic_name, ptr, len)) {
            return result;
        }
    }
    for (size_t i = 0; i < node->data.asm_expr.input_list.length; i += 1, result += 1) {
        AsmInput *asm_input = node->data.asm_expr.input_list.at(i);
        if (buf_eql_mem(asm_input->asm_symbolic_name, ptr, len)) {
            return result;
        }
    }
    return SIZE_MAX;
}

static IrInstSrc *ir_gen_asm_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeAsmExpr);
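    // Assembly outside a function body is emitted as global (module-level)
    // assembly and may not be volatile or have inputs, outputs, or clobbers.
    // Inside a function, asm with no outputs must be volatile, at most one
    // output may use a `-> T` return type, and every output constraint must
    // begin with '='.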
    AstNodeAsmExpr *asm_expr = &node->data.asm_expr;

    IrInstSrc *asm_template = ir_gen_node(irb, asm_expr->asm_template, scope);
    if (asm_template == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    bool is_volatile = asm_expr->volatile_token != nullptr;
    bool in_fn_scope = (scope_fn_entry(scope) != nullptr);

    if (!in_fn_scope) {
        if (is_volatile) {
            add_token_error(irb->codegen, node->owner, asm_expr->volatile_token,
                    buf_sprintf("volatile is meaningless on global assembly"));
            return irb->codegen->invalid_inst_src;
        }

        if (asm_expr->output_list.length != 0 || asm_expr->input_list.length != 0 ||
            asm_expr->clobber_list.length != 0)
        {
            add_node_error(irb->codegen, node,
                buf_sprintf("global assembly cannot have inputs, outputs, or clobbers"));
            return irb->codegen->invalid_inst_src;
        }

        return ir_build_asm_src(irb, scope, node, asm_template, nullptr, nullptr,
                                nullptr, 0, is_volatile, true);
    }

    IrInstSrc **input_list = heap::c_allocator.allocate<IrInstSrc *>(asm_expr->input_list.length);
    IrInstSrc **output_types = heap::c_allocator.allocate<IrInstSrc *>(asm_expr->output_list.length);
    ZigVar **output_vars = heap::c_allocator.allocate<ZigVar *>(asm_expr->output_list.length);
    size_t return_count = 0;
    if (!is_volatile && asm_expr->output_list.length == 0) {
        add_node_error(irb->codegen, node,
                buf_sprintf("assembly expression with no output must be marked volatile"));
        return irb->codegen->invalid_inst_src;
    }
    for (size_t i = 0; i < asm_expr->output_list.length; i += 1) {
        AsmOutput *asm_output = asm_expr->output_list.at(i);
        if (asm_output->return_type) {
            return_count += 1;

            IrInstSrc *return_type = ir_gen_node(irb, asm_output->return_type, scope);
            if (return_type == irb->codegen->invalid_inst_src)
                return irb->codegen->invalid_inst_src;
            if (return_count > 1) {
                add_node_error(irb->codegen, node,
                        buf_sprintf("inline assembly allows up to one output value"));
                return irb->codegen->invalid_inst_src;
            }
            output_types[i] = return_type;
        } else {
            Buf *variable_name = asm_output->variable_name;
            // TODO there is some duplication here with ir_gen_symbol. I need to do a full audit of how
            // inline assembly works. https://github.com/ziglang/zig/issues/215
            ZigVar *var = find_variable(irb->codegen, scope, variable_name, nullptr);
            if (var) {
                output_vars[i] = var;
            } else {
                add_node_error(irb->codegen, node,
                        buf_sprintf("use of undeclared identifier '%s'", buf_ptr(variable_name)));
                return irb->codegen->invalid_inst_src;
            }
        }

        const char modifier = *buf_ptr(asm_output->constraint);
        if (modifier != '=') {
            add_node_error(irb->codegen, node,
                buf_sprintf("invalid modifier starting output constraint for '%s': '%c', only '=' is supported."
                    " Compiler TODO: see https://github.com/ziglang/zig/issues/215",
                    buf_ptr(asm_output->asm_symbolic_name), modifier));
            return irb->codegen->invalid_inst_src;
        }
    }
    for (size_t i = 0; i < asm_expr->input_list.length; i += 1) {
        AsmInput *asm_input = asm_expr->input_list.at(i);
        IrInstSrc *input_value = ir_gen_node(irb, asm_input->expr, scope);
        if (input_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;

        input_list[i] = input_value;
    }

    return ir_build_asm_src(irb, scope, node, asm_template, input_list, output_types,
                            output_vars, return_count, is_volatile, false);
}

static IrInstSrc *ir_gen_if_optional_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeIfOptional);
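    // Lowers `if (optional) |payload| then else other` (or `|*payload|` for a
    // pointer capture): the optional is loaded and tested for non-null, the
    // payload pointer is unwrapped in the then branch, and both branches feed
    // a phi in OptionalEndIf.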

    Buf *var_symbol = node->data.test_expr.var_symbol;
    AstNode *expr_node = node->data.test_expr.target_node;
    AstNode *then_node = node->data.test_expr.then_node;
    AstNode *else_node = node->data.test_expr.else_node;
    bool var_is_ptr = node->data.test_expr.var_is_ptr;

    ScopeExpr *spill_scope = create_expr_scope(irb->codegen, expr_node, scope);
    spill_scope->spill_harder = true;

    IrInstSrc *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, &spill_scope->base, LValPtr, nullptr);
    if (maybe_val_ptr == irb->codegen->invalid_inst_src)
        return maybe_val_ptr;

    IrInstSrc *maybe_val = ir_build_load_ptr(irb, scope, node, maybe_val_ptr);
    IrInstSrc *is_non_null = ir_build_test_non_null_src(irb, scope, node, maybe_val);

    IrBasicBlockSrc *then_block = ir_create_basic_block(irb, scope, "OptionalThen");
    IrBasicBlockSrc *else_block = ir_create_basic_block(irb, scope, "OptionalElse");
    IrBasicBlockSrc *endif_block = ir_create_basic_block(irb, scope, "OptionalEndIf");

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, scope)) {
        is_comptime = ir_build_const_bool(irb, scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, scope, node, is_non_null);
    }
    IrInstSrc *cond_br_inst = ir_build_cond_br(irb, scope, node, is_non_null,
            then_block, else_block, is_comptime);

    ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, else_block, endif_block,
            result_loc, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, then_block);

    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, &spill_scope->base, is_comptime);
    Scope *var_scope;
    if (var_symbol) {
        bool is_shadowable = false;
        bool is_const = true;
        ZigVar *var = ir_create_var(irb, node, subexpr_scope,
                var_symbol, is_const, is_const, is_shadowable, is_comptime);

        IrInstSrc *payload_ptr = ir_build_optional_unwrap_ptr(irb, subexpr_scope, node, maybe_val_ptr, false);
        IrInstSrc *var_value = var_is_ptr ?
            payload_ptr : ir_build_load_ptr(irb, &spill_scope->base, node, payload_ptr);
        build_decl_var_and_init(irb, subexpr_scope, node, var, var_value, buf_ptr(var_symbol), is_comptime);
        var_scope = var->child_scope;
    } else {
        var_scope = subexpr_scope;
    }
    IrInstSrc *then_expr_result = ir_gen_node_extra(irb, then_node, var_scope, lval,
            &peer_parent->peers.at(0)->base);
    if (then_expr_result == irb->codegen->invalid_inst_src)
        return then_expr_result;
    IrBasicBlockSrc *after_then_block = irb->current_basic_block;
    if (!instr_is_unreachable(then_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, else_block);
    IrInstSrc *else_expr_result;
    if (else_node) {
        else_expr_result = ir_gen_node_extra(irb, else_node, subexpr_scope, lval, &peer_parent->peers.at(1)->base);
        if (else_expr_result == irb->codegen->invalid_inst_src)
            return else_expr_result;
    } else {
        else_expr_result = ir_build_const_void(irb, scope, node);
        ir_build_end_expr(irb, scope, node, else_expr_result, &peer_parent->peers.at(1)->base);
    }
    IrBasicBlockSrc *after_else_block = irb->current_basic_block;
    if (!instr_is_unreachable(else_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, endif_block);
    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = then_expr_result;
    incoming_values[1] = else_expr_result;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = after_then_block;
    incoming_blocks[1] = after_else_block;

    IrInstSrc *phi = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, peer_parent);
    return ir_expr_wrap(irb, scope, phi, result_loc);
}

static IrInstSrc *ir_gen_if_err_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeIfErrorExpr);
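    // Lowers `if (err_union) |payload| then else |err| other`: the error union
    // is tested with test_err, the payload is unwrapped on the ok path and the
    // error code on the else path, and the two results meet in a phi at TryEnd.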

    AstNode *target_node = node->data.if_err_expr.target_node;
    AstNode *then_node = node->data.if_err_expr.then_node;
    AstNode *else_node = node->data.if_err_expr.else_node;
    bool var_is_ptr = node->data.if_err_expr.var_is_ptr;
    bool var_is_const = true;
    Buf *var_symbol = node->data.if_err_expr.var_symbol;
    Buf *err_symbol = node->data.if_err_expr.err_symbol;

    IrInstSrc *err_val_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr, nullptr);
    if (err_val_ptr == irb->codegen->invalid_inst_src)
        return err_val_ptr;

    IrInstSrc *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
    IrInstSrc *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true, false);

    IrBasicBlockSrc *ok_block = ir_create_basic_block(irb, scope, "TryOk");
    IrBasicBlockSrc *else_block = ir_create_basic_block(irb, scope, "TryElse");
    IrBasicBlockSrc *endif_block = ir_create_basic_block(irb, scope, "TryEnd");

    bool force_comptime = ir_should_inline(irb->exec, scope);
    IrInstSrc *is_comptime = force_comptime ? ir_build_const_bool(irb, scope, node, true) : ir_build_test_comptime(irb, scope, node, is_err);
    IrInstSrc *cond_br_inst = ir_build_cond_br(irb, scope, node, is_err, else_block, ok_block, is_comptime);

    ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, else_block, endif_block,
            result_loc, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, ok_block);

    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
    Scope *var_scope;
    if (var_symbol) {
        bool is_shadowable = false;
        IrInstSrc *var_is_comptime = force_comptime ? ir_build_const_bool(irb, subexpr_scope, node, true) : ir_build_test_comptime(irb, subexpr_scope, node, err_val);
        ZigVar *var = ir_create_var(irb, node, subexpr_scope,
                var_symbol, var_is_const, var_is_const, is_shadowable, var_is_comptime);

        IrInstSrc *payload_ptr = ir_build_unwrap_err_payload_src(irb, subexpr_scope, node, err_val_ptr, false, false);
        IrInstSrc *var_value = var_is_ptr ?
            payload_ptr : ir_build_load_ptr(irb, subexpr_scope, node, payload_ptr);
        build_decl_var_and_init(irb, subexpr_scope, node, var, var_value, buf_ptr(var_symbol), var_is_comptime);
        var_scope = var->child_scope;
    } else {
        var_scope = subexpr_scope;
    }
    IrInstSrc *then_expr_result = ir_gen_node_extra(irb, then_node, var_scope, lval,
            &peer_parent->peers.at(0)->base);
    if (then_expr_result == irb->codegen->invalid_inst_src)
        return then_expr_result;
    IrBasicBlockSrc *after_then_block = irb->current_basic_block;
    if (!instr_is_unreachable(then_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, else_block);

    IrInstSrc *else_expr_result;
    if (else_node) {
        Scope *err_var_scope;
        if (err_symbol) {
            bool is_shadowable = false;
            bool is_const = true;
            ZigVar *var = ir_create_var(irb, node, subexpr_scope,
                    err_symbol, is_const, is_const, is_shadowable, is_comptime);

            IrInstSrc *err_ptr = ir_build_unwrap_err_code_src(irb, subexpr_scope, node, err_val_ptr);
            IrInstSrc *err_value = ir_build_load_ptr(irb, subexpr_scope, node, err_ptr);
            build_decl_var_and_init(irb, subexpr_scope, node, var, err_value, buf_ptr(err_symbol), is_comptime);
            err_var_scope = var->child_scope;
        } else {
            err_var_scope = subexpr_scope;
        }
        else_expr_result = ir_gen_node_extra(irb, else_node, err_var_scope, lval, &peer_parent->peers.at(1)->base);
        if (else_expr_result == irb->codegen->invalid_inst_src)
            return else_expr_result;
    } else {
        else_expr_result = ir_build_const_void(irb, scope, node);
        ir_build_end_expr(irb, scope, node, else_expr_result, &peer_parent->peers.at(1)->base);
    }
    IrBasicBlockSrc *after_else_block = irb->current_basic_block;
    if (!instr_is_unreachable(else_expr_result))
        ir_mark_gen(ir_build_br(irb, scope, node, endif_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, endif_block);
    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = then_expr_result;
    incoming_values[1] = else_expr_result;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = after_then_block;
    incoming_blocks[1] = after_else_block;

    IrInstSrc *phi = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, peer_parent);
    return ir_expr_wrap(irb, scope, phi, result_loc);
}

static bool ir_gen_switch_prong_expr(IrBuilderSrc *irb, Scope *scope, AstNode *switch_node, AstNode *prong_node,
        IrBasicBlockSrc *end_block, IrInstSrc *is_comptime, IrInstSrc *var_is_comptime,
        IrInstSrc *target_value_ptr, IrInstSrc **prong_values, size_t prong_values_len,
        ZigList<IrBasicBlockSrc *> *incoming_blocks, ZigList<IrInstSrc *> *incoming_values,
        IrInstSrcSwitchElseVar **out_switch_else_var, LVal lval, ResultLoc *result_loc)
{
    assert(switch_node->type == NodeTypeSwitchExpr);
    assert(prong_node->type == NodeTypeSwitchProng);
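    // Generates the body of one switch prong. If the prong captures a value
    // (`=> |payload|` or `=> |*payload|`), a const variable is declared for it:
    // an else prong gets it from a switch_else_var instruction, an item prong
    // from switch_var over the matched values, and a range prong directly from
    // the target pointer. The prong result is appended to the phi inputs.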

    AstNode *expr_node = prong_node->data.switch_prong.expr;
    AstNode *var_symbol_node = prong_node->data.switch_prong.var_symbol;
    Scope *child_scope;
    if (var_symbol_node) {
        assert(var_symbol_node->type == NodeTypeSymbol);
        Buf *var_name = var_symbol_node->data.symbol_expr.symbol;
        bool var_is_ptr = prong_node->data.switch_prong.var_is_ptr;

        bool is_shadowable = false;
        bool is_const = true;
        ZigVar *var = ir_create_var(irb, var_symbol_node, scope,
                var_name, is_const, is_const, is_shadowable, var_is_comptime);
        child_scope = var->child_scope;
        IrInstSrc *var_value;
        if (out_switch_else_var != nullptr) {
            IrInstSrcSwitchElseVar *switch_else_var = ir_build_switch_else_var(irb, scope, var_symbol_node,
                    target_value_ptr);
            *out_switch_else_var = switch_else_var;
            IrInstSrc *payload_ptr = &switch_else_var->base;
            var_value = var_is_ptr ?
                payload_ptr : ir_build_load_ptr(irb, scope, var_symbol_node, payload_ptr);
        } else if (prong_values != nullptr) {
            IrInstSrc *payload_ptr = ir_build_switch_var(irb, scope, var_symbol_node, target_value_ptr,
                    prong_values, prong_values_len);
            var_value = var_is_ptr ?
                payload_ptr : ir_build_load_ptr(irb, scope, var_symbol_node, payload_ptr);
        } else {
            var_value = var_is_ptr ?
                target_value_ptr : ir_build_load_ptr(irb, scope, var_symbol_node, target_value_ptr);
        }
        build_decl_var_and_init(irb, scope, var_symbol_node, var, var_value, buf_ptr(var_name), var_is_comptime);
    } else {
        child_scope = scope;
    }

    IrInstSrc *expr_result = ir_gen_node_extra(irb, expr_node, child_scope, lval, result_loc);
    if (expr_result == irb->codegen->invalid_inst_src)
        return false;
    if (!instr_is_unreachable(expr_result))
        ir_mark_gen(ir_build_br(irb, scope, switch_node, end_block, is_comptime));
    incoming_blocks->append(irb->current_basic_block);
    incoming_values->append(expr_result);
    return true;
}

static IrInstSrc *ir_gen_switch_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeSwitchExpr);
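    // Lowers a `switch` expression. The target is evaluated once through a
    // pointer so prongs can capture by value or by reference; prongs are
    // handled in three groups below (range prongs, the else/'_' prong, and
    // plain item prongs), all converging on a phi in SwitchEnd.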

    AstNode *target_node = node->data.switch_expr.expr;
    IrInstSrc *target_value_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr, nullptr);
    if (target_value_ptr == irb->codegen->invalid_inst_src)
        return target_value_ptr;
    IrInstSrc *target_value = ir_build_switch_target(irb, scope, node, target_value_ptr);

    IrBasicBlockSrc *else_block = ir_create_basic_block(irb, scope, "SwitchElse");
    IrBasicBlockSrc *end_block = ir_create_basic_block(irb, scope, "SwitchEnd");

    size_t prong_count = node->data.switch_expr.prongs.length;
    ZigList<IrInstSrcSwitchBrCase> cases = {0};

    IrInstSrc *is_comptime;
    IrInstSrc *var_is_comptime;
    if (ir_should_inline(irb->exec, scope)) {
        is_comptime = ir_build_const_bool(irb, scope, node, true);
        var_is_comptime = is_comptime;
    } else {
        is_comptime = ir_build_test_comptime(irb, scope, node, target_value);
        var_is_comptime = ir_build_test_comptime(irb, scope, node, target_value_ptr);
    }

    ZigList<IrInstSrc *> incoming_values = {0};
    ZigList<IrBasicBlockSrc *> incoming_blocks = {0};
    ZigList<IrInstSrcCheckSwitchProngsRange> check_ranges = {0};

    IrInstSrcSwitchElseVar *switch_else_var = nullptr;

    ResultLocPeerParent *peer_parent = heap::c_allocator.create<ResultLocPeerParent>();
    peer_parent->base.id = ResultLocIdPeerParent;
    peer_parent->base.allow_write_through_const = result_loc->allow_write_through_const;
    peer_parent->end_bb = end_block;
    peer_parent->is_comptime = is_comptime;
    peer_parent->parent = result_loc;

    ir_build_reset_result(irb, scope, node, &peer_parent->base);

    // First do the else and the ranges
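    // Range prongs (e.g. `1...9 => ...`) cannot be expressed as switch_br
    // cases, so they become explicit compare-and-branch chains here, and the
    // else / '_' prong becomes the else destination of the switch_br.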
    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
    Scope *comptime_scope = create_comptime_scope(irb->codegen, node, scope);
    AstNode *else_prong = nullptr;
    AstNode *underscore_prong = nullptr;
    for (size_t prong_i = 0; prong_i < prong_count; prong_i += 1) {
        AstNode *prong_node = node->data.switch_expr.prongs.at(prong_i);
        size_t prong_item_count = prong_node->data.switch_prong.items.length;
        if (prong_node->data.switch_prong.any_items_are_range) {
            ResultLocPeer *this_peer_result_loc = create_peer_result(peer_parent);

            IrInstSrc *ok_bit = nullptr;
            AstNode *last_item_node = nullptr;
            for (size_t item_i = 0; item_i < prong_item_count; item_i += 1) {
                AstNode *item_node = prong_node->data.switch_prong.items.at(item_i);
                last_item_node = item_node;
                if (item_node->type == NodeTypeSwitchRange) {
                    AstNode *start_node = item_node->data.switch_range.start;
                    AstNode *end_node = item_node->data.switch_range.end;

                    IrInstSrc *start_value = ir_gen_node(irb, start_node, comptime_scope);
                    if (start_value == irb->codegen->invalid_inst_src)
                        return irb->codegen->invalid_inst_src;

                    IrInstSrc *end_value = ir_gen_node(irb, end_node, comptime_scope);
                    if (end_value == irb->codegen->invalid_inst_src)
                        return irb->codegen->invalid_inst_src;

                    IrInstSrcCheckSwitchProngsRange *check_range = check_ranges.add_one();
                    check_range->start = start_value;
                    check_range->end = end_value;

                    IrInstSrc *lower_range_ok = ir_build_bin_op(irb, scope, item_node, IrBinOpCmpGreaterOrEq,
                            target_value, start_value, false);
                    IrInstSrc *upper_range_ok = ir_build_bin_op(irb, scope, item_node, IrBinOpCmpLessOrEq,
                            target_value, end_value, false);
                    IrInstSrc *both_ok = ir_build_bin_op(irb, scope, item_node, IrBinOpBoolAnd,
                            lower_range_ok, upper_range_ok, false);
                    if (ok_bit) {
                        ok_bit = ir_build_bin_op(irb, scope, item_node, IrBinOpBoolOr, both_ok, ok_bit, false);
                    } else {
                        ok_bit = both_ok;
                    }
                } else {
                    IrInstSrc *item_value = ir_gen_node(irb, item_node, comptime_scope);
                    if (item_value == irb->codegen->invalid_inst_src)
                        return irb->codegen->invalid_inst_src;

                    IrInstSrcCheckSwitchProngsRange *check_range = check_ranges.add_one();
                    check_range->start = item_value;
                    check_range->end = item_value;

                    IrInstSrc *cmp_ok = ir_build_bin_op(irb, scope, item_node, IrBinOpCmpEq,
                            item_value, target_value, false);
                    if (ok_bit) {
                        ok_bit = ir_build_bin_op(irb, scope, item_node, IrBinOpBoolOr, cmp_ok, ok_bit, false);
                    } else {
                        ok_bit = cmp_ok;
                    }
                }
            }

            IrBasicBlockSrc *range_block_yes = ir_create_basic_block(irb, scope, "SwitchRangeYes");
            IrBasicBlockSrc *range_block_no = ir_create_basic_block(irb, scope, "SwitchRangeNo");

            assert(ok_bit);
            assert(last_item_node);
            IrInstSrc *br_inst = ir_mark_gen(ir_build_cond_br(irb, scope, last_item_node, ok_bit,
                        range_block_yes, range_block_no, is_comptime));
            if (peer_parent->base.source_instruction == nullptr) {
                peer_parent->base.source_instruction = br_inst;
            }

            if (peer_parent->peers.length > 0) {
                peer_parent->peers.last()->next_bb = range_block_yes;
            }
            peer_parent->peers.append(this_peer_result_loc);
            ir_set_cursor_at_end_and_append_block(irb, range_block_yes);
            if (!ir_gen_switch_prong_expr(irb, subexpr_scope, node, prong_node, end_block,
                is_comptime, var_is_comptime, target_value_ptr, nullptr, 0,
                &incoming_blocks, &incoming_values, nullptr, LValNone, &this_peer_result_loc->base))
            {
                return irb->codegen->invalid_inst_src;
            }

            ir_set_cursor_at_end_and_append_block(irb, range_block_no);
        } else {
            if (prong_item_count == 0) {
                if (else_prong) {
                    ErrorMsg *msg = add_node_error(irb->codegen, prong_node,
                            buf_sprintf("multiple else prongs in switch expression"));
                    add_error_note(irb->codegen, msg, else_prong,
                            buf_sprintf("previous else prong is here"));
                    return irb->codegen->invalid_inst_src;
                }
                else_prong = prong_node;
            } else if (prong_item_count == 1 &&
                    prong_node->data.switch_prong.items.at(0)->type == NodeTypeSymbol &&
                    buf_eql_str(prong_node->data.switch_prong.items.at(0)->data.symbol_expr.symbol, "_")) {
                if (underscore_prong) {
                    ErrorMsg *msg = add_node_error(irb->codegen, prong_node,
                            buf_sprintf("multiple '_' prongs in switch expression"));
                    add_error_note(irb->codegen, msg, underscore_prong,
                            buf_sprintf("previous '_' prong is here"));
                    return irb->codegen->invalid_inst_src;
                }
                underscore_prong = prong_node;
            } else {
                continue;
            }
            if (underscore_prong && else_prong) {
                ErrorMsg *msg = add_node_error(irb->codegen, prong_node,
                        buf_sprintf("else and '_' prong in switch expression"));
                if (underscore_prong == prong_node)
                    add_error_note(irb->codegen, msg, else_prong,
                            buf_sprintf("else prong is here"));
                else
                    add_error_note(irb->codegen, msg, underscore_prong,
                            buf_sprintf("'_' prong is here"));
                return irb->codegen->invalid_inst_src;
            }
            ResultLocPeer *this_peer_result_loc = create_peer_result(peer_parent);

            IrBasicBlockSrc *prev_block = irb->current_basic_block;
            if (peer_parent->peers.length > 0) {
                peer_parent->peers.last()->next_bb = else_block;
            }
            peer_parent->peers.append(this_peer_result_loc);
            ir_set_cursor_at_end_and_append_block(irb, else_block);
            if (!ir_gen_switch_prong_expr(irb, subexpr_scope, node, prong_node, end_block,
                is_comptime, var_is_comptime, target_value_ptr, nullptr, 0, &incoming_blocks, &incoming_values,
                &switch_else_var, LValNone, &this_peer_result_loc->base))
            {
                return irb->codegen->invalid_inst_src;
            }
            ir_set_cursor_at_end(irb, prev_block);
        }
    }

    // next do the non-else non-ranges
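    // These become ordinary switch_br cases: each prong gets its own
    // SwitchProng block and every matched item value maps to that block.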
    for (size_t prong_i = 0; prong_i < prong_count; prong_i += 1) {
        AstNode *prong_node = node->data.switch_expr.prongs.at(prong_i);
        size_t prong_item_count = prong_node->data.switch_prong.items.length;
        if (prong_item_count == 0)
            continue;
        if (prong_node->data.switch_prong.any_items_are_range)
            continue;
        if (underscore_prong == prong_node)
            continue;

        ResultLocPeer *this_peer_result_loc = create_peer_result(peer_parent);

        IrBasicBlockSrc *prong_block = ir_create_basic_block(irb, scope, "SwitchProng");
        IrInstSrc **items = heap::c_allocator.allocate<IrInstSrc *>(prong_item_count);

        for (size_t item_i = 0; item_i < prong_item_count; item_i += 1) {
            AstNode *item_node = prong_node->data.switch_prong.items.at(item_i);
            assert(item_node->type != NodeTypeSwitchRange);

            IrInstSrc *item_value = ir_gen_node(irb, item_node, comptime_scope);
            if (item_value == irb->codegen->invalid_inst_src)
                return irb->codegen->invalid_inst_src;

            IrInstSrcCheckSwitchProngsRange *check_range = check_ranges.add_one();
            check_range->start = item_value;
            check_range->end = item_value;

            IrInstSrcSwitchBrCase *this_case = cases.add_one();
            this_case->value = item_value;
            this_case->block = prong_block;

            items[item_i] = item_value;
        }

        IrBasicBlockSrc *prev_block = irb->current_basic_block;
        if (peer_parent->peers.length > 0) {
            peer_parent->peers.last()->next_bb = prong_block;
        }
        peer_parent->peers.append(this_peer_result_loc);
        ir_set_cursor_at_end_and_append_block(irb, prong_block);
        if (!ir_gen_switch_prong_expr(irb, subexpr_scope, node, prong_node, end_block,
            is_comptime, var_is_comptime, target_value_ptr, items, prong_item_count,
            &incoming_blocks, &incoming_values, nullptr, LValNone, &this_peer_result_loc->base))
        {
            return irb->codegen->invalid_inst_src;
        }

        ir_set_cursor_at_end(irb, prev_block);
    }

    IrInstSrc *switch_prongs_void = ir_build_check_switch_prongs(irb, scope, node, target_value,
            check_ranges.items, check_ranges.length, else_prong != nullptr, underscore_prong != nullptr);

    IrInstSrc *br_instruction;
    if (cases.length == 0) {
        br_instruction = ir_build_br(irb, scope, node, else_block, is_comptime);
    } else {
        IrInstSrcSwitchBr *switch_br = ir_build_switch_br_src(irb, scope, node, target_value, else_block,
                cases.length, cases.items, is_comptime, switch_prongs_void);
        if (switch_else_var != nullptr) {
            switch_else_var->switch_br = switch_br;
        }
        br_instruction = &switch_br->base;
    }
    if (peer_parent->base.source_instruction == nullptr) {
        peer_parent->base.source_instruction = br_instruction;
    }
    for (size_t i = 0; i < peer_parent->peers.length; i += 1) {
        peer_parent->peers.at(i)->base.source_instruction = peer_parent->base.source_instruction;
    }

    if (!else_prong && !underscore_prong) {
        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = else_block;
        }
        ir_set_cursor_at_end_and_append_block(irb, else_block);
        ir_build_unreachable(irb, scope, node);
    } else {
        if (peer_parent->peers.length != 0) {
            peer_parent->peers.last()->next_bb = end_block;
        }
    }

    ir_set_cursor_at_end_and_append_block(irb, end_block);
    assert(incoming_blocks.length == incoming_values.length);
    IrInstSrc *result_instruction;
    if (incoming_blocks.length == 0) {
        result_instruction = ir_build_const_void(irb, scope, node);
    } else {
        result_instruction = ir_build_phi(irb, scope, node, incoming_blocks.length,
                incoming_blocks.items, incoming_values.items, peer_parent);
    }
    return ir_lval_wrap(irb, scope, result_instruction, lval, result_loc);
}

static IrInstSrc *ir_gen_comptime(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node, LVal lval) {
    assert(node->type == NodeTypeCompTime);

    Scope *child_scope = create_comptime_scope(irb->codegen, node, parent_scope);
    // purposefully pass null for result_loc and let EndExpr handle it
    return ir_gen_node_extra(irb, node->data.comptime_expr.expr, child_scope, lval, nullptr);
}

static IrInstSrc *ir_gen_nosuspend(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node, LVal lval) {
    assert(node->type == NodeTypeNoSuspend);

    Scope *child_scope = create_nosuspend_scope(irb->codegen, node, parent_scope);
    // purposefully pass null for result_loc and let EndExpr handle it
    return ir_gen_node_extra(irb, node->data.comptime_expr.expr, child_scope, lval, nullptr);
}

static IrInstSrc *ir_gen_return_from_block(IrBuilderSrc *irb, Scope *break_scope, AstNode *node, ScopeBlock *block_scope) {
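    // Lowers a labeled `break` whose target is a block rather than a loop: the operand
    // (or void) is generated into a fresh peer result location, the defers between the
    // break and the block are run, and control branches to the block's end basic block.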
    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, break_scope)) {
        is_comptime = ir_build_const_bool(irb, break_scope, node, true);
    } else {
        is_comptime = block_scope->is_comptime;
    }

    IrInstSrc *result_value;
    if (node->data.break_expr.expr) {
        ResultLocPeer *peer_result = create_peer_result(block_scope->peer_parent);
        block_scope->peer_parent->peers.append(peer_result);

        result_value = ir_gen_node_extra(irb, node->data.break_expr.expr, break_scope, block_scope->lval,
                &peer_result->base);
        if (result_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    } else {
        result_value = ir_build_const_void(irb, break_scope, node);
    }

    IrBasicBlockSrc *dest_block = block_scope->end_block;
    if (!ir_gen_defers_for_block(irb, break_scope, dest_block->scope, nullptr, nullptr))
        return irb->codegen->invalid_inst_src;

    block_scope->incoming_blocks->append(irb->current_basic_block);
    block_scope->incoming_values->append(result_value);
    return ir_build_br(irb, break_scope, node, dest_block, is_comptime);
}

static IrInstSrc *ir_gen_break(IrBuilderSrc *irb, Scope *break_scope, AstNode *node) {
    assert(node->type == NodeTypeBreak);

    // Search up the scope. We'll find one of these things first:
    // * function definition scope or global scope => error, break outside loop
    // * defer expression scope => error, cannot break out of defer expression
    // * loop scope => OK
    // * (if it's a labeled break) labeled block => OK

    Scope *search_scope = break_scope;
    ScopeLoop *loop_scope;
    for (;;) {
        if (search_scope == nullptr || search_scope->id == ScopeIdFnDef) {
            if (node->data.break_expr.name != nullptr) {
                add_node_error(irb->codegen, node, buf_sprintf("label not found: '%s'", buf_ptr(node->data.break_expr.name)));
                return irb->codegen->invalid_inst_src;
            } else {
                add_node_error(irb->codegen, node, buf_sprintf("break expression outside loop"));
                return irb->codegen->invalid_inst_src;
            }
        } else if (search_scope->id == ScopeIdDeferExpr) {
            add_node_error(irb->codegen, node, buf_sprintf("cannot break out of defer expression"));
            return irb->codegen->invalid_inst_src;
        } else if (search_scope->id == ScopeIdLoop) {
            ScopeLoop *this_loop_scope = (ScopeLoop *)search_scope;
            if (node->data.break_expr.name == nullptr ||
                (this_loop_scope->name != nullptr && buf_eql_buf(node->data.break_expr.name, this_loop_scope->name)))
            {
                loop_scope = this_loop_scope;
                break;
            }
        } else if (search_scope->id == ScopeIdBlock) {
            ScopeBlock *this_block_scope = (ScopeBlock *)search_scope;
            if (node->data.break_expr.name != nullptr &&
                (this_block_scope->name != nullptr && buf_eql_buf(node->data.break_expr.name, this_block_scope->name)))
            {
                assert(this_block_scope->end_block != nullptr);
                return ir_gen_return_from_block(irb, break_scope, node, this_block_scope);
            }
        } else if (search_scope->id == ScopeIdSuspend) {
            add_node_error(irb->codegen, node, buf_sprintf("cannot break out of suspend block"));
            return irb->codegen->invalid_inst_src;
        }
        search_scope = search_scope->parent;
    }

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, break_scope)) {
        is_comptime = ir_build_const_bool(irb, break_scope, node, true);
    } else {
        is_comptime = loop_scope->is_comptime;
    }

    IrInstSrc *result_value;
    if (node->data.break_expr.expr) {
        ResultLocPeer *peer_result = create_peer_result(loop_scope->peer_parent);
        loop_scope->peer_parent->peers.append(peer_result);

        result_value = ir_gen_node_extra(irb, node->data.break_expr.expr, break_scope,
                loop_scope->lval, &peer_result->base);
        if (result_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    } else {
        result_value = ir_build_const_void(irb, break_scope, node);
    }

    IrBasicBlockSrc *dest_block = loop_scope->break_block;
    if (!ir_gen_defers_for_block(irb, break_scope, dest_block->scope, nullptr, nullptr))
        return irb->codegen->invalid_inst_src;

    loop_scope->incoming_blocks->append(irb->current_basic_block);
    loop_scope->incoming_values->append(result_value);
    return ir_build_br(irb, break_scope, node, dest_block, is_comptime);
}

static IrInstSrc *ir_gen_continue(IrBuilderSrc *irb, Scope *continue_scope, AstNode *node) {
    assert(node->type == NodeTypeContinue);

    // Search up the scope. We'll find one of these things first:
    // * function definition scope or global scope => error, continue outside loop
    // * defer expression scope => error, cannot continue out of defer expression
    // * loop scope => OK

    ZigList<ScopeRuntime *> runtime_scopes = {};

    Scope *search_scope = continue_scope;
    ScopeLoop *loop_scope;
    for (;;) {
        if (search_scope == nullptr || search_scope->id == ScopeIdFnDef) {
            if (node->data.continue_expr.name != nullptr) {
                add_node_error(irb->codegen, node, buf_sprintf("labeled loop not found: '%s'", buf_ptr(node->data.continue_expr.name)));
                return irb->codegen->invalid_inst_src;
            } else {
                add_node_error(irb->codegen, node, buf_sprintf("continue expression outside loop"));
                return irb->codegen->invalid_inst_src;
            }
        } else if (search_scope->id == ScopeIdDeferExpr) {
            add_node_error(irb->codegen, node, buf_sprintf("cannot continue out of defer expression"));
            return irb->codegen->invalid_inst_src;
        } else if (search_scope->id == ScopeIdLoop) {
            ScopeLoop *this_loop_scope = (ScopeLoop *)search_scope;
            if (node->data.continue_expr.name == nullptr ||
                (this_loop_scope->name != nullptr && buf_eql_buf(node->data.continue_expr.name, this_loop_scope->name)))
            {
                loop_scope = this_loop_scope;
                break;
            }
        } else if (search_scope->id == ScopeIdRuntime) {
            ScopeRuntime *scope_runtime = (ScopeRuntime *)search_scope;
            runtime_scopes.append(scope_runtime);
        }
        search_scope = search_scope->parent;
    }

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, continue_scope)) {
        is_comptime = ir_build_const_bool(irb, continue_scope, node, true);
    } else {
        is_comptime = loop_scope->is_comptime;
    }

    for (size_t i = 0; i < runtime_scopes.length; i += 1) {
        ScopeRuntime *scope_runtime = runtime_scopes.at(i);
        ir_mark_gen(ir_build_check_runtime_scope(irb, continue_scope, node, scope_runtime->is_comptime, is_comptime));
    }

    IrBasicBlockSrc *dest_block = loop_scope->continue_block;
    if (!ir_gen_defers_for_block(irb, continue_scope, dest_block->scope, nullptr, nullptr))
        return irb->codegen->invalid_inst_src;
    return ir_mark_gen(ir_build_br(irb, continue_scope, node, dest_block, is_comptime));
}

static IrInstSrc *ir_gen_error_type(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeErrorType);
    return ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_global_error_set);
}

static IrInstSrc *ir_gen_defer(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
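    // A defer statement emits no code at its own position; it only records the defer and
    // defer-expression scopes on the AST node so that ir_gen_defers_for_block can generate
    // the deferred expression at each scope exit. The statement itself evaluates to void.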
    assert(node->type == NodeTypeDefer);

    ScopeDefer *defer_child_scope = create_defer_scope(irb->codegen, node, parent_scope);
    node->data.defer.child_scope = &defer_child_scope->base;

    ScopeDeferExpr *defer_expr_scope = create_defer_expr_scope(irb->codegen, node, parent_scope);
    node->data.defer.expr_scope = &defer_expr_scope->base;

    return ir_build_const_void(irb, parent_scope, node);
}

static IrInstSrc *ir_gen_slice(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
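    // Lowers a slice expression `array[start..end :sentinel]`. The array operand is
    // generated as an lvalue pointer; the end and sentinel operands are optional and
    // lowered to null when absent.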
    assert(node->type == NodeTypeSliceExpr);

    AstNodeSliceExpr *slice_expr = &node->data.slice_expr;
    AstNode *array_node = slice_expr->array_ref_expr;
    AstNode *start_node = slice_expr->start;
    AstNode *end_node = slice_expr->end;
    AstNode *sentinel_node = slice_expr->sentinel;

    IrInstSrc *ptr_value = ir_gen_node_extra(irb, array_node, scope, LValPtr, nullptr);
    if (ptr_value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *start_value = ir_gen_node(irb, start_node, scope);
    if (start_value == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *end_value;
    if (end_node) {
        end_value = ir_gen_node(irb, end_node, scope);
        if (end_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    } else {
        end_value = nullptr;
    }

    IrInstSrc *sentinel_value;
    if (sentinel_node) {
        sentinel_value = ir_gen_node(irb, sentinel_node, scope);
        if (sentinel_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    } else {
        sentinel_value = nullptr;
    }

    IrInstSrc *slice = ir_build_slice_src(irb, scope, node, ptr_value, start_value, end_value,
            sentinel_value, true, result_loc);
    return ir_lval_wrap(irb, scope, slice, lval, result_loc);
}

static IrInstSrc *ir_gen_catch(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
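    // Lowers `op1 catch |err| op2`. A `catch unreachable` is special-cased into a plain
    // payload unwrap; otherwise the error union pointer is spilled, tested for an error,
    // and the error and ok branches each write into one of two binary result peers before
    // meeting at a phi in the end block.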
    assert(node->type == NodeTypeCatchExpr);

    AstNode *op1_node = node->data.unwrap_err_expr.op1;
    AstNode *op2_node = node->data.unwrap_err_expr.op2;
    AstNode *var_node = node->data.unwrap_err_expr.symbol;

    if (op2_node->type == NodeTypeUnreachable) {
        if (var_node != nullptr) {
            assert(var_node->type == NodeTypeSymbol);
            Buf *var_name = var_node->data.symbol_expr.symbol;
            add_node_error(irb->codegen, var_node, buf_sprintf("unused variable: '%s'", buf_ptr(var_name)));
            return irb->codegen->invalid_inst_src;
        }
        return ir_gen_catch_unreachable(irb, parent_scope, node, op1_node, lval, result_loc);
    }

    ScopeExpr *spill_scope = create_expr_scope(irb->codegen, op1_node, parent_scope);
    spill_scope->spill_harder = true;

    IrInstSrc *err_union_ptr = ir_gen_node_extra(irb, op1_node, &spill_scope->base, LValPtr, nullptr);
    if (err_union_ptr == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true, false);

    IrInstSrc *is_comptime;
    if (ir_should_inline(irb->exec, parent_scope)) {
        is_comptime = ir_build_const_bool(irb, parent_scope, node, true);
    } else {
        is_comptime = ir_build_test_comptime(irb, parent_scope, node, is_err);
    }

    IrBasicBlockSrc *ok_block = ir_create_basic_block(irb, parent_scope, "UnwrapErrOk");
    IrBasicBlockSrc *err_block = ir_create_basic_block(irb, parent_scope, "UnwrapErrError");
    IrBasicBlockSrc *end_block = ir_create_basic_block(irb, parent_scope, "UnwrapErrEnd");
    IrInstSrc *cond_br_inst = ir_build_cond_br(irb, parent_scope, node, is_err, err_block, ok_block, is_comptime);

    ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, ok_block, end_block, result_loc,
            is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, err_block);
    Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, &spill_scope->base, is_comptime);
    Scope *err_scope;
    if (var_node) {
        assert(var_node->type == NodeTypeSymbol);
        Buf *var_name = var_node->data.symbol_expr.symbol;
        bool is_const = true;
        bool is_shadowable = false;
        ZigVar *var = ir_create_var(irb, node, subexpr_scope, var_name,
            is_const, is_const, is_shadowable, is_comptime);
        err_scope = var->child_scope;
        IrInstSrc *err_ptr = ir_build_unwrap_err_code_src(irb, err_scope, node, err_union_ptr);
        IrInstSrc *err_value = ir_build_load_ptr(irb, err_scope, var_node, err_ptr);
        build_decl_var_and_init(irb, err_scope, var_node, var, err_value, buf_ptr(var_name), is_comptime);
    } else {
        err_scope = subexpr_scope;
    }
    IrInstSrc *err_result = ir_gen_node_extra(irb, op2_node, err_scope, LValNone, &peer_parent->peers.at(0)->base);
    if (err_result == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;
    IrBasicBlockSrc *after_err_block = irb->current_basic_block;
    if (!instr_is_unreachable(err_result))
        ir_mark_gen(ir_build_br(irb, parent_scope, node, end_block, is_comptime));

    ir_set_cursor_at_end_and_append_block(irb, ok_block);
    IrInstSrc *unwrapped_ptr = ir_build_unwrap_err_payload_src(irb, parent_scope, node, err_union_ptr, false, false);
    IrInstSrc *unwrapped_payload = ir_build_load_ptr(irb, parent_scope, node, unwrapped_ptr);
    ir_build_end_expr(irb, parent_scope, node, unwrapped_payload, &peer_parent->peers.at(1)->base);
    IrBasicBlockSrc *after_ok_block = irb->current_basic_block;
    ir_build_br(irb, parent_scope, node, end_block, is_comptime);

    ir_set_cursor_at_end_and_append_block(irb, end_block);
    IrInstSrc **incoming_values = heap::c_allocator.allocate<IrInstSrc *>(2);
    incoming_values[0] = err_result;
    incoming_values[1] = unwrapped_payload;
    IrBasicBlockSrc **incoming_blocks = heap::c_allocator.allocate<IrBasicBlockSrc *>(2);
    incoming_blocks[0] = after_err_block;
    incoming_blocks[1] = after_ok_block;
    IrInstSrc *phi = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values, peer_parent);
    return ir_lval_wrap(irb, parent_scope, phi, lval, result_loc);
}

static bool render_instance_name_recursive(CodeGen *codegen, Buf *name, Scope *outer_scope, Scope *inner_scope) {
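    // Renders the comptime value of every var-decl scope between outer_scope and
    // inner_scope into `name`, comma separated (outermost first). Returns whether
    // anything has been appended yet, i.e. whether the next value needs a leading comma.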
    if (inner_scope == nullptr || inner_scope == outer_scope) return false;
    bool need_comma = render_instance_name_recursive(codegen, name, outer_scope, inner_scope->parent);
    if (inner_scope->id != ScopeIdVarDecl)
        return need_comma;

    ScopeVarDecl *var_scope = (ScopeVarDecl *)inner_scope;
    if (need_comma)
        buf_append_char(name, ',');
    // TODO: const ptr reinterpret here to make the var type agree with the value?
    render_const_value(codegen, name, var_scope->var->const_value);
    return true;
}

static Buf *get_anon_type_name(CodeGen *codegen, IrExecutableSrc *exec, const char *kind_name,
        Scope *scope, AstNode *source_node, Buf *out_bare_name)
{
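    // Picks the display name for an anonymous type: the name of the enclosing declaration
    // when the executable has one, otherwise the generic function instantiation rendered as
    // "fn(arg values)", otherwise a positional fallback of the form "kind:line:column".
    // out_bare_name receives the corresponding bare name.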
    if (exec != nullptr && exec->name) {
        ZigType *import = get_scope_import(scope);
        Buf *namespace_name = buf_alloc();
        append_namespace_qualification(codegen, namespace_name, import);
        buf_append_buf(namespace_name, exec->name);
        buf_init_from_buf(out_bare_name, exec->name);
        return namespace_name;
    } else if (exec != nullptr && exec->name_fn != nullptr) {
        Buf *name = buf_alloc();
        buf_append_buf(name, &exec->name_fn->symbol_name);
        buf_appendf(name, "(");
        render_instance_name_recursive(codegen, name, &exec->name_fn->fndef_scope->base, exec->begin_scope);
        buf_appendf(name, ")");
        buf_init_from_buf(out_bare_name, name);
        return name;
    } else {
        ZigType *import = get_scope_import(scope);
        Buf *namespace_name = buf_alloc();
        append_namespace_qualification(codegen, namespace_name, import);
        buf_appendf(namespace_name, "%s:%" ZIG_PRI_usize ":%" ZIG_PRI_usize, kind_name,
                source_node->line + 1, source_node->column + 1);
        buf_init_from_buf(out_bare_name, namespace_name);
        return namespace_name;
    }
}

static IrInstSrc *ir_gen_container_decl(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
    assert(node->type == NodeTypeContainerDecl);

    ContainerKind kind = node->data.container_decl.kind;
    Buf *bare_name = buf_alloc();
    Buf *name = get_anon_type_name(irb->codegen, irb->exec, container_string(kind), parent_scope, node, bare_name);

    ContainerLayout layout = node->data.container_decl.layout;
    ZigType *container_type = get_partial_container_type(irb->codegen, parent_scope,
            kind, node, buf_ptr(name), bare_name, layout);
    ScopeDecls *child_scope = get_container_scope(container_type);

    for (size_t i = 0; i < node->data.container_decl.decls.length; i += 1) {
        AstNode *child_node = node->data.container_decl.decls.at(i);
        scan_decls(irb->codegen, child_scope, child_node);
    }

    TldContainer *tld_container = heap::c_allocator.create<TldContainer>();
    init_tld(&tld_container->base, TldIdContainer, bare_name, VisibModPub, node, parent_scope);
    tld_container->type_entry = container_type;
    tld_container->decls_scope = child_scope;
    irb->codegen->resolve_queue.append(&tld_container->base);

    // Add this to the list to mark as invalid if analyzing this exec fails.
    irb->exec->tld_list.append(&tld_container->base);

    return ir_build_const_type(irb, parent_scope, node, container_type);
}

// errors should be populated with set1's values
static ZigType *get_error_set_union(CodeGen *g, ErrorTableEntry **errors, ZigType *set1, ZigType *set2,
        Buf *type_name)
{
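    // Builds a new error set type containing every error from set1 plus those errors of
    // set2 not already present in the caller-populated `errors` table. When no explicit
    // type_name is given, the name is rendered as "error{A,B,...}".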
    assert(set1->id == ZigTypeIdErrorSet);
    assert(set2->id == ZigTypeIdErrorSet);

    ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
    err_set_type->size_in_bits = g->builtin_types.entry_global_error_set->size_in_bits;
    err_set_type->abi_align = g->builtin_types.entry_global_error_set->abi_align;
    err_set_type->abi_size = g->builtin_types.entry_global_error_set->abi_size;
    if (type_name == nullptr) {
        buf_resize(&err_set_type->name, 0);
        buf_appendf(&err_set_type->name, "error{");
    } else {
        buf_init_from_buf(&err_set_type->name, type_name);
    }

    for (uint32_t i = 0, count = set1->data.error_set.err_count; i < count; i += 1) {
        assert(errors[set1->data.error_set.errors[i]->value] == set1->data.error_set.errors[i]);
    }

    uint32_t count = set1->data.error_set.err_count;
    for (uint32_t i = 0; i < set2->data.error_set.err_count; i += 1) {
        ErrorTableEntry *error_entry = set2->data.error_set.errors[i];
        if (errors[error_entry->value] == nullptr) {
            count += 1;
        }
    }

    err_set_type->data.error_set.err_count = count;
    err_set_type->data.error_set.errors = heap::c_allocator.allocate<ErrorTableEntry *>(count);

    bool need_comma = false;
    for (uint32_t i = 0; i < set1->data.error_set.err_count; i += 1) {
        ErrorTableEntry *error_entry = set1->data.error_set.errors[i];
        if (type_name == nullptr) {
            const char *comma = need_comma ? "," : "";
            need_comma = true;
            buf_appendf(&err_set_type->name, "%s%s", comma, buf_ptr(&error_entry->name));
        }
        err_set_type->data.error_set.errors[i] = error_entry;
    }

    uint32_t index = set1->data.error_set.err_count;
    for (uint32_t i = 0; i < set2->data.error_set.err_count; i += 1) {
        ErrorTableEntry *error_entry = set2->data.error_set.errors[i];
        if (errors[error_entry->value] == nullptr) {
            errors[error_entry->value] = error_entry;
            if (type_name == nullptr) {
                const char *comma = need_comma ? "," : "";
                need_comma = true;
                buf_appendf(&err_set_type->name, "%s%s", comma, buf_ptr(&error_entry->name));
            }
            err_set_type->data.error_set.errors[index] = error_entry;
            index += 1;
        }
    }
    assert(index == count);

    if (type_name == nullptr) {
        buf_appendf(&err_set_type->name, "}");
    }

    return err_set_type;
}

static ZigType *make_err_set_with_one_item(CodeGen *g, Scope *parent_scope, AstNode *node,
        ErrorTableEntry *err_entry)
{
    ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
    buf_resize(&err_set_type->name, 0);
    buf_appendf(&err_set_type->name, "error{%s}", buf_ptr(&err_entry->name));
    err_set_type->size_in_bits = g->builtin_types.entry_global_error_set->size_in_bits;
    err_set_type->abi_align = g->builtin_types.entry_global_error_set->abi_align;
    err_set_type->abi_size = g->builtin_types.entry_global_error_set->abi_size;
    err_set_type->data.error_set.err_count = 1;
    err_set_type->data.error_set.errors = heap::c_allocator.create<ErrorTableEntry *>();

    err_set_type->data.error_set.errors[0] = err_entry;

    return err_set_type;
}

static AstNode *ast_field_to_symbol_node(AstNode *err_set_field_node) {
    if (err_set_field_node->type == NodeTypeSymbol) {
        return err_set_field_node;
    } else if (err_set_field_node->type == NodeTypeErrorSetField) {
        assert(err_set_field_node->data.err_set_field.field_name->type == NodeTypeSymbol);
        return err_set_field_node->data.err_set_field.field_name;
    } else {
        return err_set_field_node;
    }
}

static IrInstSrc *ir_gen_err_set_decl(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
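    // Lowers `error { ... }`: each declared error is interned in the global error table,
    // reusing the numeric value of any existing error with the same name, and duplicate
    // names within a single declaration are diagnosed via a scratch `errors` table.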
    assert(node->type == NodeTypeErrorSetDecl);

    uint32_t err_count = node->data.err_set_decl.decls.length;

    Buf bare_name = BUF_INIT;
    Buf *type_name = get_anon_type_name(irb->codegen, irb->exec, "error", parent_scope, node, &bare_name);
    ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
    buf_init_from_buf(&err_set_type->name, type_name);
    err_set_type->data.error_set.err_count = err_count;
    err_set_type->size_in_bits = irb->codegen->builtin_types.entry_global_error_set->size_in_bits;
    err_set_type->abi_align = irb->codegen->builtin_types.entry_global_error_set->abi_align;
    err_set_type->abi_size = irb->codegen->builtin_types.entry_global_error_set->abi_size;
    err_set_type->data.error_set.errors = heap::c_allocator.allocate<ErrorTableEntry *>(err_count);

    size_t errors_count = irb->codegen->errors_by_index.length + err_count;
    ErrorTableEntry **errors = heap::c_allocator.allocate<ErrorTableEntry *>(errors_count);

    for (uint32_t i = 0; i < err_count; i += 1) {
        AstNode *field_node = node->data.err_set_decl.decls.at(i);
        AstNode *symbol_node = ast_field_to_symbol_node(field_node);
        Buf *err_name = symbol_node->data.symbol_expr.symbol;
        ErrorTableEntry *err = heap::c_allocator.create<ErrorTableEntry>();
        err->decl_node = field_node;
        buf_init_from_buf(&err->name, err_name);

        auto existing_entry = irb->codegen->error_table.put_unique(err_name, err);
        if (existing_entry) {
            err->value = existing_entry->value->value;
        } else {
            size_t error_value_count = irb->codegen->errors_by_index.length;
            assert((uint32_t)error_value_count < (((uint32_t)1) << (uint32_t)irb->codegen->err_tag_type->data.integral.bit_count));
            err->value = error_value_count;
            irb->codegen->errors_by_index.append(err);
        }
        err_set_type->data.error_set.errors[i] = err;

        ErrorTableEntry *prev_err = errors[err->value];
        if (prev_err != nullptr) {
            ErrorMsg *msg = add_node_error(irb->codegen, ast_field_to_symbol_node(err->decl_node),
                    buf_sprintf("duplicate error: '%s'", buf_ptr(&err->name)));
            add_error_note(irb->codegen, msg, ast_field_to_symbol_node(prev_err->decl_node),
                    buf_sprintf("other error here"));
            return irb->codegen->invalid_inst_src;
        }
        errors[err->value] = err;
    }
    heap::c_allocator.deallocate(errors, errors_count);
    return ir_build_const_type(irb, parent_scope, node, err_set_type);
}

static IrInstSrc *ir_gen_fn_proto(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
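    // Lowers a function prototype. Parameter type expressions are generated eagerly,
    // stopping at a var-args marker; parameters declared with `var` get a null type.
    // Inferred return types are not implemented yet and produce an error.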
    assert(node->type == NodeTypeFnProto);

    size_t param_count = node->data.fn_proto.params.length;
    IrInstSrc **param_types = heap::c_allocator.allocate<IrInstSrc*>(param_count);

    bool is_var_args = false;
    for (size_t i = 0; i < param_count; i += 1) {
        AstNode *param_node = node->data.fn_proto.params.at(i);
        if (param_node->data.param_decl.is_var_args) {
            is_var_args = true;
            break;
        }
        if (param_node->data.param_decl.var_token == nullptr) {
            AstNode *type_node = param_node->data.param_decl.type;
            IrInstSrc *type_value = ir_gen_node(irb, type_node, parent_scope);
            if (type_value == irb->codegen->invalid_inst_src)
                return irb->codegen->invalid_inst_src;
            param_types[i] = type_value;
        } else {
            param_types[i] = nullptr;
        }
    }

    IrInstSrc *align_value = nullptr;
    if (node->data.fn_proto.align_expr != nullptr) {
        align_value = ir_gen_node(irb, node->data.fn_proto.align_expr, parent_scope);
        if (align_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    }

    IrInstSrc *callconv_value = nullptr;
    if (node->data.fn_proto.callconv_expr != nullptr) {
        callconv_value = ir_gen_node(irb, node->data.fn_proto.callconv_expr, parent_scope);
        if (callconv_value == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
    }

    IrInstSrc *return_type;
    if (node->data.fn_proto.return_var_token == nullptr) {
        if (node->data.fn_proto.return_type == nullptr) {
            return_type = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_void);
        } else {
            return_type = ir_gen_node(irb, node->data.fn_proto.return_type, parent_scope);
            if (return_type == irb->codegen->invalid_inst_src)
                return irb->codegen->invalid_inst_src;
        }
    } else {
        add_node_error(irb->codegen, node,
            buf_sprintf("TODO implement inferred return types https://github.com/ziglang/zig/issues/447"));
        return irb->codegen->invalid_inst_src;
        //return_type = nullptr;
    }

    return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, callconv_value, return_type, is_var_args);
}

static IrInstSrc *ir_gen_resume(IrBuilderSrc *irb, Scope *scope, AstNode *node) {
    assert(node->type == NodeTypeResume);
    if (get_scope_nosuspend(scope) != nullptr) {
        add_node_error(irb->codegen, node, buf_sprintf("resume in nosuspend scope"));
        return irb->codegen->invalid_inst_src;
    }

    IrInstSrc *target_inst = ir_gen_node_extra(irb, node->data.resume_expr.expr, scope, LValPtr, nullptr);
    if (target_inst == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    return ir_build_resume_src(irb, scope, node, target_inst);
}

static IrInstSrc *ir_gen_await_expr(IrBuilderSrc *irb, Scope *scope, AstNode *node, LVal lval,
        ResultLoc *result_loc)
{
    assert(node->type == NodeTypeAwaitExpr);

    bool is_nosuspend = get_scope_nosuspend(scope) != nullptr;

    AstNode *expr_node = node->data.await_expr.expr;
    if (expr_node->type == NodeTypeFnCallExpr && expr_node->data.fn_call_expr.modifier == CallModifierBuiltin) {
        AstNode *fn_ref_expr = expr_node->data.fn_call_expr.fn_ref_expr;
        Buf *name = fn_ref_expr->data.symbol_expr.symbol;
        auto entry = irb->codegen->builtin_fn_table.maybe_get(name);
        if (entry != nullptr) {
            BuiltinFnEntry *builtin_fn = entry->value;
            if (builtin_fn->id == BuiltinFnIdAsyncCall) {
                return ir_gen_async_call(irb, scope, node, expr_node, lval, result_loc);
            }
        }
    }

    ZigFn *fn_entry = exec_fn_entry(irb->exec);
    if (!fn_entry) {
        add_node_error(irb->codegen, node, buf_sprintf("await outside function definition"));
        return irb->codegen->invalid_inst_src;
    }
    ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope);
    if (existing_suspend_scope) {
        if (!existing_suspend_scope->reported_err) {
            ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot await inside suspend block"));
            add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here"));
            existing_suspend_scope->reported_err = true;
        }
        return irb->codegen->invalid_inst_src;
    }

    IrInstSrc *target_inst = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
    if (target_inst == irb->codegen->invalid_inst_src)
        return irb->codegen->invalid_inst_src;

    IrInstSrc *await_inst = ir_build_await_src(irb, scope, node, target_inst, result_loc, is_nosuspend);
    return ir_lval_wrap(irb, scope, await_inst, lval, result_loc);
}

static IrInstSrc *ir_gen_suspend(IrBuilderSrc *irb, Scope *parent_scope, AstNode *node) {
    assert(node->type == NodeTypeSuspend);

    ZigFn *fn_entry = exec_fn_entry(irb->exec);
    if (!fn_entry) {
        add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition"));
        return irb->codegen->invalid_inst_src;
    }
    if (get_scope_nosuspend(parent_scope) != nullptr) {
        add_node_error(irb->codegen, node, buf_sprintf("suspend in nosuspend scope"));
        return irb->codegen->invalid_inst_src;
    }

    ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope);
    if (existing_suspend_scope) {
        if (!existing_suspend_scope->reported_err) {
            ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside suspend block"));
            add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("other suspend block here"));
            existing_suspend_scope->reported_err = true;
        }
        return irb->codegen->invalid_inst_src;
    }

    IrInstSrcSuspendBegin *begin = ir_build_suspend_begin_src(irb, parent_scope, node);
    if (node->data.suspend.block != nullptr) {
        ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope);
        Scope *child_scope = &suspend_scope->base;
        IrInstSrc *susp_res = ir_gen_node(irb, node->data.suspend.block, child_scope);
        if (susp_res == irb->codegen->invalid_inst_src)
            return irb->codegen->invalid_inst_src;
        ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, susp_res));
    }

    return ir_mark_gen(ir_build_suspend_finish_src(irb, parent_scope, node, begin));
}

static IrInstSrc *ir_gen_node_raw(IrBuilderSrc *irb, AstNode *node, Scope *scope,
        LVal lval, ResultLoc *result_loc)
{
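    // Single dispatch point from AST node kind to the matching ir_gen_* routine.
    // Value-producing nodes are wrapped with ir_lval_wrap/ir_expr_wrap so that lval and
    // result-location handling stays uniform; declaration-only node kinds are unreachable.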
    assert(scope);
    switch (node->type) {
        case NodeTypeStructValueField:
        case NodeTypeParamDecl:
        case NodeTypeUsingNamespace:
        case NodeTypeSwitchProng:
        case NodeTypeSwitchRange:
        case NodeTypeStructField:
        case NodeTypeErrorSetField:
        case NodeTypeFnDef:
        case NodeTypeTestDecl:
            zig_unreachable();
        case NodeTypeBlock:
            return ir_gen_block(irb, scope, node, lval, result_loc);
        case NodeTypeGroupedExpr:
            return ir_gen_node_raw(irb, node->data.grouped_expr, scope, lval, result_loc);
        case NodeTypeBinOpExpr:
            return ir_gen_bin_op(irb, scope, node, lval, result_loc);
        case NodeTypeIntLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_int_lit(irb, scope, node), lval, result_loc);
        case NodeTypeFloatLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_float_lit(irb, scope, node), lval, result_loc);
        case NodeTypeCharLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_char_lit(irb, scope, node), lval, result_loc);
        case NodeTypeSymbol:
            return ir_gen_symbol(irb, scope, node, lval, result_loc);
        case NodeTypeFnCallExpr:
            return ir_gen_fn_call(irb, scope, node, lval, result_loc);
        case NodeTypeIfBoolExpr:
            return ir_gen_if_bool_expr(irb, scope, node, lval, result_loc);
        case NodeTypePrefixOpExpr:
            return ir_gen_prefix_op_expr(irb, scope, node, lval, result_loc);
        case NodeTypeContainerInitExpr:
            return ir_gen_container_init_expr(irb, scope, node, lval, result_loc);
        case NodeTypeVariableDeclaration:
            return ir_gen_var_decl(irb, scope, node);
        case NodeTypeWhileExpr:
            return ir_gen_while_expr(irb, scope, node, lval, result_loc);
        case NodeTypeForExpr:
            return ir_gen_for_expr(irb, scope, node, lval, result_loc);
        case NodeTypeArrayAccessExpr:
            return ir_gen_array_access(irb, scope, node, lval, result_loc);
        case NodeTypeReturnExpr:
            return ir_gen_return(irb, scope, node, lval, result_loc);
        case NodeTypeFieldAccessExpr:
            {
                IrInstSrc *ptr_instruction = ir_gen_field_access(irb, scope, node);
                if (ptr_instruction == irb->codegen->invalid_inst_src)
                    return ptr_instruction;
                if (lval == LValPtr || lval == LValAssign)
                    return ptr_instruction;

                IrInstSrc *load_ptr = ir_build_load_ptr(irb, scope, node, ptr_instruction);
                return ir_expr_wrap(irb, scope, load_ptr, result_loc);
            }
        case NodeTypePtrDeref: {
            AstNode *expr_node = node->data.ptr_deref_expr.target;

            LVal child_lval = lval;
            if (child_lval == LValAssign)
                child_lval = LValPtr;

            IrInstSrc *value = ir_gen_node_extra(irb, expr_node, scope, child_lval, nullptr);
            if (value == irb->codegen->invalid_inst_src)
                return value;

            // We essentially just converted any lvalue from &(x.*) to (&x).*;
            // this inhibits checking that x is a pointer later, so we directly
            // record whether the pointer check is needed
            IrInstSrc *un_op = ir_build_un_op_lval(irb, scope, node, IrUnOpDereference, value, lval, result_loc);
            return ir_expr_wrap(irb, scope, un_op, result_loc);
        }
        case NodeTypeUnwrapOptional: {
            AstNode *expr_node = node->data.unwrap_optional.expr;

            IrInstSrc *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
            if (maybe_ptr == irb->codegen->invalid_inst_src)
                return irb->codegen->invalid_inst_src;

            IrInstSrc *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, scope, node, maybe_ptr, true);
            if (lval == LValPtr || lval == LValAssign)
                return unwrapped_ptr;

            IrInstSrc *load_ptr = ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
            return ir_expr_wrap(irb, scope, load_ptr, result_loc);
        }
        case NodeTypeBoolLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_bool_literal(irb, scope, node), lval, result_loc);
        case NodeTypeArrayType:
            return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval, result_loc);
        case NodeTypePointerType:
            return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval, result_loc);
        case NodeTypeAnyFrameType:
            return ir_lval_wrap(irb, scope, ir_gen_anyframe_type(irb, scope, node), lval, result_loc);
        case NodeTypeStringLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval, result_loc);
        case NodeTypeUndefinedLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_undefined_literal(irb, scope, node), lval, result_loc);
        case NodeTypeAsmExpr:
            return ir_lval_wrap(irb, scope, ir_gen_asm_expr(irb, scope, node), lval, result_loc);
        case NodeTypeNullLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_null_literal(irb, scope, node), lval, result_loc);
        case NodeTypeIfErrorExpr:
            return ir_gen_if_err_expr(irb, scope, node, lval, result_loc);
        case NodeTypeIfOptional:
            return ir_gen_if_optional_expr(irb, scope, node, lval, result_loc);
        case NodeTypeSwitchExpr:
            return ir_gen_switch_expr(irb, scope, node, lval, result_loc);
        case NodeTypeCompTime:
            return ir_expr_wrap(irb, scope, ir_gen_comptime(irb, scope, node, lval), result_loc);
        case NodeTypeNoSuspend:
            return ir_expr_wrap(irb, scope, ir_gen_nosuspend(irb, scope, node, lval), result_loc);
        case NodeTypeErrorType:
            return ir_lval_wrap(irb, scope, ir_gen_error_type(irb, scope, node), lval, result_loc);
        case NodeTypeBreak:
            return ir_lval_wrap(irb, scope, ir_gen_break(irb, scope, node), lval, result_loc);
        case NodeTypeContinue:
            return ir_lval_wrap(irb, scope, ir_gen_continue(irb, scope, node), lval, result_loc);
        case NodeTypeUnreachable:
            return ir_build_unreachable(irb, scope, node);
        case NodeTypeDefer:
            return ir_lval_wrap(irb, scope, ir_gen_defer(irb, scope, node), lval, result_loc);
        case NodeTypeSliceExpr:
            return ir_gen_slice(irb, scope, node, lval, result_loc);
        case NodeTypeCatchExpr:
            return ir_gen_catch(irb, scope, node, lval, result_loc);
        case NodeTypeContainerDecl:
            return ir_lval_wrap(irb, scope, ir_gen_container_decl(irb, scope, node), lval, result_loc);
        case NodeTypeFnProto:
            return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval, result_loc);
        case NodeTypeErrorSetDecl:
            return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval, result_loc);
        case NodeTypeResume:
            return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc);
        case NodeTypeAwaitExpr:
            return ir_gen_await_expr(irb, scope, node, lval, result_loc);
        case NodeTypeSuspend:
            return ir_lval_wrap(irb, scope, ir_gen_suspend(irb, scope, node), lval, result_loc);
        case NodeTypeEnumLiteral:
            return ir_lval_wrap(irb, scope, ir_gen_enum_literal(irb, scope, node), lval, result_loc);
        case NodeTypeInferredArrayType:
            add_node_error(irb->codegen, node,
                buf_sprintf("inferred array size invalid here"));
            return irb->codegen->invalid_inst_src;
        case NodeTypeVarFieldType:
            return ir_lval_wrap(irb, scope,
                    ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_var), lval, result_loc);
    }
    zig_unreachable();
}

static ResultLoc *no_result_loc(void) {
    ResultLocNone *result_loc_none = heap::c_allocator.create<ResultLocNone>();
    result_loc_none->base.id = ResultLocIdNone;
    return &result_loc_none->base;
}

static IrInstSrc *ir_gen_node_extra(IrBuilderSrc *irb, AstNode *node, Scope *scope, LVal lval,
        ResultLoc *result_loc)
{
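    // Common entry for generating a single AST node: rejects invalid assignment targets
    // when lval is LValAssign, supplies a "none" result location when the caller passed
    // null, and generates the node in a fresh expression scope unless the executable is
    // inline or this is the function's top-level body scope.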
    if (lval == LValAssign) {
        switch (node->type) {
            case NodeTypeStructValueField:
            case NodeTypeParamDecl:
            case NodeTypeUsingNamespace:
            case NodeTypeSwitchProng:
            case NodeTypeSwitchRange:
            case NodeTypeStructField:
            case NodeTypeErrorSetField:
            case NodeTypeFnDef:
            case NodeTypeTestDecl:
                zig_unreachable();

            // cannot be assigned to
            case NodeTypeBlock:
            case NodeTypeGroupedExpr:
            case NodeTypeBinOpExpr:
            case NodeTypeIntLiteral:
            case NodeTypeFloatLiteral:
            case NodeTypeCharLiteral:
            case NodeTypeIfBoolExpr:
            case NodeTypeContainerInitExpr:
            case NodeTypeVariableDeclaration:
            case NodeTypeWhileExpr:
            case NodeTypeForExpr:
            case NodeTypeReturnExpr:
            case NodeTypeBoolLiteral:
            case NodeTypeArrayType:
            case NodeTypePointerType:
            case NodeTypeAnyFrameType:
            case NodeTypeStringLiteral:
            case NodeTypeUndefinedLiteral:
            case NodeTypeAsmExpr:
            case NodeTypeNullLiteral:
            case NodeTypeIfErrorExpr:
            case NodeTypeIfOptional:
            case NodeTypeSwitchExpr:
            case NodeTypeCompTime:
            case NodeTypeNoSuspend:
            case NodeTypeErrorType:
            case NodeTypeBreak:
            case NodeTypeContinue:
            case NodeTypeUnreachable:
            case NodeTypeDefer:
            case NodeTypeSliceExpr:
            case NodeTypeCatchExpr:
            case NodeTypeContainerDecl:
            case NodeTypeFnProto:
            case NodeTypeErrorSetDecl:
            case NodeTypeResume:
            case NodeTypeAwaitExpr:
            case NodeTypeSuspend:
            case NodeTypeEnumLiteral:
            case NodeTypeInferredArrayType:
            case NodeTypeVarFieldType:
            case NodeTypePrefixOpExpr:
                add_node_error(irb->codegen, node,
                    buf_sprintf("invalid left-hand side to assignment"));
                return irb->codegen->invalid_inst_src;

            // @field can be assigned to
            case NodeTypeFnCallExpr:
                if (node->data.fn_call_expr.modifier == CallModifierBuiltin) {
                    AstNode *fn_ref_expr = node->data.fn_call_expr.fn_ref_expr;
                    Buf *name = fn_ref_expr->data.symbol_expr.symbol;
                    auto entry = irb->codegen->builtin_fn_table.maybe_get(name);

                    if (!entry) {
                        add_node_error(irb->codegen, node,
                                buf_sprintf("invalid builtin function: '%s'", buf_ptr(name)));
                        return irb->codegen->invalid_inst_src;
                    }

                    if (entry->value->id == BuiltinFnIdField) {
                        break;
                    }
                }
                add_node_error(irb->codegen, node,
                    buf_sprintf("invalid left-hand side to assignment"));
                return irb->codegen->invalid_inst_src;


            // can be assigned to
            case NodeTypeUnwrapOptional:
            case NodeTypePtrDeref:
            case NodeTypeFieldAccessExpr:
            case NodeTypeArrayAccessExpr:
            case NodeTypeSymbol:
                break;
        }
    }
    if (result_loc == nullptr) {
        // Create a result location indicating there is none - but if one gets created
        // it will be properly distributed.
        result_loc = no_result_loc();
        ir_build_reset_result(irb, scope, node, result_loc);
    }
    Scope *child_scope;
    if (irb->exec->is_inline ||
        (irb->exec->fn_entry != nullptr && irb->exec->fn_entry->child_scope == scope))
    {
        child_scope = scope;
    } else {
        child_scope = &create_expr_scope(irb->codegen, node, scope)->base;
    }
    IrInstSrc *result = ir_gen_node_raw(irb, node, child_scope, lval, result_loc);
    if (result == irb->codegen->invalid_inst_src) {
        if (irb->exec->first_err_trace_msg == nullptr) {
            irb->exec->first_err_trace_msg = irb->codegen->trace_err;
        }
    }
    return result;
}

static IrInstSrc *ir_gen_node(IrBuilderSrc *irb, AstNode *node, Scope *scope) {
    return ir_gen_node_extra(irb, node, scope, LValNone, nullptr);
}

static void invalidate_exec(IrExecutableSrc *exec, ErrorMsg *msg) {
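    // Records the first error seen for this executable and marks every top-level
    // declaration it created as having failed resolution.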
    if (exec->first_err_trace_msg != nullptr)
        return;

    exec->first_err_trace_msg = msg;

    for (size_t i = 0; i < exec->tld_list.length; i += 1) {
        exec->tld_list.items[i]->resolution = TldResolutionInvalid;
    }
}

static void invalidate_exec_gen(IrExecutableGen *exec, ErrorMsg *msg) {
    if (exec->first_err_trace_msg != nullptr)
        return;

    exec->first_err_trace_msg = msg;

    for (size_t i = 0; i < exec->tld_list.length; i += 1) {
        exec->tld_list.items[i]->resolution = TldResolutionInvalid;
    }

    if (exec->source_exec != nullptr)
        invalidate_exec(exec->source_exec, msg);
}

bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutableSrc *ir_executable) {
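    // Pass 1 entry point: lowers the AST rooted at `node` into source IR in ir_executable.
    // If the resulting value is reachable, an implicit return of that value is appended,
    // with the value noted for implicit return type inference.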
    assert(node->owner);

    IrBuilderSrc ir_builder = {0};
    IrBuilderSrc *irb = &ir_builder;

    irb->codegen = codegen;
    irb->exec = ir_executable;
    irb->main_block_node = node;

    IrBasicBlockSrc *entry_block = ir_create_basic_block(irb, scope, "Entry");
    ir_set_cursor_at_end_and_append_block(irb, entry_block);
    // Entry block gets a reference because we enter it to begin.
    ir_ref_bb(irb->current_basic_block);

    IrInstSrc *result = ir_gen_node_extra(irb, node, scope, LValNone, nullptr);

    if (result == irb->codegen->invalid_inst_src)
        return false;

    if (irb->exec->first_err_trace_msg != nullptr) {
        codegen->trace_err = irb->exec->first_err_trace_msg;
        return false;
    }

    if (!instr_is_unreachable(result)) {
        ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->base.source_node, result, nullptr));
        // no need for save_err_ret_addr because this cannot return error
        ResultLocReturn *result_loc_ret = heap::c_allocator.create<ResultLocReturn>();
        result_loc_ret->base.id = ResultLocIdReturn;
        ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
        ir_mark_gen(ir_build_end_expr(irb, scope, node, result, &result_loc_ret->base));
        ir_mark_gen(ir_build_return_src(irb, scope, result->base.source_node, result));
    }

    return true;
}

bool ir_gen_fn(CodeGen *codegen, ZigFn *fn_entry) {
    assert(fn_entry);

    IrExecutableSrc *ir_executable = fn_entry->ir_executable;
    AstNode *body_node = fn_entry->body_node;

    assert(fn_entry->child_scope);

    return ir_gen(codegen, body_node, fn_entry->child_scope, ir_executable);
}

static void ir_add_call_stack_errors_gen(CodeGen *codegen, IrExecutableGen *exec, ErrorMsg *err_msg, int limit) {
    if (!exec || !exec->source_node || limit < 0) return;
    add_error_note(codegen, err_msg, exec->source_node, buf_sprintf("called from here"));

    ir_add_call_stack_errors_gen(codegen, exec->parent_exec, err_msg, limit - 1);
}

static void ir_add_call_stack_errors(CodeGen *codegen, IrExecutableSrc *exec, ErrorMsg *err_msg, int limit) {
    if (!exec || !exec->source_node || limit < 0) return;
    add_error_note(codegen, err_msg, exec->source_node, buf_sprintf("called from here"));

    ir_add_call_stack_errors_gen(codegen, exec->parent_exec, err_msg, limit - 1);
}

static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutableSrc *exec, AstNode *source_node, Buf *msg) {
    ErrorMsg *err_msg = add_node_error(codegen, source_node, msg);
    invalidate_exec(exec, err_msg);
    if (exec->parent_exec) {
        ir_add_call_stack_errors(codegen, exec, err_msg, 10);
    }
    return err_msg;
}

static ErrorMsg *exec_add_error_node_gen(CodeGen *codegen, IrExecutableGen *exec, AstNode *source_node, Buf *msg) {
    ErrorMsg *err_msg = add_node_error(codegen, source_node, msg);
    invalidate_exec_gen(exec, err_msg);
    if (exec->parent_exec) {
        ir_add_call_stack_errors_gen(codegen, exec, err_msg, 10);
    }
    return err_msg;
}

static ErrorMsg *ir_add_error_node(IrAnalyze *ira, AstNode *source_node, Buf *msg) {
    return exec_add_error_node_gen(ira->codegen, ira->new_irb.exec, source_node, msg);
}

static ErrorMsg *opt_ir_add_error_node(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node, Buf *msg) {
    if (ira != nullptr)
        return exec_add_error_node_gen(codegen, ira->new_irb.exec, source_node, msg);
    else
        return add_node_error(codegen, source_node, msg);
}

static ErrorMsg *ir_add_error(IrAnalyze *ira, IrInst *source_instruction, Buf *msg) {
    return ir_add_error_node(ira, source_instruction->source_node, msg);
}

static void ir_assert_impl(bool ok, IrInst *source_instruction, char const *file, unsigned int line) {
    if (ok) return;
    src_assert_impl(ok, source_instruction->source_node, file, line);
}

static void ir_assert_gen_impl(bool ok, IrInstGen *source_instruction, char const *file, unsigned int line) {
    if (ok) return;
    src_assert_impl(ok, source_instruction->base.source_node, file, line);
}

// This function takes a comptime ptr and makes the child const value conform to the type
// described by the pointer.
static Error eval_comptime_ptr_reinterpret(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node,
        ZigValue *ptr_val)
{
    Error err;
    assert(ptr_val->type->id == ZigTypeIdPointer);
    assert(ptr_val->special == ConstValSpecialStatic);
    ZigValue tmp = {};
    tmp.special = ConstValSpecialStatic;
    tmp.type = ptr_val->type->data.pointer.child_type;
    if ((err = ir_read_const_ptr(ira, codegen, source_node, &tmp, ptr_val)))
        return err;
    ZigValue *child_val = const_ptr_pointee_unchecked(codegen, ptr_val);
    copy_const_val(codegen, child_val, &tmp);
    return ErrorNone;
}

ZigValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ZigValue *const_val,
        AstNode *source_node)
{
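    // Dereferences a comptime pointer. Types with a single possible value short-circuit to
    // that value; if the stored value's comptime representation does not match the pointer's
    // child type, the value is reinterpreted in place first.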
    Error err;
    ZigValue *val = const_ptr_pointee_unchecked(codegen, const_val);
    if (val == nullptr) return nullptr;
    assert(const_val->type->id == ZigTypeIdPointer);
    ZigType *expected_type = const_val->type->data.pointer.child_type;
    if (expected_type == codegen->builtin_types.entry_var) {
        return val;
    }
    switch (type_has_one_possible_value(codegen, expected_type)) {
        case OnePossibleValueInvalid:
            return nullptr;
        case OnePossibleValueNo:
            break;
        case OnePossibleValueYes:
            return get_the_one_possible_value(codegen, expected_type);
    }
    if (!types_have_same_zig_comptime_repr(codegen, expected_type, val->type)) {
        if ((err = eval_comptime_ptr_reinterpret(ira, codegen, source_node, const_val)))
            return nullptr;
        return const_ptr_pointee_unchecked(codegen, const_val);
    }
    return val;
}

static Error ir_exec_scan_for_side_effects(CodeGen *codegen, IrExecutableGen *exec) {
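    // Scans the single basic block of a comptime-evaluated executable and reports an error
    // if any instruction with runtime side effects appears before the return, excepting
    // comptime-known unwraps and anything inside a @TypeOf scope.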
    IrBasicBlockGen *bb = exec->basic_block_list.at(0);
    for (size_t i = 0; i < bb->instruction_list.length; i += 1) {
        IrInstGen *instruction = bb->instruction_list.at(i);
        if (instruction->id == IrInstGenIdReturn) {
            return ErrorNone;
        } else if (ir_inst_gen_has_side_effects(instruction)) {
            if (instr_is_comptime(instruction)) {
                switch (instruction->id) {
                    case IrInstGenIdUnwrapErrPayload:
                    case IrInstGenIdOptionalUnwrapPtr:
                    case IrInstGenIdUnionFieldPtr:
                        continue;
                    default:
                        break;
                }
            }
            if (get_scope_typeof(instruction->base.scope) != nullptr) {
                // doesn't count, it's inside a @TypeOf()
                continue;
            }
            exec_add_error_node_gen(codegen, exec, instruction->base.source_node,
                    buf_sprintf("unable to evaluate constant expression"));
            return ErrorSemanticAnalyzeFail;
        }
    }
    zig_unreachable();
}

static bool ir_emit_global_runtime_side_effect(IrAnalyze *ira, IrInst* source_instruction) {
    if (ir_should_inline(ira->old_irb.exec, source_instruction->scope)) {
        ir_add_error(ira, source_instruction, buf_sprintf("unable to evaluate constant expression"));
        return false;
    }
    return true;
}

static bool const_val_fits_in_num_lit(ZigValue *const_val, ZigType *num_lit_type) {
    return ((num_lit_type->id == ZigTypeIdComptimeFloat &&
        (const_val->type->id == ZigTypeIdFloat || const_val->type->id == ZigTypeIdComptimeFloat)) ||
               (num_lit_type->id == ZigTypeIdComptimeInt &&
        (const_val->type->id == ZigTypeIdInt || const_val->type->id == ZigTypeIdComptimeInt)));
}

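// Reports whether a comptime float value has a nonzero fractional part, i.e. whether
// truncating it toward zero would lose information.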
static bool float_has_fraction(ZigValue *const_val) {
    if (const_val->type->id == ZigTypeIdComptimeFloat) {
        return bigfloat_has_fraction(&const_val->data.x_bigfloat);
    } else if (const_val->type->id == ZigTypeIdFloat) {
        switch (const_val->type->data.floating.bit_count) {
            case 16:
                {
                    float16_t floored = f16_roundToInt(const_val->data.x_f16, softfloat_round_minMag, false);
                    return !f16_eq(floored, const_val->data.x_f16);
                }
            case 32:
                return floorf(const_val->data.x_f32) != const_val->data.x_f32;
            case 64:
                return floor(const_val->data.x_f64) != const_val->data.x_f64;
            case 128:
                {
                    float128_t floored;
                    f128M_roundToInt(&const_val->data.x_f128, softfloat_round_minMag, false, &floored);
                    return !f128M_eq(&floored, &const_val->data.x_f128);
                }
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

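// Appends a textual rendering of the float value to buf. 128-bit values are currently
// printed through a lossy conversion to f64 (see the TODO below).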
static void float_append_buf(Buf *buf, ZigValue *const_val) {
    if (const_val->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_append_buf(buf, &const_val->data.x_bigfloat);
    } else if (const_val->type->id == ZigTypeIdFloat) {
        switch (const_val->type->data.floating.bit_count) {
            case 16:
                buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16));
                break;
            case 32:
                buf_appendf(buf, "%f", const_val->data.x_f32);
                break;
            case 64:
                buf_appendf(buf, "%f", const_val->data.x_f64);
                break;
            case 128:
                {
                    // TODO actual implementation
                    const size_t extra_len = 100;
                    size_t old_len = buf_len(buf);
                    buf_resize(buf, old_len + extra_len);

                    float64_t f64_value = f128M_to_f64(&const_val->data.x_f128);
                    double double_value;
                    memcpy(&double_value, &f64_value, sizeof(double));

                    int len = snprintf(buf_ptr(buf) + old_len, extra_len, "%f", double_value);
                    assert(len > 0);
                    buf_resize(buf, old_len + len);
                    break;
                }
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

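// Initializes bigint from the float value, discarding any fractional part.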
static void float_init_bigint(BigInt *bigint, ZigValue *const_val) {
    if (const_val->type->id == ZigTypeIdComptimeFloat) {
        bigint_init_bigfloat(bigint, &const_val->data.x_bigfloat);
    } else if (const_val->type->id == ZigTypeIdFloat) {
        switch (const_val->type->data.floating.bit_count) {
            case 16:
                {
                    double x = zig_f16_to_double(const_val->data.x_f16);
                    if (x >= 0) {
                        bigint_init_unsigned(bigint, (uint64_t)x);
                    } else {
                        bigint_init_unsigned(bigint, (uint64_t)-x);
                        bigint->is_negative = true;
                    }
                    break;
                }
            case 32:
                if (const_val->data.x_f32 >= 0) {
                    bigint_init_unsigned(bigint, (uint64_t)(const_val->data.x_f32));
                } else {
                    bigint_init_unsigned(bigint, (uint64_t)(-const_val->data.x_f32));
                    bigint->is_negative = true;
                }
                break;
            case 64:
                if (const_val->data.x_f64 >= 0) {
                    bigint_init_unsigned(bigint, (uint64_t)(const_val->data.x_f64));
                } else {
                    bigint_init_unsigned(bigint, (uint64_t)(-const_val->data.x_f64));
                    bigint->is_negative = true;
                }
                break;
            case 128:
                {
                    BigFloat tmp_float;
                    bigfloat_init_128(&tmp_float, const_val->data.x_f128);
                    bigint_init_bigfloat(bigint, &tmp_float);
                }
                break;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

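// Stores the value of bigfloat into dest_val, converting it to dest_val's float type.
// Note the argument order: the destination comes first, like the other float_init_*
// helpers below.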
static void float_init_bigfloat(ZigValue *dest_val, BigFloat *bigfloat) {
    if (dest_val->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_init_bigfloat(&dest_val->data.x_bigfloat, bigfloat);
    } else if (dest_val->type->id == ZigTypeIdFloat) {
        switch (dest_val->type->data.floating.bit_count) {
            case 16:
                dest_val->data.x_f16 = bigfloat_to_f16(bigfloat);
                break;
            case 32:
                dest_val->data.x_f32 = bigfloat_to_f32(bigfloat);
                break;
            case 64:
                dest_val->data.x_f64 = bigfloat_to_f64(bigfloat);
                break;
            case 80:
                zig_panic("TODO");
            case 128:
                dest_val->data.x_f128 = bigfloat_to_f128(bigfloat);
                break;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

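// The float_init_fN helpers store a value of the given float width into dest_val,
// converting it to whatever float type dest_val already has.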
static void float_init_f16(ZigValue *dest_val, float16_t x) {
    if (dest_val->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_init_16(&dest_val->data.x_bigfloat, x);
    } else if (dest_val->type->id == ZigTypeIdFloat) {
        switch (dest_val->type->data.floating.bit_count) {
            case 16:
                dest_val->data.x_f16 = x;
                break;
            case 32:
                dest_val->data.x_f32 = zig_f16_to_double(x);
                break;
            case 64:
                dest_val->data.x_f64 = zig_f16_to_double(x);
                break;
            case 128:
                f16_to_f128M(x, &dest_val->data.x_f128);
                break;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_init_f32(ZigValue *dest_val, float x) {
    if (dest_val->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_init_32(&dest_val->data.x_bigfloat, x);
    } else if (dest_val->type->id == ZigTypeIdFloat) {
        switch (dest_val->type->data.floating.bit_count) {
            case 16:
                dest_val->data.x_f16 = zig_double_to_f16(x);
                break;
            case 32:
                dest_val->data.x_f32 = x;
                break;
            case 64:
                dest_val->data.x_f64 = x;
                break;
            case 128:
                {
                    float32_t x_f32;
                    memcpy(&x_f32, &x, sizeof(float));
                    f32_to_f128M(x_f32, &dest_val->data.x_f128);
                    break;
                }
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_init_f64(ZigValue *dest_val, double x) {
    if (dest_val->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_init_64(&dest_val->data.x_bigfloat, x);
    } else if (dest_val->type->id == ZigTypeIdFloat) {
        switch (dest_val->type->data.floating.bit_count) {
            case 16:
                dest_val->data.x_f16 = zig_double_to_f16(x);
                break;
            case 32:
                dest_val->data.x_f32 = x;
                break;
            case 64:
                dest_val->data.x_f64 = x;
                break;
            case 128:
                {
                    float64_t x_f64;
                    memcpy(&x_f64, &x, sizeof(double));
                    f64_to_f128M(x_f64, &dest_val->data.x_f128);
                    break;
                }
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_init_f128(ZigValue *dest_val, float128_t x) {
    if (dest_val->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_init_128(&dest_val->data.x_bigfloat, x);
    } else if (dest_val->type->id == ZigTypeIdFloat) {
        switch (dest_val->type->data.floating.bit_count) {
            case 16:
                dest_val->data.x_f16 = f128M_to_f16(&x);
                break;
            case 32:
                {
                    float32_t f32_val = f128M_to_f32(&x);
                    memcpy(&dest_val->data.x_f32, &f32_val, sizeof(float));
                    break;
                }
            case 64:
                {
                    float64_t f64_val = f128M_to_f64(&x);
                    memcpy(&dest_val->data.x_f64, &f64_val, sizeof(double));
                    break;
                }
            case 128:
                {
                    memcpy(&dest_val->data.x_f128, &x, sizeof(float128_t));
                    break;
                }
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_init_float(ZigValue *dest_val, ZigValue *src_val) {
    if (src_val->type->id == ZigTypeIdComptimeFloat) {
        float_init_bigfloat(dest_val, &src_val->data.x_bigfloat);
    } else if (src_val->type->id == ZigTypeIdFloat) {
        switch (src_val->type->data.floating.bit_count) {
            case 16:
                float_init_f16(dest_val, src_val->data.x_f16);
                break;
            case 32:
                float_init_f32(dest_val, src_val->data.x_f32);
                break;
            case 64:
                float_init_f64(dest_val, src_val->data.x_f64);
                break;
            case 128:
                float_init_f128(dest_val, src_val->data.x_f128);
                break;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static bool float_is_nan(ZigValue *op) {
    if (op->type->id == ZigTypeIdComptimeFloat) {
        return bigfloat_is_nan(&op->data.x_bigfloat);
    } else if (op->type->id == ZigTypeIdFloat) {
        switch (op->type->data.floating.bit_count) {
            case 16:
                // NaN compares unequal to itself; using f16_eq catches quiet NaNs as
                // well as signaling ones (f16_isSignalingNaN misses quiet NaNs).
                return !f16_eq(op->data.x_f16, op->data.x_f16);
            case 32:
                return op->data.x_f32 != op->data.x_f32;
            case 64:
                return op->data.x_f64 != op->data.x_f64;
            case 128:
                return !f128M_eq(&op->data.x_f128, &op->data.x_f128);
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void value_to_bigfloat(BigFloat *out, ZigValue *val); // defined below

// Three-way comparison of two comptime float values. Same-typed operands are compared
// directly; mixed types are promoted to BigFloat first.
static Cmp float_cmp(ZigValue *op1, ZigValue *op2) {
    if (op1->type == op2->type) {
        if (op1->type->id == ZigTypeIdComptimeFloat) {
            return bigfloat_cmp(&op1->data.x_bigfloat, &op2->data.x_bigfloat);
        } else if (op1->type->id == ZigTypeIdFloat) {
            switch (op1->type->data.floating.bit_count) {
                case 16:
                    if (f16_lt(op1->data.x_f16, op2->data.x_f16)) {
                        return CmpLT;
                    } else if (f16_lt(op2->data.x_f16, op1->data.x_f16)) {
                        return CmpGT;
                    } else {
                        return CmpEQ;
                    }
                case 32:
                    if (op1->data.x_f32 > op2->data.x_f32) {
                        return CmpGT;
                    } else if (op1->data.x_f32 < op2->data.x_f32) {
                        return CmpLT;
                    } else {
                        return CmpEQ;
                    }
                case 64:
                    if (op1->data.x_f64 > op2->data.x_f64) {
                        return CmpGT;
                    } else if (op1->data.x_f64 < op2->data.x_f64) {
                        return CmpLT;
                    } else {
                        return CmpEQ;
                    }
                case 128:
                    if (f128M_lt(&op1->data.x_f128, &op2->data.x_f128)) {
                        return CmpLT;
                    } else if (f128M_eq(&op1->data.x_f128, &op2->data.x_f128)) {
                        return CmpEQ;
                    } else {
                        return CmpGT;
                    }
                default:
                    zig_unreachable();
            }
        } else {
            zig_unreachable();
        }
    }
    BigFloat op1_big;
    BigFloat op2_big;
    value_to_bigfloat(&op1_big, op1);
    value_to_bigfloat(&op2_big, op2);
    return bigfloat_cmp(&op1_big, &op2_big);
}

// This function cannot handle NaN
static Cmp float_cmp_zero(ZigValue *op) {
    if (op->type->id == ZigTypeIdComptimeFloat) {
        return bigfloat_cmp_zero(&op->data.x_bigfloat);
    } else if (op->type->id == ZigTypeIdFloat) {
        switch (op->type->data.floating.bit_count) {
            case 16:
                {
                    const float16_t zero = zig_double_to_f16(0);
                    if (f16_lt(op->data.x_f16, zero)) {
                        return CmpLT;
                    } else if (f16_lt(zero, op->data.x_f16)) {
                        return CmpGT;
                    } else {
                        return CmpEQ;
                    }
                }
            case 32:
                if (op->data.x_f32 < 0.0) {
                    return CmpLT;
                } else if (op->data.x_f32 > 0.0) {
                    return CmpGT;
                } else {
                    return CmpEQ;
                }
            case 64:
                if (op->data.x_f64 < 0.0) {
                    return CmpLT;
                } else if (op->data.x_f64 > 0.0) {
                    return CmpGT;
                } else {
                    return CmpEQ;
                }
            case 128:
                float128_t zero_float;
                ui32_to_f128M(0, &zero_float);
                if (f128M_lt(&op->data.x_f128, &zero_float)) {
                    return CmpLT;
                } else if (f128M_eq(&op->data.x_f128, &zero_float)) {
                    return CmpEQ;
                } else {
                    return CmpGT;
                }
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_add(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_add(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                out_val->data.x_f16 = f16_add(op1->data.x_f16, op2->data.x_f16);
                return;
            case 32:
                out_val->data.x_f32 = op1->data.x_f32 + op2->data.x_f32;
                return;
            case 64:
                out_val->data.x_f64 = op1->data.x_f64 + op2->data.x_f64;
                return;
            case 128:
                f128M_add(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_sub(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_sub(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                out_val->data.x_f16 = f16_sub(op1->data.x_f16, op2->data.x_f16);
                return;
            case 32:
                out_val->data.x_f32 = op1->data.x_f32 - op2->data.x_f32;
                return;
            case 64:
                out_val->data.x_f64 = op1->data.x_f64 - op2->data.x_f64;
                return;
            case 128:
                f128M_sub(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_mul(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_mul(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                out_val->data.x_f16 = f16_mul(op1->data.x_f16, op2->data.x_f16);
                return;
            case 32:
                out_val->data.x_f32 = op1->data.x_f32 * op2->data.x_f32;
                return;
            case 64:
                out_val->data.x_f64 = op1->data.x_f64 * op2->data.x_f64;
                return;
            case 128:
                f128M_mul(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_div(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_div(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
                return;
            case 32:
                out_val->data.x_f32 = op1->data.x_f32 / op2->data.x_f32;
                return;
            case 64:
                out_val->data.x_f64 = op1->data.x_f64 / op2->data.x_f64;
                return;
            case 128:
                f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_div_trunc(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_div_trunc(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
                out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_minMag, false);
                return;
            case 32:
                out_val->data.x_f32 = truncf(op1->data.x_f32 / op2->data.x_f32);
                return;
            case 64:
                out_val->data.x_f64 = trunc(op1->data.x_f64 / op2->data.x_f64);
                return;
            case 128:
                f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                f128M_roundToInt(&out_val->data.x_f128, softfloat_round_minMag, false, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_div_floor(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_div_floor(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
                out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_min, false);
                return;
            case 32:
                out_val->data.x_f32 = floorf(op1->data.x_f32 / op2->data.x_f32);
                return;
            case 64:
                out_val->data.x_f64 = floor(op1->data.x_f64 / op2->data.x_f64);
                return;
            case 128:
                f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                f128M_roundToInt(&out_val->data.x_f128, softfloat_round_min, false, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

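// Remainder of op1 divided by op2 (@rem); compare float_mod below, which implements
// floored (@mod) semantics.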
static void float_rem(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_rem(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                out_val->data.x_f16 = f16_rem(op1->data.x_f16, op2->data.x_f16);
                return;
            case 32:
                out_val->data.x_f32 = fmodf(op1->data.x_f32, op2->data.x_f32);
                return;
            case 64:
                out_val->data.x_f64 = fmod(op1->data.x_f64, op2->data.x_f64);
                return;
            case 128:
                f128M_rem(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

// c = a - b * floor(a / b)  (softfloat_round_min rounds toward negative infinity)
static float16_t zig_f16_mod(float16_t a, float16_t b) {
    float16_t c;
    c = f16_div(a, b);
    c = f16_roundToInt(c, softfloat_round_min, true);
    c = f16_mul(b, c);
    c = f16_sub(a, c);
    return c;
}

// c = a - b * floor(a / b)  (softfloat_round_min rounds toward negative infinity)
static void zig_f128M_mod(const float128_t* a, const float128_t* b, float128_t* c) {
    f128M_div(a, b, c);
    f128M_roundToInt(c, softfloat_round_min, true, c);
    f128M_mul(b, c, c);
    f128M_sub(a, c, c);
}

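// Floored modulo (@mod): the result takes the sign of the divisor,
// e.g. mod(-5.0, 3.0) == 1.0, whereas a plain C fmod would yield -2.0.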
static void float_mod(ZigValue *out_val, ZigValue *op1, ZigValue *op2) {
    assert(op1->type == op2->type);
    out_val->type = op1->type;
    if (op1->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_mod(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
    } else if (op1->type->id == ZigTypeIdFloat) {
        switch (op1->type->data.floating.bit_count) {
            case 16:
                out_val->data.x_f16 = zig_f16_mod(op1->data.x_f16, op2->data.x_f16);
                return;
            case 32:
                out_val->data.x_f32 = fmodf(fmodf(op1->data.x_f32, op2->data.x_f32) + op2->data.x_f32, op2->data.x_f32);
                return;
            case 64:
                out_val->data.x_f64 = fmod(fmod(op1->data.x_f64, op2->data.x_f64) + op2->data.x_f64, op2->data.x_f64);
                return;
            case 128:
                zig_f128M_mod(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

static void float_negate(ZigValue *out_val, ZigValue *op) {
    out_val->type = op->type;
    if (op->type->id == ZigTypeIdComptimeFloat) {
        bigfloat_negate(&out_val->data.x_bigfloat, &op->data.x_bigfloat);
    } else if (op->type->id == ZigTypeIdFloat) {
        switch (op->type->data.floating.bit_count) {
            case 16:
                {
                    const float16_t zero = zig_double_to_f16(0);
                    out_val->data.x_f16 = f16_sub(zero, op->data.x_f16);
                    return;
                }
            case 32:
                out_val->data.x_f32 = -op->data.x_f32;
                return;
            case 64:
                out_val->data.x_f64 = -op->data.x_f64;
                return;
            case 128:
                float128_t zero_f128;
                ui32_to_f128M(0, &zero_f128);
                f128M_sub(&zero_f128, &op->data.x_f128, &out_val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
    } else {
        zig_unreachable();
    }
}

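// Writes the raw IEEE 754 representation of op into buf in the requested byte order.
// ("ieee597" in the name appears to be a long-standing typo for 754; it is kept as-is
// since the symbol is presumably declared elsewhere.)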
void float_write_ieee597(ZigValue *op, uint8_t *buf, bool is_big_endian) {
    if (op->type->id != ZigTypeIdFloat)
        zig_unreachable();

    const unsigned n = op->type->data.floating.bit_count / 8;
    assert(n <= 16);

    switch (op->type->data.floating.bit_count) {
        case 16:
            memcpy(buf, &op->data.x_f16, 2);
            break;
        case 32:
            memcpy(buf, &op->data.x_f32, 4);
            break;
        case 64:
            memcpy(buf, &op->data.x_f64, 8);
            break;
        case 128:
            memcpy(buf, &op->data.x_f128, 16);
            break;
        default:
            zig_unreachable();
    }

    if (is_big_endian) {
        // Byteswap in place if needed
        for (size_t i = 0; i < n / 2; i++) {
            uint8_t u = buf[i];
            buf[i] = buf[n - 1 - i];
            buf[n - 1 - i] = u;
        }
    }
}

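// Inverse of float_write_ieee597: reads a raw IEEE 754 representation from buf into val,
// byte-swapping first when the source is big-endian.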
void float_read_ieee597(ZigValue *val, uint8_t *buf, bool is_big_endian) {
    if (val->type->id != ZigTypeIdFloat)
        zig_unreachable();

    const unsigned n = val->type->data.floating.bit_count / 8;
    assert(n <= 16);

    uint8_t tmp[16];
    uint8_t *ptr = buf;

    if (is_big_endian) {
        memcpy(tmp, buf, n);

        // Byteswap if needed
        for (size_t i = 0; i < n / 2; i++) {
            uint8_t u = tmp[i];
            tmp[i] = tmp[n - 1 - i];
            tmp[n - 1 - i] = u;
        }

        ptr = tmp;
    }

    switch (val->type->data.floating.bit_count) {
        case 16:
            memcpy(&val->data.x_f16, ptr, 2);
            return;
        case 32:
            memcpy(&val->data.x_f32, ptr, 4);
            return;
        case 64:
            memcpy(&val->data.x_f64, ptr, 8);
            return;
        case 128:
            memcpy(&val->data.x_f128, ptr, 16);
            return;
        default:
            zig_unreachable();
    }
}

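// Converts any comptime numeric value (int, comptime_int, float, comptime_float) to a
// BigFloat.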
static void value_to_bigfloat(BigFloat *out, ZigValue *val) {
    switch (val->type->id) {
        case ZigTypeIdInt:
        case ZigTypeIdComptimeInt:
            bigfloat_init_bigint(out, &val->data.x_bigint);
            return;
        case ZigTypeIdComptimeFloat:
            *out = val->data.x_bigfloat;
            return;
        case ZigTypeIdFloat: switch (val->type->data.floating.bit_count) {
            case 16:
                bigfloat_init_16(out, val->data.x_f16);
                return;
            case 32:
                bigfloat_init_32(out, val->data.x_f32);
                return;
            case 64:
                bigfloat_init_64(out, val->data.x_f64);
                return;
            case 80:
                zig_panic("TODO");
            case 128:
                bigfloat_init_128(out, val->data.x_f128);
                return;
            default:
                zig_unreachable();
        }
        default:
            zig_unreachable();
    }
}

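// Checks whether the comptime numeric value produced by `instruction` can be represented
// exactly by `other_type`, reporting a compile error when it cannot. Lazy @alignOf/@sizeOf
// values are accepted without forcing resolution when the destination integer type is wide
// enough, and with explicit_cast a fraction-free float may be accepted by an integer type.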
static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstGen *instruction, ZigType *other_type,
        bool explicit_cast)
{
    if (type_is_invalid(other_type)) {
        return false;
    }

    ZigValue *const_val = ir_resolve_const(ira, instruction, LazyOkNoUndef);
    if (const_val == nullptr)
        return false;

    if (const_val->special == ConstValSpecialLazy) {
        switch (const_val->data.x_lazy->id) {
            case LazyValueIdAlignOf: {
                // This is guaranteed to fit into a u29
                if (other_type->id == ZigTypeIdComptimeInt)
                    return true;
                size_t align_bits = get_align_amt_type(ira->codegen)->data.integral.bit_count;
                if (other_type->id == ZigTypeIdInt && !other_type->data.integral.is_signed &&
                    other_type->data.integral.bit_count >= align_bits)
                {
                    return true;
                }
                break;
            }
            case LazyValueIdSizeOf: {
                // This is guaranteed to fit into a usize
                if (other_type->id == ZigTypeIdComptimeInt)
                    return true;
                size_t usize_bits = ira->codegen->builtin_types.entry_usize->data.integral.bit_count;
                if (other_type->id == ZigTypeIdInt && !other_type->data.integral.is_signed &&
                    other_type->data.integral.bit_count >= usize_bits)
                {
                    return true;
                }
                break;
            }
            default:
                break;
        }
    }

    const_val = ir_resolve_const(ira, instruction, UndefBad);
    if (const_val == nullptr)
        return false;

    bool const_val_is_int = (const_val->type->id == ZigTypeIdInt || const_val->type->id == ZigTypeIdComptimeInt);
    bool const_val_is_float = (const_val->type->id == ZigTypeIdFloat || const_val->type->id == ZigTypeIdComptimeFloat);
    assert(const_val_is_int || const_val_is_float);

    if (const_val_is_int && other_type->id == ZigTypeIdComptimeFloat) {
        return true;
    }
    if (other_type->id == ZigTypeIdFloat) {
        if (const_val->type->id == ZigTypeIdComptimeInt || const_val->type->id == ZigTypeIdComptimeFloat) {
            return true;
        }
        if (const_val->type->id == ZigTypeIdInt) {
            BigFloat tmp_bf;
            bigfloat_init_bigint(&tmp_bf, &const_val->data.x_bigint);
            BigFloat orig_bf;
            switch (other_type->data.floating.bit_count) {
                case 16: {
                    float16_t tmp = bigfloat_to_f16(&tmp_bf);
                    bigfloat_init_16(&orig_bf, tmp);
                    break;
                }
                case 32: {
                    float tmp = bigfloat_to_f32(&tmp_bf);
                    bigfloat_init_32(&orig_bf, tmp);
                    break;
                }
                case 64: {
                    double tmp = bigfloat_to_f64(&tmp_bf);
                    bigfloat_init_64(&orig_bf, tmp);
                    break;
                }
                case 80:
                    zig_panic("TODO");
                case 128: {
                    float128_t tmp = bigfloat_to_f128(&tmp_bf);
                    bigfloat_init_128(&orig_bf, tmp);
                    break;
                }
                default:
                    zig_unreachable();
            }
            BigInt orig_bi;
            bigint_init_bigfloat(&orig_bi, &orig_bf);
            if (bigint_cmp(&orig_bi, &const_val->data.x_bigint) == CmpEQ) {
                return true;
            }
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, &const_val->data.x_bigint, 10);
            ir_add_error_node(ira, instruction->base.source_node,
                buf_sprintf("type %s cannot represent integer value %s",
                    buf_ptr(&other_type->name),
                    buf_ptr(val_buf)));
            return false;
        }
        if (other_type->data.floating.bit_count >= const_val->type->data.floating.bit_count) {
            return true;
        }
        switch (other_type->data.floating.bit_count) {
            case 16:
                switch (const_val->type->data.floating.bit_count) {
                    case 32: {
                        float16_t tmp = zig_double_to_f16(const_val->data.x_f32);
                        float orig = zig_f16_to_double(tmp);
                        if (const_val->data.x_f32 == orig) {
                            return true;
                        }
                        break;
                    }
                    case 64: {
                        float16_t tmp = zig_double_to_f16(const_val->data.x_f64);
                        double orig = zig_f16_to_double(tmp);
                        if (const_val->data.x_f64 == orig) {
                            return true;
                        }
                        break;
                    }
                    case 80:
                        zig_panic("TODO");
                    case 128: {
                        float16_t tmp = f128M_to_f16(&const_val->data.x_f128);
                        float128_t orig;
                        f16_to_f128M(tmp, &orig);
                        if (f128M_eq(&orig, &const_val->data.x_f128)) {
                            return true;
                        }
                        break;
                    }
                    default:
                        zig_unreachable();
                }
                break;
            case 32:
                switch (const_val->type->data.floating.bit_count) {
                    case 64: {
                        float tmp = const_val->data.x_f64;
                        double orig = tmp;
                        if (const_val->data.x_f64 == orig) {
                            return true;
                        }
                        break;
                    }
                    case 80:
                        zig_panic("TODO");
                    case 128: {
                        float32_t tmp = f128M_to_f32(&const_val->data.x_f128);
                        float128_t orig;
                        f32_to_f128M(tmp, &orig);
                        if (f128M_eq(&orig, &const_val->data.x_f128)) {
                            return true;
                        }
                        break;
                    }
                    default:
                        zig_unreachable();
                }
                break;
            case 64:
                switch (const_val->type->data.floating.bit_count) {
                    case 80:
                        zig_panic("TODO");
                    case 128: {
                        float64_t tmp = f128M_to_f64(&const_val->data.x_f128);
                        float128_t orig;
                        f64_to_f128M(tmp, &orig);
                        if (f128M_eq(&orig, &const_val->data.x_f128)) {
                            return true;
                        }
                        break;
                    }
                    default:
                        zig_unreachable();
                }
                break;
            case 80:
                assert(const_val->type->data.floating.bit_count == 128);
                zig_panic("TODO");
            case 128:
                return true;
            default:
                zig_unreachable();
        }
        Buf *val_buf = buf_alloc();
        float_append_buf(val_buf, const_val);
        ir_add_error_node(ira, instruction->base.source_node,
            buf_sprintf("cast of value %s to type '%s' loses information",
                buf_ptr(val_buf),
                buf_ptr(&other_type->name)));
        return false;
    } else if (other_type->id == ZigTypeIdInt && const_val_is_int) {
        if (!other_type->data.integral.is_signed && const_val->data.x_bigint.is_negative) {
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, &const_val->data.x_bigint, 10);
            ir_add_error_node(ira, instruction->base.source_node,
                buf_sprintf("cannot cast negative value %s to unsigned integer type '%s'",
                    buf_ptr(val_buf),
                    buf_ptr(&other_type->name)));
            return false;
        }
        if (bigint_fits_in_bits(&const_val->data.x_bigint, other_type->data.integral.bit_count,
                    other_type->data.integral.is_signed))
        {
            return true;
        }
    } else if (const_val_fits_in_num_lit(const_val, other_type)) {
        return true;
    } else if (other_type->id == ZigTypeIdOptional) {
        ZigType *child_type = other_type->data.maybe.child_type;
        if (const_val_fits_in_num_lit(const_val, child_type)) {
            return true;
        } else if (child_type->id == ZigTypeIdInt && const_val_is_int) {
            if (!child_type->data.integral.is_signed && const_val->data.x_bigint.is_negative) {
                Buf *val_buf = buf_alloc();
                bigint_append_buf(val_buf, &const_val->data.x_bigint, 10);
                ir_add_error_node(ira, instruction->base.source_node,
                    buf_sprintf("cannot cast negative value %s to unsigned integer type '%s'",
                        buf_ptr(val_buf),
                        buf_ptr(&child_type->name)));
                return false;
            }
            if (bigint_fits_in_bits(&const_val->data.x_bigint,
                        child_type->data.integral.bit_count,
                        child_type->data.integral.is_signed))
            {
                return true;
            }
        } else if (child_type->id == ZigTypeIdFloat && const_val_is_float) {
            return true;
        }
    }
    if (explicit_cast && (other_type->id == ZigTypeIdInt || other_type->id == ZigTypeIdComptimeInt) &&
        const_val_is_float)
    {
        if (float_has_fraction(const_val)) {
            Buf *val_buf = buf_alloc();
            float_append_buf(val_buf, const_val);

            ir_add_error_node(ira, instruction->base.source_node,
                buf_sprintf("fractional component prevents float value %s from being cast to type '%s'",
                    buf_ptr(val_buf),
                    buf_ptr(&other_type->name)));
            return false;
        } else {
            if (other_type->id == ZigTypeIdComptimeInt) {
                return true;
            } else {
                BigInt bigint;
                float_init_bigint(&bigint, const_val);
                if (bigint_fits_in_bits(&bigint, other_type->data.integral.bit_count,
                    other_type->data.integral.is_signed))
                {
                    return true;
                }
            }
        }
    }

    const char *num_lit_str;
    Buf *val_buf = buf_alloc();
    if (const_val_is_float) {
        num_lit_str = "float";
        float_append_buf(val_buf, const_val);
    } else {
        num_lit_str = "integer";
        bigint_append_buf(val_buf, &const_val->data.x_bigint, 10);
    }

    ir_add_error_node(ira, instruction->base.source_node,
        buf_sprintf("%s value %s cannot be coerced to type '%s'",
            num_lit_str,
            buf_ptr(val_buf),
            buf_ptr(&other_type->name)));
    return false;
}

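// A union is "tagged" when it was declared with an automatic enum tag (union(enum)) or
// with an explicit tag type argument.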
static bool is_tagged_union(ZigType *type) {
    if (type->id != ZigTypeIdUnion)
        return false;
    return (type->data.unionation.decl_node->data.container_decl.auto_enum ||
        type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr);
}

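// Fills `errors`, indexed by error value, with the entries of the given error set. The
// table must be zero-initialized and large enough to hold every declared error.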
static void populate_error_set_table(ErrorTableEntry **errors, ZigType *set) {
    assert(set->id == ZigTypeIdErrorSet);
    for (uint32_t i = 0; i < set->data.error_set.err_count; i += 1) {
        ErrorTableEntry *error_entry = set->data.error_set.errors[i];
        assert(errors[error_entry->value] == nullptr);
        errors[error_entry->value] = error_entry;
    }
}

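// Of two entries for the same error value, prefer the one declared as an explicit error
// set field, since that declaration is the better-documented one.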
static ErrorTableEntry *better_documented_error(ErrorTableEntry *preferred, ErrorTableEntry *other) {
    if (preferred->decl_node->type == NodeTypeErrorSetField)
        return preferred;
    if (other->decl_node->type == NodeTypeErrorSetField)
        return other;
    return preferred;
}

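// Builds a new error set type containing the errors present in both set1 and set2.
// Inferred error sets are resolved first, and the global error set acts as the identity:
// intersecting with it yields the other set.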
static ZigType *get_error_set_intersection(IrAnalyze *ira, ZigType *set1, ZigType *set2,
        AstNode *source_node)
{
    assert(set1->id == ZigTypeIdErrorSet);
    assert(set2->id == ZigTypeIdErrorSet);

    if (!resolve_inferred_error_set(ira->codegen, set1, source_node)) {
        return ira->codegen->builtin_types.entry_invalid;
    }
    if (!resolve_inferred_error_set(ira->codegen, set2, source_node)) {
        return ira->codegen->builtin_types.entry_invalid;
    }
    if (type_is_global_error_set(set1)) {
        return set2;
    }
    if (type_is_global_error_set(set2)) {
        return set1;
    }
    size_t errors_count = ira->codegen->errors_by_index.length;
    ErrorTableEntry **errors = heap::c_allocator.allocate<ErrorTableEntry *>(errors_count);
    populate_error_set_table(errors, set1);
    ZigList<ErrorTableEntry *> intersection_list = {};

    ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
    buf_resize(&err_set_type->name, 0);
    buf_appendf(&err_set_type->name, "error{");

    bool need_comma = false;
    for (uint32_t i = 0; i < set2->data.error_set.err_count; i += 1) {
        ErrorTableEntry *error_entry = set2->data.error_set.errors[i];
        ErrorTableEntry *existing_entry = errors[error_entry->value];
        if (existing_entry != nullptr) {
            // prefer the one with docs
            const char *comma = need_comma ? "," : "";
            need_comma = true;
            ErrorTableEntry *existing_entry_with_docs = better_documented_error(existing_entry, error_entry);
            intersection_list.append(existing_entry_with_docs);
            buf_appendf(&err_set_type->name, "%s%s", comma, buf_ptr(&existing_entry_with_docs->name));
        }
    }
    heap::c_allocator.deallocate(errors, errors_count);

    err_set_type->data.error_set.err_count = intersection_list.length;
    err_set_type->data.error_set.errors = intersection_list.items;
    err_set_type->size_in_bits = ira->codegen->builtin_types.entry_global_error_set->size_in_bits;
    err_set_type->abi_align = ira->codegen->builtin_types.entry_global_error_set->abi_align;
    err_set_type->abi_size = ira->codegen->builtin_types.entry_global_error_set->abi_size;

    buf_appendf(&err_set_type->name, "}");

    return err_set_type;
}

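// Determines whether a value of actual_type can be used where wanted_type is expected
// without any runtime conversion: the types must have the same in-memory representation
// and may differ only in ways that are safe to "const cast" (see the rules listed below).
// On failure, the returned ConstCastOnly describes which part of the type mismatched.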
static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, ZigType *wanted_type,
        ZigType *actual_type, AstNode *source_node, bool wanted_is_mutable)
{
    CodeGen *g = ira->codegen;
    ConstCastOnly result = {};
    result.id = ConstCastResultIdOk;

    Error err;

    if (wanted_type == actual_type)
        return result;

    // If pointers have the same representation in memory, they can be "const-casted".
    // `const` attribute can be gained
    // `volatile` attribute can be gained
    // `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer)
    //   but only if !wanted_is_mutable
    // alignment can be decreased
    // bit offset attributes must match exactly
    // PtrLenSingle/PtrLenUnknown must match exactly, but PtrLenC matches either one
    // sentinel-terminated pointers can coerce into PtrLenUnknown
    ZigType *wanted_ptr_type = get_src_ptr_type(wanted_type);
    ZigType *actual_ptr_type = get_src_ptr_type(actual_type);
    bool wanted_allows_zero = ptr_allows_addr_zero(wanted_type);
    bool actual_allows_zero = ptr_allows_addr_zero(actual_type);
    bool wanted_is_c_ptr = wanted_type->id == ZigTypeIdPointer && wanted_type->data.pointer.ptr_len == PtrLenC;
    bool actual_is_c_ptr = actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenC;
    bool wanted_opt_or_ptr = wanted_ptr_type != nullptr && wanted_ptr_type->id == ZigTypeIdPointer;
    bool actual_opt_or_ptr = actual_ptr_type != nullptr && actual_ptr_type->id == ZigTypeIdPointer;
    if (wanted_opt_or_ptr && actual_opt_or_ptr) {
        bool ok_null_term_ptrs =
            wanted_ptr_type->data.pointer.sentinel == nullptr ||
            (actual_ptr_type->data.pointer.sentinel != nullptr &&
             const_values_equal(ira->codegen, wanted_ptr_type->data.pointer.sentinel,
                 actual_ptr_type->data.pointer.sentinel)) ||
            actual_ptr_type->data.pointer.ptr_len == PtrLenC;
        if (!ok_null_term_ptrs) {
            result.id = ConstCastResultIdPtrSentinel;
            result.data.bad_ptr_sentinel = heap::c_allocator.allocate_nonzero<ConstCastPtrSentinel>(1);
            result.data.bad_ptr_sentinel->wanted_type = wanted_ptr_type;
            result.data.bad_ptr_sentinel->actual_type = actual_ptr_type;
            return result;
        }
        bool ptr_lens_equal = actual_ptr_type->data.pointer.ptr_len == wanted_ptr_type->data.pointer.ptr_len;
        if (!(ptr_lens_equal || wanted_is_c_ptr || actual_is_c_ptr)) {
            result.id = ConstCastResultIdPtrLens;
            return result;
        }

        bool ok_cv_qualifiers =
            (!actual_ptr_type->data.pointer.is_const || wanted_ptr_type->data.pointer.is_const) &&
            (!actual_ptr_type->data.pointer.is_volatile || wanted_ptr_type->data.pointer.is_volatile);
        if (!ok_cv_qualifiers) {
            result.id = ConstCastResultIdCV;
            result.data.bad_cv = heap::c_allocator.allocate_nonzero<ConstCastBadCV>(1);
            result.data.bad_cv->wanted_type = wanted_ptr_type;
            result.data.bad_cv->actual_type = actual_ptr_type;
            return result;
        }

        ConstCastOnly child = types_match_const_cast_only(ira, wanted_ptr_type->data.pointer.child_type,
                actual_ptr_type->data.pointer.child_type, source_node, !wanted_ptr_type->data.pointer.is_const);
        if (child.id == ConstCastResultIdInvalid)
            return child;
        if (child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdPointerChild;
            result.data.pointer_mismatch = heap::c_allocator.allocate_nonzero<ConstCastPointerMismatch>(1);
            result.data.pointer_mismatch->child = child;
            result.data.pointer_mismatch->wanted_child = wanted_ptr_type->data.pointer.child_type;
            result.data.pointer_mismatch->actual_child = actual_ptr_type->data.pointer.child_type;
            return result;
        }
        bool ok_allows_zero = (wanted_allows_zero &&
                (actual_allows_zero || !wanted_is_mutable)) ||
            (!wanted_allows_zero && !actual_allows_zero);
        if (!ok_allows_zero) {
            result.id = ConstCastResultIdBadAllowsZero;
            result.data.bad_allows_zero = heap::c_allocator.allocate_nonzero<ConstCastBadAllowsZero>(1);
            result.data.bad_allows_zero->wanted_type = wanted_type;
            result.data.bad_allows_zero->actual_type = actual_type;
            return result;
        }
        if ((err = type_resolve(g, actual_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        if ((err = type_resolve(g, wanted_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        if ((err = type_resolve(g, wanted_type, ResolveStatusZeroBitsKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        if ((err = type_resolve(g, actual_type, ResolveStatusZeroBitsKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        if (type_has_bits(g, wanted_type) == type_has_bits(g, actual_type) &&
            actual_ptr_type->data.pointer.bit_offset_in_host == wanted_ptr_type->data.pointer.bit_offset_in_host &&
            actual_ptr_type->data.pointer.host_int_bytes == wanted_ptr_type->data.pointer.host_int_bytes &&
            get_ptr_align(ira->codegen, actual_ptr_type) >= get_ptr_align(ira->codegen, wanted_ptr_type))
        {
            return result;
        }
    }

    // arrays
    if (wanted_type->id == ZigTypeIdArray && actual_type->id == ZigTypeIdArray &&
        wanted_type->data.array.len == actual_type->data.array.len)
    {
        ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.array.child_type,
                actual_type->data.array.child_type, source_node, wanted_is_mutable);
        if (child.id == ConstCastResultIdInvalid)
            return child;
        if (child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdArrayChild;
            result.data.array_mismatch = heap::c_allocator.allocate_nonzero<ConstCastArrayMismatch>(1);
            result.data.array_mismatch->child = child;
            result.data.array_mismatch->wanted_child = wanted_type->data.array.child_type;
            result.data.array_mismatch->actual_child = actual_type->data.array.child_type;
            return result;
        }
        bool ok_null_terminated = (wanted_type->data.array.sentinel == nullptr) ||
            (actual_type->data.array.sentinel != nullptr &&
            const_values_equal(ira->codegen, wanted_type->data.array.sentinel, actual_type->data.array.sentinel));
        if (!ok_null_terminated) {
            result.id = ConstCastResultIdSentinelArrays;
            result.data.sentinel_arrays = heap::c_allocator.allocate_nonzero<ConstCastBadNullTermArrays>(1);
            result.data.sentinel_arrays->child = child;
            result.data.sentinel_arrays->wanted_type = wanted_type;
            result.data.sentinel_arrays->actual_type = actual_type;
            return result;
        }
        return result;
    }

    // slice const
    if (is_slice(wanted_type) && is_slice(actual_type)) {
        ZigType *actual_ptr_type = actual_type->data.structure.fields[slice_ptr_index]->type_entry;
        ZigType *wanted_ptr_type = wanted_type->data.structure.fields[slice_ptr_index]->type_entry;
        if ((err = type_resolve(g, actual_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        if ((err = type_resolve(g, wanted_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown))) {
            result.id = ConstCastResultIdInvalid;
            return result;
        }
        bool ok_sentinels =
            wanted_ptr_type->data.pointer.sentinel == nullptr ||
            (actual_ptr_type->data.pointer.sentinel != nullptr &&
             const_values_equal(ira->codegen, wanted_ptr_type->data.pointer.sentinel,
                 actual_ptr_type->data.pointer.sentinel));
        if (!ok_sentinels) {
            result.id = ConstCastResultIdPtrSentinel;
            result.data.bad_ptr_sentinel = heap::c_allocator.allocate_nonzero<ConstCastPtrSentinel>(1);
            result.data.bad_ptr_sentinel->wanted_type = wanted_ptr_type;
            result.data.bad_ptr_sentinel->actual_type = actual_ptr_type;
            return result;
        }
        if ((!actual_ptr_type->data.pointer.is_const || wanted_ptr_type->data.pointer.is_const) &&
            (!actual_ptr_type->data.pointer.is_volatile || wanted_ptr_type->data.pointer.is_volatile) &&
            actual_ptr_type->data.pointer.bit_offset_in_host == wanted_ptr_type->data.pointer.bit_offset_in_host &&
            actual_ptr_type->data.pointer.host_int_bytes == wanted_ptr_type->data.pointer.host_int_bytes &&
            get_ptr_align(g, actual_ptr_type) >= get_ptr_align(g, wanted_ptr_type))
        {
            ConstCastOnly child = types_match_const_cast_only(ira, wanted_ptr_type->data.pointer.child_type,
                    actual_ptr_type->data.pointer.child_type, source_node, !wanted_ptr_type->data.pointer.is_const);
            if (child.id == ConstCastResultIdInvalid)
                return child;
            if (child.id != ConstCastResultIdOk) {
                result.id = ConstCastResultIdSliceChild;
                result.data.slice_mismatch = heap::c_allocator.allocate_nonzero<ConstCastSliceMismatch>(1);
                result.data.slice_mismatch->child = child;
                result.data.slice_mismatch->actual_child = actual_ptr_type->data.pointer.child_type;
                result.data.slice_mismatch->wanted_child = wanted_ptr_type->data.pointer.child_type;
            }
            return result;
        }
    }

    // maybe
    if (wanted_type->id == ZigTypeIdOptional && actual_type->id == ZigTypeIdOptional) {
        ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.maybe.child_type,
                actual_type->data.maybe.child_type, source_node, wanted_is_mutable);
        if (child.id == ConstCastResultIdInvalid)
            return child;
        if (child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdOptionalChild;
            result.data.optional = heap::c_allocator.allocate_nonzero<ConstCastOptionalMismatch>(1);
            result.data.optional->child = child;
            result.data.optional->wanted_child = wanted_type->data.maybe.child_type;
            result.data.optional->actual_child = actual_type->data.maybe.child_type;
        }
        return result;
    }

    // error union
    if (wanted_type->id == ZigTypeIdErrorUnion && actual_type->id == ZigTypeIdErrorUnion) {
        ConstCastOnly payload_child = types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type,
                actual_type->data.error_union.payload_type, source_node, wanted_is_mutable);
        if (payload_child.id == ConstCastResultIdInvalid)
            return payload_child;
        if (payload_child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdErrorUnionPayload;
            result.data.error_union_payload = heap::c_allocator.allocate_nonzero<ConstCastErrUnionPayloadMismatch>(1);
            result.data.error_union_payload->child = payload_child;
            result.data.error_union_payload->wanted_payload = wanted_type->data.error_union.payload_type;
            result.data.error_union_payload->actual_payload = actual_type->data.error_union.payload_type;
            return result;
        }
        ConstCastOnly error_set_child = types_match_const_cast_only(ira, wanted_type->data.error_union.err_set_type,
                actual_type->data.error_union.err_set_type, source_node, wanted_is_mutable);
        if (error_set_child.id == ConstCastResultIdInvalid)
            return error_set_child;
        if (error_set_child.id != ConstCastResultIdOk) {
            result.id = ConstCastResultIdErrorUnionErrorSet;
            result.data.error_union_error_set = heap::c_allocator.allocate_nonzero<ConstCastErrUnionErrSetMismatch>(1);
            result.data.error_union_error_set->child = error_set_child;
            result.data.error_union_error_set->wanted_err_set = wanted_type->data.error_union.err_set_type;
            result.data.error_union_error_set->actual_err_set = actual_type->data.error_union.err_set_type;
            return result;
        }
        return result;
    }

    // error set
    if (wanted_type->id == ZigTypeIdErrorSet && actual_type->id == ZigTypeIdErrorSet) {
        ZigType *contained_set = actual_type;
        ZigType *container_set = wanted_type;

        // if the container set is inferred, then this will always work.
        if (container_set->data.error_set.infer_fn != nullptr && container_set->data.error_set.incomplete) {
            return result;
        }
        // if the container set is the global one, it will always work.
        if (type_is_global_error_set(container_set)) {
            return result;
        }

        if (!resolve_inferred_error_set(ira->codegen, contained_set, source_node)) {
            result.id = ConstCastResultIdUnresolvedInferredErrSet;
            return result;
        }

        if (type_is_global_error_set(contained_set)) {
            result.id = ConstCastResultIdErrSetGlobal;
            return result;
        }

        size_t errors_count = g->errors_by_index.length;
        ErrorTableEntry **errors = heap::c_allocator.allocate<ErrorTableEntry *>(errors_count);
        for (uint32_t i = 0; i < container_set->data.error_set.err_count; i += 1) {
            ErrorTableEntry *error_entry = container_set->data.error_set.errors[i];
            assert(errors[error_entry->value] == nullptr);
            errors[error_entry->value] = error_entry;
        }
        for (uint32_t i = 0; i < contained_set->data.error_set.err_count; i += 1) {
            ErrorTableEntry *contained_error_entry = contained_set->data.error_set.errors[i];
            ErrorTableEntry *error_entry = errors[contained_error_entry->value];
            if (error_entry == nullptr) {
                if (result.id == ConstCastResultIdOk) {
                    result.id = ConstCastResultIdErrSet;
                    result.data.error_set_mismatch = heap::c_allocator.create<ConstCastErrSetMismatch>();
                }
                result.data.error_set_mismatch->missing_errors.append(contained_error_entry);
            }
        }
        heap::c_allocator.deallocate(errors, errors_count);
        return result;
    }

    // fn
    if (wanted_type->id == ZigTypeIdFn &&
        actual_type->id == ZigTypeIdFn)
    {
        if (wanted_type->data.fn.fn_type_id.alignment > actual_type->data.fn.fn_type_id.alignment) {
            result.id = ConstCastResultIdFnAlign;
            return result;
        }
        if (wanted_type->data.fn.fn_type_id.is_var_args != actual_type->data.fn.fn_type_id.is_var_args) {
            result.id = ConstCastResultIdFnVarArgs;
            return result;
        }
        if (wanted_type->data.fn.is_generic != actual_type->data.fn.is_generic) {
            result.id = ConstCastResultIdFnIsGeneric;
            return result;
        }
        if (!wanted_type->data.fn.is_generic &&
            actual_type->data.fn.fn_type_id.return_type->id != ZigTypeIdUnreachable)
        {
            ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.fn.fn_type_id.return_type,
                    actual_type->data.fn.fn_type_id.return_type, source_node, false);
            if (child.id == ConstCastResultIdInvalid)
                return child;
            if (child.id != ConstCastResultIdOk) {
                result.id = ConstCastResultIdFnReturnType;
                result.data.return_type = heap::c_allocator.allocate_nonzero<ConstCastOnly>(1);
                *result.data.return_type = child;
                return result;
            }
        }
        if (wanted_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
            result.id = ConstCastResultIdFnArgCount;
            return result;
        }
        if (wanted_type->data.fn.fn_type_id.next_param_index != actual_type->data.fn.fn_type_id.next_param_index) {
            result.id = ConstCastResultIdFnGenericArgCount;
            return result;
        }
        assert(wanted_type->data.fn.is_generic ||
                wanted_type->data.fn.fn_type_id.next_param_index == wanted_type->data.fn.fn_type_id.param_count);
        for (size_t i = 0; i < wanted_type->data.fn.fn_type_id.param_count; i += 1) {
            // note: the wanted/actual order is reversed for parameters (parameter types are contravariant)
            FnTypeParamInfo *actual_param_info = &actual_type->data.fn.fn_type_id.param_info[i];
            FnTypeParamInfo *expected_param_info = &wanted_type->data.fn.fn_type_id.param_info[i];

            ConstCastOnly arg_child = types_match_const_cast_only(ira, actual_param_info->type,
                    expected_param_info->type, source_node, false);
            if (arg_child.id == ConstCastResultIdInvalid)
                return arg_child;
            if (arg_child.id != ConstCastResultIdOk) {
                result.id = ConstCastResultIdFnArg;
                result.data.fn_arg.arg_index = i;
                result.data.fn_arg.actual_param_type = actual_param_info->type;
                result.data.fn_arg.expected_param_type = expected_param_info->type;
                result.data.fn_arg.child = heap::c_allocator.allocate_nonzero<ConstCastOnly>(1);
                *result.data.fn_arg.child = arg_child;
                return result;
            }

            if (expected_param_info->is_noalias != actual_param_info->is_noalias) {
                result.id = ConstCastResultIdFnArgNoAlias;
                result.data.arg_no_alias.arg_index = i;
                return result;
            }
        }
        if (wanted_type->data.fn.fn_type_id.cc != actual_type->data.fn.fn_type_id.cc) {
            // ConstCastResultIdFnCC is guaranteed to be the last one reported, meaning everything else is ok.
            result.id = ConstCastResultIdFnCC;
            return result;
        }
        return result;
    }

    if (wanted_type->id == ZigTypeIdInt && actual_type->id == ZigTypeIdInt) {
        if (wanted_type->data.integral.is_signed != actual_type->data.integral.is_signed ||
            wanted_type->data.integral.bit_count != actual_type->data.integral.bit_count)
        {
            result.id = ConstCastResultIdIntShorten;
            result.data.int_shorten = heap::c_allocator.allocate_nonzero<ConstCastIntShorten>(1);
            result.data.int_shorten->wanted_type = wanted_type;
            result.data.int_shorten->actual_type = actual_type;
            return result;
        }
        return result;
    }

    result.id = ConstCastResultIdType;
    result.data.type_mismatch = heap::c_allocator.allocate_nonzero<ConstCastTypeMismatch>(1);
    result.data.type_mismatch->wanted_type = wanted_type;
    result.data.type_mismatch->actual_type = actual_type;
    return result;
}

static void update_errors_helper(CodeGen *g, ErrorTableEntry ***errors, size_t *errors_count) {
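    // Grow the `errors` scratch table so it can be indexed by every error value declared
    // so far. The new tail is assumed to come back zero-initialized, since callers assert
    // that a slot is nullptr before filling it.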
    size_t old_errors_count = *errors_count;
    *errors_count = g->errors_by_index.length;
    *errors = heap::c_allocator.reallocate(*errors, old_errors_count, *errors_count);
}

static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigType *expected_type,
        IrInstGen **instructions, size_t instruction_count)
{
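    // Computes the common ("peer") result type of the given instructions, reporting a
    // compile error and returning the invalid type entry when no such type exists.
    // Illustrative examples of the rules below: `u8` and `u16` resolve to `u16`;
    // `T` and `null` resolve to `?T`; two error sets resolve to their union;
    // `*[2]u8` and `*[3]u8` resolve to a slice.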
    Error err;
    assert(instruction_count >= 1);
    IrInstGen *prev_inst;
    size_t i = 0;
    for (;;) {
        prev_inst = instructions[i];
        if (type_is_invalid(prev_inst->value->type)) {
            return ira->codegen->builtin_types.entry_invalid;
        }
        if (prev_inst->value->type->id == ZigTypeIdUnreachable) {
            i += 1;
            if (i == instruction_count) {
                return prev_inst->value->type;
            }
            continue;
        }
        break;
    }
    ErrorTableEntry **errors = nullptr;
    size_t errors_count = 0;
    ZigType *err_set_type = nullptr;
    if (prev_inst->value->type->id == ZigTypeIdErrorSet) {
        if (!resolve_inferred_error_set(ira->codegen, prev_inst->value->type, prev_inst->base.source_node)) {
            return ira->codegen->builtin_types.entry_invalid;
        }
        if (type_is_global_error_set(prev_inst->value->type)) {
            err_set_type = ira->codegen->builtin_types.entry_global_error_set;
        } else {
            err_set_type = prev_inst->value->type;
            update_errors_helper(ira->codegen, &errors, &errors_count);

            for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
                ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
                assert(errors[error_entry->value] == nullptr);
                errors[error_entry->value] = error_entry;
            }
        }
    }

    bool any_are_null = (prev_inst->value->type->id == ZigTypeIdNull);
    bool convert_to_const_slice = false;
    bool make_the_slice_const = false;
    bool make_the_pointer_const = false;
    for (; i < instruction_count; i += 1) {
        IrInstGen *cur_inst = instructions[i];
        ZigType *cur_type = cur_inst->value->type;
        ZigType *prev_type = prev_inst->value->type;

        if (type_is_invalid(cur_type)) {
            return cur_type;
        }

        if (prev_type == cur_type) {
            continue;
        }

        if (prev_type->id == ZigTypeIdUnreachable) {
            prev_inst = cur_inst;
            continue;
        }

        if (cur_type->id == ZigTypeIdUnreachable) {
            continue;
        }

        if (prev_type->id == ZigTypeIdErrorSet) {
            ir_assert_gen(err_set_type != nullptr, prev_inst);
            if (cur_type->id == ZigTypeIdErrorSet) {
                if (type_is_global_error_set(err_set_type)) {
                    continue;
                }
                bool allow_infer = cur_type->data.error_set.infer_fn != nullptr &&
                        cur_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
                if (!allow_infer && !resolve_inferred_error_set(ira->codegen, cur_type, cur_inst->base.source_node)) {
                    return ira->codegen->builtin_types.entry_invalid;
                }
                if (!allow_infer && type_is_global_error_set(cur_type)) {
                    err_set_type = ira->codegen->builtin_types.entry_global_error_set;
                    prev_inst = cur_inst;
                    continue;
                }

                // number of declared errors might have increased now
                update_errors_helper(ira->codegen, &errors, &errors_count);

                // if err_set_type is a superset of cur_type, keep err_set_type.
                // if cur_type is a superset of err_set_type, switch err_set_type to cur_type
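                // e.g. (illustrative): `error{A,B}` then `error{A}` keeps `error{A,B}`,
                // while `error{A}` then `error{B}` falls through to the union below.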
                bool prev_is_superset = true;
                for (uint32_t i = 0; i < cur_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *contained_error_entry = cur_type->data.error_set.errors[i];
                    ErrorTableEntry *error_entry = errors[contained_error_entry->value];
                    if (error_entry == nullptr) {
                        prev_is_superset = false;
                        break;
                    }
                }
                if (prev_is_superset) {
                    continue;
                }

                // unset everything in errors
                for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
                    errors[error_entry->value] = nullptr;
                }
                for (uint32_t i = 0, count = ira->codegen->errors_by_index.length; i < count; i += 1) {
                    assert(errors[i] == nullptr);
                }
                for (uint32_t i = 0; i < cur_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *error_entry = cur_type->data.error_set.errors[i];
                    assert(errors[error_entry->value] == nullptr);
                    errors[error_entry->value] = error_entry;
                }
                bool cur_is_superset = true;
                for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *contained_error_entry = err_set_type->data.error_set.errors[i];
                    ErrorTableEntry *error_entry = errors[contained_error_entry->value];
                    if (error_entry == nullptr) {
                        cur_is_superset = false;
                        break;
                    }
                }
                if (cur_is_superset) {
                    err_set_type = cur_type;
                    prev_inst = cur_inst;
                    assert(errors != nullptr);
                    continue;
                }

                // neither of them are supersets. so we invent a new error set type that is a union of both of them
                err_set_type = get_error_set_union(ira->codegen, errors, cur_type, err_set_type, nullptr);
                assert(errors != nullptr);
                continue;
            } else if (cur_type->id == ZigTypeIdErrorUnion) {
                if (type_is_global_error_set(err_set_type)) {
                    prev_inst = cur_inst;
                    continue;
                }
                ZigType *cur_err_set_type = cur_type->data.error_union.err_set_type;
                bool allow_infer = cur_err_set_type->data.error_set.infer_fn != nullptr &&
                    cur_err_set_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
                if (!allow_infer && !resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->base.source_node)) {
                    return ira->codegen->builtin_types.entry_invalid;
                }
                if (!allow_infer && type_is_global_error_set(cur_err_set_type)) {
                    err_set_type = ira->codegen->builtin_types.entry_global_error_set;
                    prev_inst = cur_inst;
                    continue;
                }

                update_errors_helper(ira->codegen, &errors, &errors_count);

                // test if err_set_type is a subset of cur_type's error set
                // unset everything in errors
                for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
                    errors[error_entry->value] = nullptr;
                }
                for (uint32_t i = 0, count = ira->codegen->errors_by_index.length; i < count; i += 1) {
                    assert(errors[i] == nullptr);
                }
                for (uint32_t i = 0; i < cur_err_set_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *error_entry = cur_err_set_type->data.error_set.errors[i];
                    assert(errors[error_entry->value] == nullptr);
                    errors[error_entry->value] = error_entry;
                }
                bool cur_is_superset = true;
                for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *contained_error_entry = err_set_type->data.error_set.errors[i];
                    ErrorTableEntry *error_entry = errors[contained_error_entry->value];
                    if (error_entry == nullptr) {
                        cur_is_superset = false;
                        break;
                    }
                }
                if (cur_is_superset) {
                    err_set_type = cur_err_set_type;
                    prev_inst = cur_inst;
                    assert(errors != nullptr);
                    continue;
                }

                // not a subset. invent new error set type, union of both of them
                err_set_type = get_error_set_union(ira->codegen, errors, cur_err_set_type, err_set_type, nullptr);
                prev_inst = cur_inst;
                assert(errors != nullptr);
                continue;
            } else {
                prev_inst = cur_inst;
                continue;
            }
        }

        if (cur_type->id == ZigTypeIdErrorSet) {
            bool allow_infer = cur_type->data.error_set.infer_fn != nullptr &&
                    cur_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
            if (!allow_infer && !resolve_inferred_error_set(ira->codegen, cur_type, cur_inst->base.source_node)) {
                return ira->codegen->builtin_types.entry_invalid;
            }
            if (!allow_infer && type_is_global_error_set(cur_type)) {
                err_set_type = ira->codegen->builtin_types.entry_global_error_set;
                continue;
            }
            if (err_set_type != nullptr && type_is_global_error_set(err_set_type)) {
                continue;
            }

            update_errors_helper(ira->codegen, &errors, &errors_count);

            if (err_set_type == nullptr) {
                bool allow_infer = false;
                if (prev_type->id == ZigTypeIdErrorUnion) {
                    err_set_type = prev_type->data.error_union.err_set_type;
                    allow_infer = err_set_type->data.error_set.infer_fn != nullptr &&
                        err_set_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
                } else {
                    err_set_type = cur_type;
                }

                if (!allow_infer && !resolve_inferred_error_set(ira->codegen, err_set_type, cur_inst->base.source_node)) {
                    return ira->codegen->builtin_types.entry_invalid;
                }

                if (!allow_infer && type_is_global_error_set(err_set_type)) {
                    err_set_type = ira->codegen->builtin_types.entry_global_error_set;
                    continue;
                }

                update_errors_helper(ira->codegen, &errors, &errors_count);

                for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
                    assert(errors[error_entry->value] == nullptr);
                    errors[error_entry->value] = error_entry;
                }
                if (err_set_type == cur_type) {
                    continue;
                }
            }
            // check if the cur type error set is a subset
            bool prev_is_superset = true;
            for (uint32_t i = 0; i < cur_type->data.error_set.err_count; i += 1) {
                ErrorTableEntry *contained_error_entry = cur_type->data.error_set.errors[i];
                ErrorTableEntry *error_entry = errors[contained_error_entry->value];
                if (error_entry == nullptr) {
                    prev_is_superset = false;
                    break;
                }
            }
            if (prev_is_superset) {
                continue;
            }
            // not a subset. invent new error set type, union of both of them
            err_set_type = get_error_set_union(ira->codegen, errors, err_set_type, cur_type, nullptr);
            assert(errors != nullptr);
            continue;
        }

        if (prev_type->id == ZigTypeIdErrorUnion && cur_type->id == ZigTypeIdErrorUnion) {
            ZigType *prev_payload_type = prev_type->data.error_union.payload_type;
            ZigType *cur_payload_type = cur_type->data.error_union.payload_type;

            bool const_cast_prev = types_match_const_cast_only(ira, prev_payload_type, cur_payload_type,
                    source_node, false).id == ConstCastResultIdOk;
            bool const_cast_cur = types_match_const_cast_only(ira, cur_payload_type, prev_payload_type,
                    source_node, false).id == ConstCastResultIdOk;

            if (const_cast_prev || const_cast_cur) {
                if (const_cast_cur) {
                    prev_inst = cur_inst;
                }

                ZigType *prev_err_set_type = (err_set_type == nullptr) ? prev_type->data.error_union.err_set_type : err_set_type;
                ZigType *cur_err_set_type = cur_type->data.error_union.err_set_type;
                if (prev_err_set_type == cur_err_set_type)
                    continue;

                bool allow_infer_prev = prev_err_set_type->data.error_set.infer_fn != nullptr &&
                        prev_err_set_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
                bool allow_infer_cur = cur_err_set_type->data.error_set.infer_fn != nullptr &&
                        cur_err_set_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;

                if (!allow_infer_prev && !resolve_inferred_error_set(ira->codegen, prev_err_set_type, cur_inst->base.source_node)) {
                    return ira->codegen->builtin_types.entry_invalid;
                }

                if (!allow_infer_cur && !resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->base.source_node)) {
                    return ira->codegen->builtin_types.entry_invalid;
                }

                if ((!allow_infer_prev && type_is_global_error_set(prev_err_set_type)) ||
                    (!allow_infer_cur && type_is_global_error_set(cur_err_set_type)))
                {
                    err_set_type = ira->codegen->builtin_types.entry_global_error_set;
                    continue;
                }

                update_errors_helper(ira->codegen, &errors, &errors_count);

                if (err_set_type == nullptr) {
                    err_set_type = prev_err_set_type;
                    for (uint32_t i = 0; i < prev_err_set_type->data.error_set.err_count; i += 1) {
                        ErrorTableEntry *error_entry = prev_err_set_type->data.error_set.errors[i];
                        assert(errors[error_entry->value] == nullptr);
                        errors[error_entry->value] = error_entry;
                    }
                }
                bool prev_is_superset = true;
                for (uint32_t i = 0; i < cur_err_set_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *contained_error_entry = cur_err_set_type->data.error_set.errors[i];
                    ErrorTableEntry *error_entry = errors[contained_error_entry->value];
                    if (error_entry == nullptr) {
                        prev_is_superset = false;
                        break;
                    }
                }
                if (prev_is_superset) {
                    continue;
                }
                // unset all the errors
                for (uint32_t i = 0; i < err_set_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *error_entry = err_set_type->data.error_set.errors[i];
                    errors[error_entry->value] = nullptr;
                }
                for (uint32_t i = 0, count = ira->codegen->errors_by_index.length; i < count; i += 1) {
                    assert(errors[i] == nullptr);
                }
                for (uint32_t i = 0; i < cur_err_set_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *error_entry = cur_err_set_type->data.error_set.errors[i];
                    assert(errors[error_entry->value] == nullptr);
                    errors[error_entry->value] = error_entry;
                }
                bool cur_is_superset = true;
                for (uint32_t i = 0; i < prev_err_set_type->data.error_set.err_count; i += 1) {
                    ErrorTableEntry *contained_error_entry = prev_err_set_type->data.error_set.errors[i];
                    ErrorTableEntry *error_entry = errors[contained_error_entry->value];
                    if (error_entry == nullptr) {
                        cur_is_superset = false;
                        break;
                    }
                }
                if (cur_is_superset) {
                    err_set_type = cur_err_set_type;
                    continue;
                }

                err_set_type = get_error_set_union(ira->codegen, errors, cur_err_set_type, prev_err_set_type, nullptr);
                continue;
            }
        }

        if (prev_type->id == ZigTypeIdNull) {
            prev_inst = cur_inst;
            any_are_null = true;
            continue;
        }

        if (cur_type->id == ZigTypeIdNull) {
            any_are_null = true;
            continue;
        }

        if (prev_type->id == ZigTypeIdEnum && cur_type->id == ZigTypeIdEnumLiteral) {
            TypeEnumField *field = find_enum_type_field(prev_type, cur_inst->value->data.x_enum_literal);
            if (field != nullptr) {
                continue;
            }
        }
        if (is_tagged_union(prev_type) && cur_type->id == ZigTypeIdEnumLiteral) {
            TypeUnionField *field = find_union_type_field(prev_type, cur_inst->value->data.x_enum_literal);
            if (field != nullptr) {
                continue;
            }
        }

        if (cur_type->id == ZigTypeIdEnum && prev_type->id == ZigTypeIdEnumLiteral) {
            TypeEnumField *field = find_enum_type_field(cur_type, prev_inst->value->data.x_enum_literal);
            if (field != nullptr) {
                prev_inst = cur_inst;
                continue;
            }
        }

        if (is_tagged_union(cur_type) && prev_type->id == ZigTypeIdEnumLiteral) {
            TypeUnionField *field = find_union_type_field(cur_type, prev_inst->value->data.x_enum_literal);
            if (field != nullptr) {
                prev_inst = cur_inst;
                continue;
            }
        }

        if (prev_type->id == ZigTypeIdPointer && prev_type->data.pointer.ptr_len == PtrLenC &&
            (cur_type->id == ZigTypeIdComptimeInt || cur_type->id == ZigTypeIdInt))
        {
            continue;
        }

        if (cur_type->id == ZigTypeIdPointer && cur_type->data.pointer.ptr_len == PtrLenC &&
            (prev_type->id == ZigTypeIdComptimeInt || prev_type->id == ZigTypeIdInt))
        {
            prev_inst = cur_inst;
            continue;
        }

        if (prev_type->id == ZigTypeIdPointer && cur_type->id == ZigTypeIdPointer) {
            if (prev_type->data.pointer.ptr_len == PtrLenC &&
                types_match_const_cast_only(ira, prev_type->data.pointer.child_type,
                    cur_type->data.pointer.child_type, source_node,
                    !prev_type->data.pointer.is_const).id == ConstCastResultIdOk)
            {
                continue;
            }
            if (cur_type->data.pointer.ptr_len == PtrLenC &&
                types_match_const_cast_only(ira, cur_type->data.pointer.child_type,
                    prev_type->data.pointer.child_type, source_node,
                    !cur_type->data.pointer.is_const).id == ConstCastResultIdOk)
            {
                prev_inst = cur_inst;
                continue;
            }
        }

        if (types_match_const_cast_only(ira, prev_type, cur_type, source_node, false).id == ConstCastResultIdOk) {
            continue;
        }

        if (types_match_const_cast_only(ira, cur_type, prev_type, source_node, false).id == ConstCastResultIdOk) {
            prev_inst = cur_inst;
            continue;
        }

        if (prev_type->id == ZigTypeIdInt &&
                   cur_type->id == ZigTypeIdInt &&
                   prev_type->data.integral.is_signed == cur_type->data.integral.is_signed)
        {
            if (cur_type->data.integral.bit_count > prev_type->data.integral.bit_count) {
                prev_inst = cur_inst;
            }
            continue;
        }

        if (prev_type->id == ZigTypeIdFloat && cur_type->id == ZigTypeIdFloat) {
            if (cur_type->data.floating.bit_count > prev_type->data.floating.bit_count) {
                prev_inst = cur_inst;
            }
            continue;
        }

        if (prev_type->id == ZigTypeIdErrorUnion &&
            types_match_const_cast_only(ira, prev_type->data.error_union.payload_type, cur_type,
                source_node, false).id == ConstCastResultIdOk)
        {
            continue;
        }

        if (cur_type->id == ZigTypeIdErrorUnion &&
            types_match_const_cast_only(ira, cur_type->data.error_union.payload_type, prev_type,
                source_node, false).id == ConstCastResultIdOk)
        {
            if (err_set_type != nullptr) {
                ZigType *cur_err_set_type = cur_type->data.error_union.err_set_type;
                bool allow_infer = cur_err_set_type->data.error_set.infer_fn != nullptr &&
                    cur_err_set_type->data.error_set.infer_fn == ira->new_irb.exec->fn_entry;
                if (!allow_infer && !resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->base.source_node)) {
                    return ira->codegen->builtin_types.entry_invalid;
                }
                if ((!allow_infer && type_is_global_error_set(cur_err_set_type)) ||
                    type_is_global_error_set(err_set_type))
                {
                    err_set_type = ira->codegen->builtin_types.entry_global_error_set;
                    prev_inst = cur_inst;
                    continue;
                }

                update_errors_helper(ira->codegen, &errors, &errors_count);

                err_set_type = get_error_set_union(ira->codegen, errors, err_set_type, cur_err_set_type, nullptr);
            }
            prev_inst = cur_inst;
            continue;
        }

        if (prev_type->id == ZigTypeIdOptional &&
            types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type,
                source_node, false).id == ConstCastResultIdOk)
        {
            continue;
        }

        if (cur_type->id == ZigTypeIdOptional &&
            types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type,
                source_node, false).id == ConstCastResultIdOk)
        {
            prev_inst = cur_inst;
            continue;
        }

        if (prev_type->id == ZigTypeIdOptional &&
            types_match_const_cast_only(ira, cur_type, prev_type->data.maybe.child_type,
                source_node, false).id == ConstCastResultIdOk)
        {
            prev_inst = cur_inst;
            any_are_null = true;
            continue;
        }

        if (cur_type->id == ZigTypeIdOptional &&
            types_match_const_cast_only(ira, prev_type, cur_type->data.maybe.child_type,
                source_node, false).id == ConstCastResultIdOk)
        {
            any_are_null = true;
            continue;
        }

        if (cur_type->id == ZigTypeIdUndefined) {
            continue;
        }

        if (prev_type->id == ZigTypeIdUndefined) {
            prev_inst = cur_inst;
            continue;
        }

        if (prev_type->id == ZigTypeIdComptimeInt ||
                    prev_type->id == ZigTypeIdComptimeFloat)
        {
            if (ir_num_lit_fits_in_other_type(ira, prev_inst, cur_type, false)) {
                prev_inst = cur_inst;
                continue;
            } else {
                return ira->codegen->builtin_types.entry_invalid;
            }
        }

        if (cur_type->id == ZigTypeIdComptimeInt ||
                   cur_type->id == ZigTypeIdComptimeFloat)
        {
            if (ir_num_lit_fits_in_other_type(ira, cur_inst, prev_type, false)) {
                continue;
            } else {
                return ira->codegen->builtin_types.entry_invalid;
            }
        }

        // *[N]T to [*]T
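        // e.g. (illustrative): `*[3]u8` and `[*]u8` peer-resolve to `[*]u8`, const-qualified
        // when the array pointer is const and the unknown-length pointer is not.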
        if (prev_type->id == ZigTypeIdPointer &&
            prev_type->data.pointer.ptr_len == PtrLenSingle &&
            prev_type->data.pointer.child_type->id == ZigTypeIdArray &&
            ((cur_type->id == ZigTypeIdPointer && cur_type->data.pointer.ptr_len == PtrLenUnknown)))
        {
            convert_to_const_slice = false;
            prev_inst = cur_inst;

            if (prev_type->data.pointer.is_const && !cur_type->data.pointer.is_const) {
                // const array pointer and non-const unknown pointer
                make_the_pointer_const = true;
            }
            continue;
        }

        // *[N]T to [*]T
        if (cur_type->id == ZigTypeIdPointer &&
            cur_type->data.pointer.ptr_len == PtrLenSingle &&
            cur_type->data.pointer.child_type->id == ZigTypeIdArray &&
            ((prev_type->id == ZigTypeIdPointer && prev_type->data.pointer.ptr_len == PtrLenUnknown)))
        {
            if (cur_type->data.pointer.is_const && !prev_type->data.pointer.is_const) {
                // const array pointer and non-const unknown pointer
                make_the_pointer_const = true;
            }
            continue;
        }

        // *[N]T to []T
        // *[N]T to E![]T
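        // e.g. (illustrative): `*[3]u8` and `[]u8` peer-resolve to `[]u8`, and `*[3]u8`
        // and `anyerror![]u8` peer-resolve to `anyerror![]u8`.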
        if (cur_type->id == ZigTypeIdPointer &&
            cur_type->data.pointer.ptr_len == PtrLenSingle &&
            cur_type->data.pointer.child_type->id == ZigTypeIdArray &&
            ((prev_type->id == ZigTypeIdErrorUnion && is_slice(prev_type->data.error_union.payload_type)) ||
                is_slice(prev_type)))
        {
            ZigType *array_type = cur_type->data.pointer.child_type;
            ZigType *slice_type = (prev_type->id == ZigTypeIdErrorUnion) ?
                prev_type->data.error_union.payload_type : prev_type;
            ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index]->type_entry;
            if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
                    array_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
            {
                bool const_ok = (slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0 ||
                        !cur_type->data.pointer.is_const);
                if (!const_ok) make_the_slice_const = true;
                convert_to_const_slice = false;
                continue;
            }
        }

        // *[N]T to []T
        // *[N]T to E![]T
        if (prev_type->id == ZigTypeIdPointer &&
            prev_type->data.pointer.child_type->id == ZigTypeIdArray &&
            prev_type->data.pointer.ptr_len == PtrLenSingle &&
            ((cur_type->id == ZigTypeIdErrorUnion && is_slice(cur_type->data.error_union.payload_type)) ||
             (cur_type->id == ZigTypeIdOptional && is_slice(cur_type->data.maybe.child_type)) ||
             is_slice(cur_type)))
        {
            ZigType *array_type = prev_type->data.pointer.child_type;
            ZigType *slice_type;
            switch (cur_type->id) {
                case ZigTypeIdErrorUnion:
                    slice_type = cur_type->data.error_union.payload_type;
                    break;
                case ZigTypeIdOptional:
                    slice_type = cur_type->data.maybe.child_type;
                    break;
                default:
                    slice_type = cur_type;
                    break;
            }
            ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index]->type_entry;
            if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
                    array_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
            {
                bool const_ok = (slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0 ||
                        !prev_type->data.pointer.is_const);
                if (!const_ok) make_the_slice_const = true;
                prev_inst = cur_inst;
                convert_to_const_slice = false;
                continue;
            }
        }

        // *[N]T and *[M]T
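        // e.g. (illustrative): `*const [2]u8` and `*const [3]u8` peer-resolve to the slice
        // `[]const u8`, since the differing lengths rule out a single array pointer type.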
        if (cur_type->id == ZigTypeIdPointer && cur_type->data.pointer.ptr_len == PtrLenSingle &&
                cur_type->data.pointer.child_type->id == ZigTypeIdArray &&
            prev_type->id == ZigTypeIdPointer && prev_type->data.pointer.ptr_len == PtrLenSingle &&
                prev_type->data.pointer.child_type->id == ZigTypeIdArray &&
            (
                prev_type->data.pointer.child_type->data.array.sentinel == nullptr ||
                (cur_type->data.pointer.child_type->data.array.sentinel != nullptr &&
                const_values_equal(ira->codegen, prev_type->data.pointer.child_type->data.array.sentinel,
                    cur_type->data.pointer.child_type->data.array.sentinel))
            ) &&
            types_match_const_cast_only(ira,
                cur_type->data.pointer.child_type->data.array.child_type,
                prev_type->data.pointer.child_type->data.array.child_type,
                source_node, !cur_type->data.pointer.is_const).id == ConstCastResultIdOk)
        {
            bool const_ok = (cur_type->data.pointer.is_const || !prev_type->data.pointer.is_const ||
                prev_type->data.pointer.child_type->data.array.len == 0);
            if (!const_ok) make_the_slice_const = true;
            prev_inst = cur_inst;
            convert_to_const_slice = true;
            continue;
        }
        if (prev_type->id == ZigTypeIdPointer && prev_type->data.pointer.ptr_len == PtrLenSingle &&
                prev_type->data.pointer.child_type->id == ZigTypeIdArray &&
            cur_type->id == ZigTypeIdPointer && cur_type->data.pointer.ptr_len == PtrLenSingle &&
                cur_type->data.pointer.child_type->id == ZigTypeIdArray &&
            (
                cur_type->data.pointer.child_type->data.array.sentinel == nullptr ||
                (prev_type->data.pointer.child_type->data.array.sentinel != nullptr &&
                const_values_equal(ira->codegen, cur_type->data.pointer.child_type->data.array.sentinel,
                    prev_type->data.pointer.child_type->data.array.sentinel))
            ) &&
            types_match_const_cast_only(ira,
                prev_type->data.pointer.child_type->data.array.child_type,
                cur_type->data.pointer.child_type->data.array.child_type,
                source_node, !prev_type->data.pointer.is_const).id == ConstCastResultIdOk)
        {
            bool const_ok = (prev_type->data.pointer.is_const || !cur_type->data.pointer.is_const ||
                cur_type->data.pointer.child_type->data.array.len == 0);
            if (!const_ok) make_the_slice_const = true;
            convert_to_const_slice = true;
            continue;
        }

        if (prev_type->id == ZigTypeIdEnum && cur_type->id == ZigTypeIdUnion &&
            (cur_type->data.unionation.decl_node->data.container_decl.auto_enum || cur_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
        {
            if ((err = type_resolve(ira->codegen, cur_type, ResolveStatusZeroBitsKnown)))
                return ira->codegen->builtin_types.entry_invalid;
            if (cur_type->data.unionation.tag_type == prev_type) {
                continue;
            }
        }

        if (cur_type->id == ZigTypeIdEnum && prev_type->id == ZigTypeIdUnion &&
            (prev_type->data.unionation.decl_node->data.container_decl.auto_enum || prev_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
        {
            if ((err = type_resolve(ira->codegen, prev_type, ResolveStatusZeroBitsKnown)))
                return ira->codegen->builtin_types.entry_invalid;
            if (prev_type->data.unionation.tag_type == cur_type) {
                prev_inst = cur_inst;
                continue;
            }
        }

        ErrorMsg *msg = ir_add_error_node(ira, source_node,
            buf_sprintf("incompatible types: '%s' and '%s'",
                buf_ptr(&prev_type->name), buf_ptr(&cur_type->name)));
        add_error_note(ira->codegen, msg, prev_inst->base.source_node,
            buf_sprintf("type '%s' here", buf_ptr(&prev_type->name)));
        add_error_note(ira->codegen, msg, cur_inst->base.source_node,
            buf_sprintf("type '%s' here", buf_ptr(&cur_type->name)));

        return ira->codegen->builtin_types.entry_invalid;
    }

    heap::c_allocator.deallocate(errors, errors_count);

    if (convert_to_const_slice) {
        if (prev_inst->value->type->id == ZigTypeIdPointer) {
            ZigType *array_type = prev_inst->value->type->data.pointer.child_type;
            src_assert(array_type->id == ZigTypeIdArray, source_node);
            ZigType *ptr_type = get_pointer_to_type_extra2(
                    ira->codegen, array_type->data.array.child_type,
                    prev_inst->value->type->data.pointer.is_const || make_the_slice_const, false,
                    PtrLenUnknown,
                    0, 0, 0, false,
                    VECTOR_INDEX_NONE, nullptr, array_type->data.array.sentinel);
            ZigType *slice_type = get_slice_type(ira->codegen, ptr_type);
            if (err_set_type != nullptr) {
                return get_error_union_type(ira->codegen, err_set_type, slice_type);
            } else {
                return slice_type;
            }
        } else {
            zig_unreachable();
        }
    } else if (err_set_type != nullptr) {
        if (prev_inst->value->type->id == ZigTypeIdErrorSet) {
            return err_set_type;
        } else if (prev_inst->value->type->id == ZigTypeIdErrorUnion) {
            ZigType *payload_type = prev_inst->value->type->data.error_union.payload_type;
            if ((err = type_resolve(ira->codegen, payload_type, ResolveStatusSizeKnown)))
                return ira->codegen->builtin_types.entry_invalid;
            return get_error_union_type(ira->codegen, err_set_type, payload_type);
        } else if (expected_type != nullptr && expected_type->id == ZigTypeIdErrorUnion) {
            ZigType *payload_type = expected_type->data.error_union.payload_type;
            if ((err = type_resolve(ira->codegen, payload_type, ResolveStatusSizeKnown)))
                return ira->codegen->builtin_types.entry_invalid;
            return get_error_union_type(ira->codegen, err_set_type, payload_type);
        } else {
            if (prev_inst->value->type->id == ZigTypeIdComptimeInt ||
                prev_inst->value->type->id == ZigTypeIdComptimeFloat)
            {
                ir_add_error_node(ira, source_node,
                    buf_sprintf("unable to make error union out of number literal"));
                return ira->codegen->builtin_types.entry_invalid;
            } else if (prev_inst->value->type->id == ZigTypeIdNull) {
                ir_add_error_node(ira, source_node,
                    buf_sprintf("unable to make error union out of null literal"));
                return ira->codegen->builtin_types.entry_invalid;
            } else {
                if ((err = type_resolve(ira->codegen, prev_inst->value->type, ResolveStatusSizeKnown)))
                    return ira->codegen->builtin_types.entry_invalid;
                return get_error_union_type(ira->codegen, err_set_type, prev_inst->value->type);
            }
        }
    } else if (any_are_null && prev_inst->value->type->id != ZigTypeIdNull) {
        if (prev_inst->value->type->id == ZigTypeIdOptional) {
            return prev_inst->value->type;
        } else {
            if ((err = type_resolve(ira->codegen, prev_inst->value->type, ResolveStatusSizeKnown)))
                return ira->codegen->builtin_types.entry_invalid;
            return get_optional_type(ira->codegen, prev_inst->value->type);
        }
    } else if (make_the_slice_const) {
        ZigType *slice_type;
        if (prev_inst->value->type->id == ZigTypeIdErrorUnion) {
            slice_type = prev_inst->value->type->data.error_union.payload_type;
        } else if (is_slice(prev_inst->value->type)) {
            slice_type = prev_inst->value->type;
        } else {
            zig_unreachable();
        }
        ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index]->type_entry;
        ZigType *adjusted_ptr_type = adjust_ptr_const(ira->codegen, slice_ptr_type, make_the_slice_const);
        ZigType *adjusted_slice_type = get_slice_type(ira->codegen, adjusted_ptr_type);
        if (prev_inst->value->type->id == ZigTypeIdErrorUnion) {
            return get_error_union_type(ira->codegen, prev_inst->value->type->data.error_union.err_set_type,
                    adjusted_slice_type);
        } else if (is_slice(prev_inst->value->type)) {
            return adjusted_slice_type;
        } else {
            zig_unreachable();
        }
    } else if (make_the_pointer_const) {
        return adjust_ptr_const(ira->codegen, prev_inst->value->type, make_the_pointer_const);
    } else {
        return prev_inst->value->type;
    }
}

static bool eval_const_expr_implicit_cast(IrAnalyze *ira, IrInst *source_instr,
        CastOp cast_op,
        ZigValue *other_val, ZigType *other_type,
        ZigValue *const_val, ZigType *new_type)
{
    const_val->special = other_val->special;

    assert(other_val != const_val);
    switch (cast_op) {
        case CastOpNoCast:
            zig_unreachable();
        case CastOpErrSet:
        case CastOpBitCast:
            zig_panic("TODO");
        case CastOpNoop: {
            copy_const_val(ira->codegen, const_val, other_val);
            const_val->type = new_type;
            break;
        }
        case CastOpNumLitToConcrete:
            if (other_val->type->id == ZigTypeIdComptimeFloat) {
                assert(new_type->id == ZigTypeIdFloat);
                switch (new_type->data.floating.bit_count) {
                    case 16:
                        const_val->data.x_f16 = bigfloat_to_f16(&other_val->data.x_bigfloat);
                        break;
                    case 32:
                        const_val->data.x_f32 = bigfloat_to_f32(&other_val->data.x_bigfloat);
                        break;
                    case 64:
                        const_val->data.x_f64 = bigfloat_to_f64(&other_val->data.x_bigfloat);
                        break;
                    case 80:
                        zig_panic("TODO");
                    case 128:
                        const_val->data.x_f128 = bigfloat_to_f128(&other_val->data.x_bigfloat);
                        break;
                    default:
                        zig_unreachable();
                }
            } else if (other_val->type->id == ZigTypeIdComptimeInt) {
                bigint_init_bigint(&const_val->data.x_bigint, &other_val->data.x_bigint);
            } else {
                zig_unreachable();
            }
            const_val->type = new_type;
            break;
        case CastOpIntToFloat:
            if (new_type->id == ZigTypeIdFloat) {
                BigFloat bigfloat;
                bigfloat_init_bigint(&bigfloat, &other_val->data.x_bigint);
                switch (new_type->data.floating.bit_count) {
                    case 16:
                        const_val->data.x_f16 = bigfloat_to_f16(&bigfloat);
                        break;
                    case 32:
                        const_val->data.x_f32 = bigfloat_to_f32(&bigfloat);
                        break;
                    case 64:
                        const_val->data.x_f64 = bigfloat_to_f64(&bigfloat);
                        break;
                    case 80:
                        zig_panic("TODO");
                    case 128:
                        const_val->data.x_f128 = bigfloat_to_f128(&bigfloat);
                        break;
                    default:
                        zig_unreachable();
                }
            } else if (new_type->id == ZigTypeIdComptimeFloat) {
                bigfloat_init_bigint(&const_val->data.x_bigfloat, &other_val->data.x_bigint);
            } else {
                zig_unreachable();
            }
            const_val->special = ConstValSpecialStatic;
            break;
        case CastOpFloatToInt:
            float_init_bigint(&const_val->data.x_bigint, other_val);
            if (new_type->id == ZigTypeIdInt) {
                if (!bigint_fits_in_bits(&const_val->data.x_bigint, new_type->data.integral.bit_count,
                    new_type->data.integral.is_signed))
                {
                    Buf *int_buf = buf_alloc();
                    bigint_append_buf(int_buf, &const_val->data.x_bigint, 10);

                    ir_add_error(ira, source_instr,
                        buf_sprintf("integer value '%s' cannot be stored in type '%s'",
                            buf_ptr(int_buf), buf_ptr(&new_type->name)));
                    return false;
                }
            }

            const_val->special = ConstValSpecialStatic;
            break;
        case CastOpBoolToInt:
            bigint_init_unsigned(&const_val->data.x_bigint, other_val->data.x_bool ? 1 : 0);
            const_val->special = ConstValSpecialStatic;
            break;
    }
    return true;
}

static IrInstGen *ir_const(IrAnalyze *ira, IrInst *inst, ZigType *ty) {
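    // Creates a comptime-known constant instruction of type `ty`; the caller fills in the
    // value. The instruction is also tracked in `constants` for post-analysis destruction.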
    IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
            inst->scope, inst->source_node);
    IrInstGen *new_instruction = &const_instruction->base;
    new_instruction->value->type = ty;
    new_instruction->value->special = ConstValSpecialStatic;
    ira->new_irb.constants.append(&heap::c_allocator, const_instruction);
    return new_instruction;
}

static IrInstGen *ir_const_noval(IrAnalyze *ira, IrInst *old_instruction) {
    IrInstGenConst *const_instruction = ir_create_inst_noval<IrInstGenConst>(&ira->new_irb,
            old_instruction->scope, old_instruction->source_node);
    ira->new_irb.constants.append(&heap::c_allocator, const_instruction);
    return &const_instruction->base;
}

// This function initializes the new IrInstGen with the provided ZigValue,
// rather than creating a new one.
static IrInstGen *ir_const_move(IrAnalyze *ira, IrInst *old_instruction, ZigValue *val) {
    IrInstGen *result = ir_const_noval(ira, old_instruction);
    result->value = val;
    return result;
}

static IrInstGen *ir_resolve_cast(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value,
        ZigType *wanted_type, CastOp cast_op)
{
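    // Applies `cast_op` to `value`: folded at compile time when the operand is comptime-known
    // or the result type has no runtime bits, otherwise lowered to a runtime cast instruction.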
    if (instr_is_comptime(value) || !type_has_bits(ira->codegen, wanted_type)) {
        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        if (!eval_const_expr_implicit_cast(ira, source_instr, cast_op, value->value, value->value->type,
            result->value, wanted_type))
        {
            return ira->codegen->invalid_inst_gen;
        }
        return result;
    } else {
        return ir_build_cast(ira, source_instr, wanted_type, value, cast_op);
    }
}

static IrInstGen *ir_resolve_ptr_of_array_to_unknown_len_ptr(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *wanted_type)
{
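    // Handles coercions along the lines of `*[N]T` -> `[*]T` (illustrative). The result keeps
    // the source pointer's alignment and is computed at comptime when the pointee is known.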
    ir_assert(value->value->type->id == ZigTypeIdPointer, source_instr);

    Error err;

    if ((err = type_resolve(ira->codegen, value->value->type->data.pointer.child_type,
                    ResolveStatusAlignmentKnown)))
    {
        return ira->codegen->invalid_inst_gen;
    }

    wanted_type = adjust_ptr_align(ira->codegen, wanted_type, get_ptr_align(ira->codegen, value->value->type));

    if (instr_is_comptime(value)) {
        ZigValue *val = ir_resolve_const(ira, value, UndefOk);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, source_instr, wanted_type);

        ZigValue *pointee = const_ptr_pointee(ira, ira->codegen, val, source_instr->source_node);
        if (pointee == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (pointee->special != ConstValSpecialRuntime) {
            IrInstGen *result = ir_const(ira, source_instr, wanted_type);
            result->value->data.x_ptr.special = ConstPtrSpecialBaseArray;
            result->value->data.x_ptr.mut = val->data.x_ptr.mut;
            result->value->data.x_ptr.data.base_array.array_val = pointee;
            result->value->data.x_ptr.data.base_array.elem_index = 0;
            return result;
        }
    }

    return ir_build_cast(ira, source_instr, wanted_type, value, CastOpBitCast);
}

static IrInstGen *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *array_ptr, ZigType *wanted_type, ResultLoc *result_loc)
{
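    // Handles coercions along the lines of `*[N]T` -> `[]T` (illustrative): produces a
    // comptime-known slice when the array pointer is comptime-known, otherwise emits a
    // runtime instruction that writes through the result location.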
    Error err;

    assert(array_ptr->value->type->id == ZigTypeIdPointer);
    assert(array_ptr->value->type->data.pointer.child_type->id == ZigTypeIdArray);

    ZigType *array_type = array_ptr->value->type->data.pointer.child_type;
    size_t array_len = array_type->data.array.len;

    // A zero-sized array can be cast regardless of the destination alignment or of whether
    // the pointer is undefined, and the result is always comptime-known.
    // TODO However, this is exposing a result location bug that I failed to solve on the first try.
    // If you want to try to fix the bug, uncomment this block and get the tests passing.
    //if (array_len == 0 && array_type->data.array.sentinel == nullptr) {
    //    ZigValue *undef_array = ira->codegen->pass1_arena->create<ZigValue>();
    //    undef_array->special = ConstValSpecialUndef;
    //    undef_array->type = array_type;

    //    IrInstGen *result = ir_const(ira, source_instr, wanted_type);
    //    init_const_slice(ira->codegen, result->value, undef_array, 0, 0, false);
    //    result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = ConstPtrMutComptimeConst;
    //    result->value->type = wanted_type;
    //    return result;
    //}

    if ((err = type_resolve(ira->codegen, array_ptr->value->type, ResolveStatusAlignmentKnown))) {
        return ira->codegen->invalid_inst_gen;
    }

    if (array_len != 0) {
        wanted_type = adjust_slice_align(ira->codegen, wanted_type,
            get_ptr_align(ira->codegen, array_ptr->value->type));
    }

    if (instr_is_comptime(array_ptr)) {
        UndefAllowed undef_allowed = (array_len == 0) ? UndefOk : UndefBad;
        ZigValue *array_ptr_val = ir_resolve_const(ira, array_ptr, undef_allowed);
        if (array_ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        ir_assert(is_slice(wanted_type), source_instr);
        if (array_ptr_val->special == ConstValSpecialUndef) {
            ZigValue *undef_array = ira->codegen->pass1_arena->create<ZigValue>();
            undef_array->special = ConstValSpecialUndef;
            undef_array->type = array_type;

            IrInstGen *result = ir_const(ira, source_instr, wanted_type);
            init_const_slice(ira->codegen, result->value, undef_array, 0, 0, false);
            result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = ConstPtrMutComptimeConst;
            result->value->type = wanted_type;
            return result;
        }
        bool wanted_const = wanted_type->data.structure.fields[slice_ptr_index]->type_entry->data.pointer.is_const;
        // Optimization to avoid creating unnecessary ZigValue in const_ptr_pointee
        if (array_ptr_val->data.x_ptr.special == ConstPtrSpecialSubArray) {
            ZigValue *array_val = array_ptr_val->data.x_ptr.data.base_array.array_val;
            if (array_val->special != ConstValSpecialRuntime) {
                IrInstGen *result = ir_const(ira, source_instr, wanted_type);
                init_const_slice(ira->codegen, result->value, array_val,
                        array_ptr_val->data.x_ptr.data.base_array.elem_index,
                        array_type->data.array.len, wanted_const);
                result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = array_ptr_val->data.x_ptr.mut;
                result->value->type = wanted_type;
                return result;
            }
        } else if (array_ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
            ZigValue *pointee = const_ptr_pointee(ira, ira->codegen, array_ptr_val, source_instr->source_node);
            if (pointee == nullptr)
                return ira->codegen->invalid_inst_gen;
            if (pointee->special != ConstValSpecialRuntime) {
                assert(array_ptr_val->type->id == ZigTypeIdPointer);

                IrInstGen *result = ir_const(ira, source_instr, wanted_type);
                init_const_slice(ira->codegen, result->value, pointee, 0, array_type->data.array.len, wanted_const);
                result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = array_ptr_val->data.x_ptr.mut;
                result->value->type = wanted_type;
                return result;
            }
        }
    }

    if (result_loc == nullptr) result_loc = no_result_loc();
    IrInstGen *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, true);
    if (type_is_invalid(result_loc_inst->value->type) ||
        result_loc_inst->value->type->id == ZigTypeIdUnreachable)
    {
        return result_loc_inst;
    }
    return ir_build_ptr_of_array_to_slice(ira, source_instr, wanted_type, array_ptr, result_loc_inst);
}

static IrBasicBlockGen *ir_get_new_bb(IrAnalyze *ira, IrBasicBlockSrc *old_bb, IrInst *ref_old_instruction) {
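    // Returns the generated (gen) counterpart of an analyzed (src) basic block, reusing the
    // memoized `old_bb->child` when possible and building a fresh block otherwise.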
    assert(old_bb);

    if (old_bb->child) {
        if (ref_old_instruction == nullptr || old_bb->child->ref_instruction != ref_old_instruction) {
            return old_bb->child;
        }
    }

    IrBasicBlockGen *new_bb = ir_build_bb_from(ira, old_bb);
    new_bb->ref_instruction = ref_old_instruction;

    return new_bb;
}

static IrBasicBlockGen *ir_get_new_bb_runtime(IrAnalyze *ira, IrBasicBlockSrc *old_bb, IrInst *ref_old_instruction) {
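    // Same as ir_get_new_bb, but reports a compile error and returns nullptr if the target
    // block was required to execute at compile time.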
    assert(ref_old_instruction != nullptr);
    IrBasicBlockGen *new_bb = ir_get_new_bb(ira, old_bb, ref_old_instruction);
    if (new_bb->must_be_comptime_source_instr) {
        ErrorMsg *msg = ir_add_error(ira, ref_old_instruction,
            buf_sprintf("control flow attempts to use compile-time variable at runtime"));
        add_error_note(ira->codegen, msg, new_bb->must_be_comptime_source_instr->source_node,
                buf_sprintf("compile-time variable assigned here"));
        return nullptr;
    }
    return new_bb;
}

static void ir_start_bb(IrAnalyze *ira, IrBasicBlockSrc *old_bb, IrBasicBlockSrc *const_predecessor_bb) {
    ir_assert(!old_bb->suspended, (old_bb->instruction_list.length != 0) ? &old_bb->instruction_list.at(0)->base : nullptr);
    ira->instruction_index = 0;
    ira->old_irb.current_basic_block = old_bb;
    ira->const_predecessor_bb = const_predecessor_bb;
    ira->old_bb_index = old_bb->index;
}

static IrInstGen *ira_suspend(IrAnalyze *ira, IrInst *old_instruction, IrBasicBlockSrc *next_bb,
        IrSuspendPosition *suspend_pos)
{
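    // Suspends analysis of the current basic block at the current instruction, recording the
    // position in `suspend_pos` so it can be resumed later; returns the unreachable sentinel.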
    if (ira->codegen->verbose_ir) {
        fprintf(stderr, "suspend %s_%" PRIu32 " %s_%" PRIu32 " #%" PRIu32 " (%zu,%zu)\n",
                ira->old_irb.current_basic_block->name_hint,
                ira->old_irb.current_basic_block->debug_id,
                ira->old_irb.exec->basic_block_list.at(ira->old_bb_index)->name_hint,
                ira->old_irb.exec->basic_block_list.at(ira->old_bb_index)->debug_id,
                ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index)->base.debug_id,
                ira->old_bb_index, ira->instruction_index);
    }
    suspend_pos->basic_block_index = ira->old_bb_index;
    suspend_pos->instruction_index = ira->instruction_index;

    ira->old_irb.current_basic_block->suspended = true;

    // null next_bb means that the caller plans to call ira_resume before returning
    if (next_bb != nullptr) {
        ira->old_bb_index = next_bb->index;
        ira->old_irb.current_basic_block = ira->old_irb.exec->basic_block_list.at(ira->old_bb_index);
        assert(ira->old_irb.current_basic_block == next_bb);
        ira->instruction_index = 0;
        ira->const_predecessor_bb = nullptr;
        next_bb->child = ir_get_new_bb_runtime(ira, next_bb, old_instruction);
        ira->new_irb.current_basic_block = next_bb->child;
    }
    return ira->codegen->unreach_instruction;
}

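// Pops the most recent suspend position off the resume stack and continues
// analysis from that source block and instruction index.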
static IrInstGen *ira_resume(IrAnalyze *ira) {
    IrSuspendPosition pos = ira->resume_stack.pop();
    if (ira->codegen->verbose_ir) {
        fprintf(stderr, "resume (%zu,%zu) ", pos.basic_block_index, pos.instruction_index);
    }
    ira->old_bb_index = pos.basic_block_index;
    ira->old_irb.current_basic_block = ira->old_irb.exec->basic_block_list.at(ira->old_bb_index);
    assert(ira->old_irb.current_basic_block->in_resume_stack);
    ira->old_irb.current_basic_block->in_resume_stack = false;
    ira->old_irb.current_basic_block->suspended = false;
    ira->instruction_index = pos.instruction_index;
    assert(pos.instruction_index < ira->old_irb.current_basic_block->instruction_list.length);
    if (ira->codegen->verbose_ir) {
        fprintf(stderr, "%s_%" PRIu32 " #%" PRIu32 "\n", ira->old_irb.current_basic_block->name_hint,
                ira->old_irb.current_basic_block->debug_id,
                ira->old_irb.current_basic_block->instruction_list.at(pos.instruction_index)->base.debug_id);
    }
    ira->const_predecessor_bb = nullptr;
    ira->new_irb.current_basic_block = ira->old_irb.current_basic_block->child;
    assert(ira->new_irb.current_basic_block != nullptr);
    return ira->codegen->unreach_instruction;
}

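// Advances analysis to the next source basic block that still needs work,
// preferring suspended positions on the resume stack; the block list is
// scanned at most twice so blocks reached out of order are not missed.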
static void ir_start_next_bb(IrAnalyze *ira) {
    ira->old_bb_index += 1;

    bool need_repeat = true;
    for (;;) {
        while (ira->old_bb_index < ira->old_irb.exec->basic_block_list.length) {
            IrBasicBlockSrc *old_bb = ira->old_irb.exec->basic_block_list.at(ira->old_bb_index);
            if (old_bb->child == nullptr && old_bb->suspend_instruction_ref == nullptr) {
                ira->old_bb_index += 1;
                continue;
            }
            // Skip this block if it is suspended, or if analysis of it has
            // already started (its child block has instructions or has already
            // been appended to the output).
            if (old_bb->suspended ||
                (old_bb->child != nullptr && old_bb->child->instruction_list.length != 0) ||
                (old_bb->child != nullptr && old_bb->child->already_appended))
            {
                ira->old_bb_index += 1;
                continue;
            }

            // If the resume stack is non-empty, resume the most recently
            // suspended position instead of moving on; the last entry on the
            // resume stack is a basic block that will move on to the next one
            // below when it finishes.
            if (ira->resume_stack.length != 0) {
                ira_resume(ira);
                return;
            }

            if (old_bb->child == nullptr) {
                old_bb->child = ir_get_new_bb_runtime(ira, old_bb, old_bb->suspend_instruction_ref);
            }
            ira->new_irb.current_basic_block = old_bb->child;
            ir_start_bb(ira, old_bb, nullptr);
            return;
        }
        if (!need_repeat) {
            if (ira->resume_stack.length != 0) {
                ira_resume(ira);
            }
            return;
        }
        need_repeat = false;
        ira->old_bb_index = 0;
        continue;
    }
}

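// Appends the current gen block to the output if it has not been appended yet,
// reports any user-written (non-generated) instructions remaining after the
// terminator as unreachable code, and starts analysis of the next block.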
static void ir_finish_bb(IrAnalyze *ira) {
    if (!ira->new_irb.current_basic_block->already_appended) {
        ir_append_basic_block_gen(&ira->new_irb, ira->new_irb.current_basic_block);
        if (ira->codegen->verbose_ir) {
            fprintf(stderr, "append new bb %s_%" PRIu32 "\n", ira->new_irb.current_basic_block->name_hint,
                    ira->new_irb.current_basic_block->debug_id);
        }
    }
    ira->instruction_index += 1;
    while (ira->instruction_index < ira->old_irb.current_basic_block->instruction_list.length) {
        IrInstSrc *next_instruction = ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index);
        if (!next_instruction->is_gen) {
            ir_add_error(ira, &next_instruction->base, buf_sprintf("unreachable code"));
            break;
        }
        ira->instruction_index += 1;
    }

    ir_start_next_bb(ira);
}

static IrInstGen *ir_unreach_error(IrAnalyze *ira) {
    ira->old_bb_index = SIZE_MAX;
    if (ira->new_irb.exec->first_err_trace_msg == nullptr) {
        ira->new_irb.exec->first_err_trace_msg = ira->codegen->trace_err;
    }
    return ira->codegen->unreach_instruction;
}

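// Counts one backward branch against the comptime evaluation quota and reports
// an error once the quota is exceeded. Illustrative Zig source that would trip
// this check (assuming the default quota of 1000):
//
//     comptime {
//         var i: usize = 0;
//         while (i < 2000) : (i += 1) {}
//     }
//
// The quota can be raised from Zig code with @setEvalBranchQuota.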
static bool ir_emit_backward_branch(IrAnalyze *ira, IrInst* source_instruction) {
    size_t *bbc = ira->new_irb.exec->backward_branch_count;
    size_t *quota = ira->new_irb.exec->backward_branch_quota;

    // If we're already over quota, we've already given an error message for this.
    if (*bbc > *quota) {
        assert(ira->codegen->errors.length > 0);
        return false;
    }

    *bbc += 1;
    if (*bbc > *quota) {
        ir_add_error(ira, source_instruction,
                buf_sprintf("evaluation exceeded %" ZIG_PRI_usize " backwards branches", *quota));
        return false;
    }
    return true;
}

static IrInstGen *ir_inline_bb(IrAnalyze *ira, IrInst* source_instruction, IrBasicBlockSrc *old_bb) {
    if (old_bb->debug_id <= ira->old_irb.current_basic_block->debug_id) {
        if (!ir_emit_backward_branch(ira, source_instruction))
            return ir_unreach_error(ira);
    }

    old_bb->child = ira->old_irb.current_basic_block->child;
    ir_start_bb(ira, old_bb, ira->old_irb.current_basic_block);
    return ira->codegen->unreach_instruction;
}

static IrInstGen *ir_finish_anal(IrAnalyze *ira, IrInstGen *instruction) {
    if (instruction->value->type->id == ZigTypeIdUnreachable)
        ir_finish_bb(ira);
    return instruction;
}

static IrInstGen *ir_const_fn(IrAnalyze *ira, IrInst *source_instr, ZigFn *fn_entry) {
    IrInstGen *result = ir_const(ira, source_instr, fn_entry->type_entry);
    result->value->special = ConstValSpecialStatic;
    result->value->data.x_ptr.data.fn.fn_entry = fn_entry;
    result->value->data.x_ptr.mut = ConstPtrMutComptimeConst;
    result->value->data.x_ptr.special = ConstPtrSpecialFunction;
    return result;
}

static IrInstGen *ir_const_bound_fn(IrAnalyze *ira, IrInst *src_inst, ZigFn *fn_entry, IrInstGen *first_arg,
        IrInst *first_arg_src)
{
    // This is unfortunately required to avoid improperly freeing first_arg_src
    ira_ref(ira);

    IrInstGen *result = ir_const(ira, src_inst, get_bound_fn_type(ira->codegen, fn_entry));
    result->value->data.x_bound_fn.fn = fn_entry;
    result->value->data.x_bound_fn.first_arg = first_arg;
    result->value->data.x_bound_fn.first_arg_src = first_arg_src;
    return result;
}

static IrInstGen *ir_const_type(IrAnalyze *ira, IrInst *source_instruction, ZigType *ty) {
    IrInstGen *result = ir_const(ira, source_instruction, ira->codegen->builtin_types.entry_type);
    result->value->data.x_type = ty;
    return result;
}

static IrInstGen *ir_const_bool(IrAnalyze *ira, IrInst *source_instruction, bool value) {
    IrInstGen *result = ir_const(ira, source_instruction, ira->codegen->builtin_types.entry_bool);
    result->value->data.x_bool = value;
    return result;
}

static IrInstGen *ir_const_undef(IrAnalyze *ira, IrInst *source_instruction, ZigType *ty) {
    IrInstGen *result = ir_const(ira, source_instruction, ty);
    result->value->special = ConstValSpecialUndef;
    return result;
}

static IrInstGen *ir_const_unreachable(IrAnalyze *ira, IrInst *source_instruction) {
    IrInstGen *result = ir_const_noval(ira, source_instruction);
    result->value = ira->codegen->intern.for_unreachable();
    return result;
}

static IrInstGen *ir_const_void(IrAnalyze *ira, IrInst *source_instruction) {
    IrInstGen *result = ir_const_noval(ira, source_instruction);
    result->value = ira->codegen->intern.for_void();
    return result;
}

static IrInstGen *ir_const_unsigned(IrAnalyze *ira, IrInst *source_instruction, uint64_t value) {
    IrInstGen *result = ir_const(ira, source_instruction, ira->codegen->builtin_types.entry_num_lit_int);
    bigint_init_unsigned(&result->value->data.x_bigint, value);
    return result;
}

static IrInstGen *ir_get_const_ptr(IrAnalyze *ira, IrInst *instruction,
        ZigValue *pointee, ZigType *pointee_type,
        ConstPtrMut ptr_mut, bool ptr_is_const, bool ptr_is_volatile, uint32_t ptr_align)
{
    ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type,
            ptr_is_const, ptr_is_volatile, PtrLenSingle, ptr_align, 0, 0, false);
    IrInstGen *const_instr = ir_const(ira, instruction, ptr_type);
    ZigValue *const_val = const_instr->value;
    const_val->data.x_ptr.special = ConstPtrSpecialRef;
    const_val->data.x_ptr.mut = ptr_mut;
    const_val->data.x_ptr.data.ref.pointee = pointee;
    return const_instr;
}

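// Ensures `val` is usable as a compile-time constant: static values pass, lazy
// values are resolved unless the caller allows them to stay lazy, and runtime
// values (other than zero-bit ones) or disallowed undefined values produce an
// error.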
static Error ir_resolve_const_val(CodeGen *codegen, IrExecutableGen *exec, AstNode *source_node,
        ZigValue *val, UndefAllowed undef_allowed)
{
    Error err;
    for (;;) {
        switch (val->special) {
            case ConstValSpecialStatic:
                return ErrorNone;
            case ConstValSpecialRuntime:
                if (!type_has_bits(codegen, val->type))
                    return ErrorNone;

                exec_add_error_node_gen(codegen, exec, source_node,
                        buf_sprintf("unable to evaluate constant expression"));
                return ErrorSemanticAnalyzeFail;
            case ConstValSpecialUndef:
                if (undef_allowed == UndefOk || undef_allowed == LazyOk)
                    return ErrorNone;

                exec_add_error_node_gen(codegen, exec, source_node,
                        buf_sprintf("use of undefined value here causes undefined behavior"));
                return ErrorSemanticAnalyzeFail;
            case ConstValSpecialLazy:
                if (undef_allowed == LazyOk || undef_allowed == LazyOkNoUndef)
                    return ErrorNone;

                if ((err = ir_resolve_lazy(codegen, source_node, val)))
                    return err;

                continue;
        }
    }
}

static ZigValue *ir_resolve_const(IrAnalyze *ira, IrInstGen *value, UndefAllowed undef_allowed) {
    Error err;
    if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec, value->base.source_node,
                    value->value, undef_allowed)))
    {
        return nullptr;
    }
    return value->value;
}

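// Compile-time evaluation entry point: generates source IR for `node`,
// analyzes it into gen IR under the given backward-branch quota, checks the
// analyzed code for disallowed side effects, and resolves the value written
// through `return_ptr` into a constant.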
Error ir_eval_const_value(CodeGen *codegen, Scope *scope, AstNode *node,
        ZigValue *return_ptr, size_t *backward_branch_count, size_t *backward_branch_quota,
        ZigFn *fn_entry, Buf *c_import_buf, AstNode *source_node, Buf *exec_name,
        IrExecutableGen *parent_exec, AstNode *expected_type_source_node, UndefAllowed undef_allowed)
{
    Error err;

    src_assert(return_ptr->type->id == ZigTypeIdPointer, source_node);

    if (type_is_invalid(return_ptr->type))
        return ErrorSemanticAnalyzeFail;

    IrExecutableSrc *ir_executable = heap::c_allocator.create<IrExecutableSrc>();
    ir_executable->source_node = source_node;
    ir_executable->parent_exec = parent_exec;
    ir_executable->name = exec_name;
    ir_executable->is_inline = true;
    ir_executable->fn_entry = fn_entry;
    ir_executable->c_import_buf = c_import_buf;
    ir_executable->begin_scope = scope;

    if (!ir_gen(codegen, node, scope, ir_executable))
        return ErrorSemanticAnalyzeFail;

    if (ir_executable->first_err_trace_msg != nullptr) {
        codegen->trace_err = ir_executable->first_err_trace_msg;
        return ErrorSemanticAnalyzeFail;
    }

    if (codegen->verbose_ir) {
        fprintf(stderr, "\nSource: ");
        ast_render(stderr, node, 4);
        fprintf(stderr, "\n{ // (IR)\n");
        ir_print_src(codegen, stderr, ir_executable, 2);
        fprintf(stderr, "}\n");
    }
    IrExecutableGen *analyzed_executable = heap::c_allocator.create<IrExecutableGen>();
    analyzed_executable->source_node = source_node;
    analyzed_executable->parent_exec = parent_exec;
    analyzed_executable->source_exec = ir_executable;
    analyzed_executable->name = exec_name;
    analyzed_executable->is_inline = true;
    analyzed_executable->fn_entry = fn_entry;
    analyzed_executable->c_import_buf = c_import_buf;
    analyzed_executable->backward_branch_count = backward_branch_count;
    analyzed_executable->backward_branch_quota = backward_branch_quota;
    analyzed_executable->begin_scope = scope;
    ZigType *result_type = ir_analyze(codegen, ir_executable, analyzed_executable,
            return_ptr->type->data.pointer.child_type, expected_type_source_node, return_ptr);
    if (type_is_invalid(result_type)) {
        return ErrorSemanticAnalyzeFail;
    }

    if (codegen->verbose_ir) {
        fprintf(stderr, "{ // (analyzed)\n");
        ir_print_gen(codegen, stderr, analyzed_executable, 2);
        fprintf(stderr, "}\n");
    }

    if ((err = ir_exec_scan_for_side_effects(codegen, analyzed_executable)))
        return err;

    ZigValue *result = const_ptr_pointee(nullptr, codegen, return_ptr, source_node);
    if (result == nullptr)
        return ErrorSemanticAnalyzeFail;
    if ((err = ir_resolve_const_val(codegen, analyzed_executable, node, result, undef_allowed)))
        return err;

    return ErrorNone;
}

static ErrorTableEntry *ir_resolve_error(IrAnalyze *ira, IrInstGen *err_value) {
    if (type_is_invalid(err_value->value->type))
        return nullptr;

    if (err_value->value->type->id != ZigTypeIdErrorSet) {
        ir_add_error_node(ira, err_value->base.source_node,
                buf_sprintf("expected error, found '%s'", buf_ptr(&err_value->value->type->name)));
        return nullptr;
    }

    ZigValue *const_val = ir_resolve_const(ira, err_value, UndefBad);
    if (!const_val)
        return nullptr;

    assert(const_val->data.x_err_set != nullptr);
    return const_val->data.x_err_set;
}

static ZigType *ir_resolve_const_type(CodeGen *codegen, IrExecutableGen *exec, AstNode *source_node,
        ZigValue *val)
{
    Error err;
    if ((err = ir_resolve_const_val(codegen, exec, source_node, val, UndefBad)))
        return codegen->builtin_types.entry_invalid;

    assert(val->data.x_type != nullptr);
    return val->data.x_type;
}

static ZigValue *ir_resolve_type_lazy(IrAnalyze *ira, IrInstGen *type_value) {
    if (type_is_invalid(type_value->value->type))
        return nullptr;

    if (type_value->value->type->id != ZigTypeIdMetaType) {
        ir_add_error_node(ira, type_value->base.source_node,
                buf_sprintf("expected type 'type', found '%s'", buf_ptr(&type_value->value->type->name)));
        return nullptr;
    }

    Error err;
    if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec, type_value->base.source_node,
                    type_value->value, LazyOk)))
    {
        return nullptr;
    }

    return type_value->value;
}

static ZigType *ir_resolve_type(IrAnalyze *ira, IrInstGen *type_value) {
    ZigValue *val = ir_resolve_type_lazy(ira, type_value);
    if (val == nullptr)
        return ira->codegen->builtin_types.entry_invalid;

    return ir_resolve_const_type(ira->codegen, ira->new_irb.exec, type_value->base.source_node, val);
}

static Error ir_validate_vector_elem_type(IrAnalyze *ira, AstNode *source_node, ZigType *elem_type) {
    Error err;
    bool is_valid;
    if ((err = is_valid_vector_elem_type(ira->codegen, elem_type, &is_valid)))
        return err;
    if (!is_valid) {
        ir_add_error_node(ira, source_node,
            buf_sprintf("vector element type must be integer, float, bool, or pointer; '%s' is invalid",
                buf_ptr(&elem_type->name)));
        return ErrorSemanticAnalyzeFail;
    }
    return ErrorNone;
}

static ZigType *ir_resolve_vector_elem_type(IrAnalyze *ira, IrInstGen *elem_type_value) {
    Error err;
    ZigType *elem_type = ir_resolve_type(ira, elem_type_value);
    if (type_is_invalid(elem_type))
        return ira->codegen->builtin_types.entry_invalid;
    if ((err = ir_validate_vector_elem_type(ira, elem_type_value->base.source_node, elem_type)))
        return ira->codegen->builtin_types.entry_invalid;
    return elem_type;
}

static ZigType *ir_resolve_int_type(IrAnalyze *ira, IrInstGen *type_value) {
    ZigType *ty = ir_resolve_type(ira, type_value);
    if (type_is_invalid(ty))
        return ira->codegen->builtin_types.entry_invalid;

    if (ty->id != ZigTypeIdInt) {
        ErrorMsg *msg = ir_add_error_node(ira, type_value->base.source_node,
            buf_sprintf("expected integer type, found '%s'", buf_ptr(&ty->name)));
        if (ty->id == ZigTypeIdVector &&
            ty->data.vector.elem_type->id == ZigTypeIdInt)
        {
            add_error_note(ira->codegen, msg, type_value->base.source_node,
                buf_sprintf("represent vectors with their element types, i.e. '%s'",
                    buf_ptr(&ty->data.vector.elem_type->name)));
        }
        return ira->codegen->builtin_types.entry_invalid;
    }

    return ty;
}

static ZigType *ir_resolve_error_set_type(IrAnalyze *ira, IrInst *op_source, IrInstGen *type_value) {
    if (type_is_invalid(type_value->value->type))
        return ira->codegen->builtin_types.entry_invalid;

    if (type_value->value->type->id != ZigTypeIdMetaType) {
        ErrorMsg *msg = ir_add_error_node(ira, type_value->base.source_node,
                buf_sprintf("expected error set type, found '%s'", buf_ptr(&type_value->value->type->name)));
        add_error_note(ira->codegen, msg, op_source->source_node,
                buf_sprintf("`||` merges error sets; `or` performs boolean OR"));
        return ira->codegen->builtin_types.entry_invalid;
    }

    ZigValue *const_val = ir_resolve_const(ira, type_value, UndefBad);
    if (!const_val)
        return ira->codegen->builtin_types.entry_invalid;

    assert(const_val->data.x_type != nullptr);
    ZigType *result_type = const_val->data.x_type;
    if (result_type->id != ZigTypeIdErrorSet) {
        ErrorMsg *msg = ir_add_error_node(ira, type_value->base.source_node,
                buf_sprintf("expected error set type, found type '%s'", buf_ptr(&result_type->name)));
        add_error_note(ira->codegen, msg, op_source->source_node,
                buf_sprintf("`||` merges error sets; `or` performs boolean OR"));
        return ira->codegen->builtin_types.entry_invalid;
    }
    return result_type;
}

static ZigFn *ir_resolve_fn(IrAnalyze *ira, IrInstGen *fn_value) {
    if (type_is_invalid(fn_value->value->type))
        return nullptr;

    if (fn_value->value->type->id != ZigTypeIdFn) {
        ir_add_error_node(ira, fn_value->base.source_node,
                buf_sprintf("expected function type, found '%s'", buf_ptr(&fn_value->value->type->name)));
        return nullptr;
    }

    ZigValue *const_val = ir_resolve_const(ira, fn_value, UndefBad);
    if (!const_val)
        return nullptr;

    // May be a ConstPtrSpecialHardCodedAddr
    if (const_val->data.x_ptr.special != ConstPtrSpecialFunction)
        return nullptr;

    return const_val->data.x_ptr.data.fn.fn_entry;
}

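// Wraps `value` into an optional of `wanted_type`. Comptime values become a
// constant directly; otherwise an optional-wrap instruction is emitted with a
// runtime hint that the result is non-null.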
static IrInstGen *ir_analyze_optional_wrap(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *wanted_type, ResultLoc *result_loc)
{
    assert(wanted_type->id == ZigTypeIdOptional);

    if (instr_is_comptime(value)) {
        ZigType *payload_type = wanted_type->data.maybe.child_type;
        IrInstGen *casted_payload = ir_implicit_cast(ira, value, payload_type);
        if (type_is_invalid(casted_payload->value->type))
            return ira->codegen->invalid_inst_gen;

        ZigValue *val = ir_resolve_const(ira, casted_payload, UndefOk);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                source_instr->scope, source_instr->source_node);
        const_instruction->base.value->special = ConstValSpecialStatic;
        if (types_have_same_zig_comptime_repr(ira->codegen, wanted_type, payload_type)) {
            copy_const_val(ira->codegen, const_instruction->base.value, val);
        } else {
            const_instruction->base.value->data.x_optional = val;
        }
        const_instruction->base.value->type = wanted_type;
        return &const_instruction->base;
    }

    if (result_loc == nullptr && handle_is_ptr(ira->codegen, wanted_type)) {
        result_loc = no_result_loc();
    }
    IrInstGen *result_loc_inst = nullptr;
    if (result_loc != nullptr) {
        result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, true);
        if (type_is_invalid(result_loc_inst->value->type) ||
            result_loc_inst->value->type->id == ZigTypeIdUnreachable)
        {
            return result_loc_inst;
        }
    }
    IrInstGen *result = ir_build_optional_wrap(ira, source_instr, wanted_type, value, result_loc_inst);
    result->value->data.rh_maybe = RuntimeHintOptionalNonNull;
    return result;
}

static IrInstGen *ir_analyze_err_wrap_payload(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *wanted_type, ResultLoc *result_loc)
{
    assert(wanted_type->id == ZigTypeIdErrorUnion);

    ZigType *payload_type = wanted_type->data.error_union.payload_type;
    ZigType *err_set_type = wanted_type->data.error_union.err_set_type;
    if (instr_is_comptime(value)) {
        IrInstGen *casted_payload = ir_implicit_cast(ira, value, payload_type);
        if (type_is_invalid(casted_payload->value->type))
            return ira->codegen->invalid_inst_gen;

        ZigValue *val = ir_resolve_const(ira, casted_payload, UndefOk);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *err_set_val = ira->codegen->pass1_arena->create<ZigValue>();
        err_set_val->type = err_set_type;
        err_set_val->special = ConstValSpecialStatic;
        err_set_val->data.x_err_set = nullptr;

        IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                source_instr->scope, source_instr->source_node);
        const_instruction->base.value->type = wanted_type;
        const_instruction->base.value->special = ConstValSpecialStatic;
        const_instruction->base.value->data.x_err_union.error_set = err_set_val;
        const_instruction->base.value->data.x_err_union.payload = val;
        return &const_instruction->base;
    }

    IrInstGen *result_loc_inst;
    if (handle_is_ptr(ira->codegen, wanted_type)) {
        if (result_loc == nullptr) result_loc = no_result_loc();
        result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, true);
        if (type_is_invalid(result_loc_inst->value->type) ||
            result_loc_inst->value->type->id == ZigTypeIdUnreachable) {
            return result_loc_inst;
        }
    } else {
        result_loc_inst = nullptr;
    }

    IrInstGen *result = ir_build_err_wrap_payload(ira, source_instr, wanted_type, value, result_loc_inst);
    result->value->data.rh_error_union = RuntimeHintErrorUnionNonError;
    return result;
}

static IrInstGen *ir_analyze_err_set_cast(IrAnalyze *ira, IrInst* source_instr, IrInstGen *value,
        ZigType *wanted_type)
{
    assert(value->value->type->id == ZigTypeIdErrorSet);
    assert(wanted_type->id == ZigTypeIdErrorSet);

    if (instr_is_comptime(value)) {
        ZigValue *val = ir_resolve_const(ira, value, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        if (!resolve_inferred_error_set(ira->codegen, wanted_type, source_instr->source_node)) {
            return ira->codegen->invalid_inst_gen;
        }
        if (!type_is_global_error_set(wanted_type)) {
            bool subset = false;
            for (uint32_t i = 0, count = wanted_type->data.error_set.err_count; i < count; i += 1) {
                if (wanted_type->data.error_set.errors[i]->value == val->data.x_err_set->value) {
                    subset = true;
                    break;
                }
            }
            if (!subset) {
                ir_add_error(ira, source_instr,
                    buf_sprintf("error.%s not a member of error set '%s'",
                        buf_ptr(&val->data.x_err_set->name), buf_ptr(&wanted_type->name)));
                return ira->codegen->invalid_inst_gen;
            }
        }

        IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                source_instr->scope, source_instr->source_node);
        const_instruction->base.value->type = wanted_type;
        const_instruction->base.value->special = ConstValSpecialStatic;
        const_instruction->base.value->data.x_err_set = val->data.x_err_set;
        return &const_instruction->base;
    }

    return ir_build_cast(ira, source_instr, wanted_type, value, CastOpErrSet);
}

static IrInstGen *ir_analyze_frame_ptr_to_anyframe(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *frame_ptr, ZigType *wanted_type)
{
    if (instr_is_comptime(frame_ptr)) {
        ZigValue *ptr_val = ir_resolve_const(ira, frame_ptr, UndefBad);
        if (ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ir_assert(ptr_val->type->id == ZigTypeIdPointer, source_instr);
        if (ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
            zig_panic("TODO comptime frame pointer");
        }
    }

    return ir_build_cast(ira, source_instr, wanted_type, frame_ptr, CastOpBitCast);
}

static IrInstGen *ir_analyze_anyframe_to_anyframe(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *wanted_type)
{
    if (instr_is_comptime(value)) {
        zig_panic("TODO comptime anyframe->T to anyframe");
    }

    return ir_build_cast(ira, source_instr, wanted_type, value, CastOpBitCast);
}

static IrInstGen *ir_analyze_err_wrap_code(IrAnalyze *ira, IrInst* source_instr, IrInstGen *value,
        ZigType *wanted_type, ResultLoc *result_loc)
{
    assert(wanted_type->id == ZigTypeIdErrorUnion);

    IrInstGen *casted_value = ir_implicit_cast(ira, value, wanted_type->data.error_union.err_set_type);

    if (instr_is_comptime(casted_value)) {
        ZigValue *val = ir_resolve_const(ira, casted_value, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        ZigValue *err_set_val = ira->codegen->pass1_arena->create<ZigValue>();
        err_set_val->special = ConstValSpecialStatic;
        err_set_val->type = wanted_type->data.error_union.err_set_type;
        err_set_val->data.x_err_set = val->data.x_err_set;

        IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                source_instr->scope, source_instr->source_node);
        const_instruction->base.value->type = wanted_type;
        const_instruction->base.value->special = ConstValSpecialStatic;
        const_instruction->base.value->data.x_err_union.error_set = err_set_val;
        const_instruction->base.value->data.x_err_union.payload = nullptr;
        return &const_instruction->base;
    }

    IrInstGen *result_loc_inst;
    if (handle_is_ptr(ira->codegen, wanted_type)) {
        if (result_loc == nullptr) result_loc = no_result_loc();
        result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, true);
        if (type_is_invalid(result_loc_inst->value->type) ||
            result_loc_inst->value->type->id == ZigTypeIdUnreachable)
        {
            return result_loc_inst;
        }
    } else {
        result_loc_inst = nullptr;
    }

    IrInstGen *result = ir_build_err_wrap_code(ira, source_instr, wanted_type, value, result_loc_inst);
    result->value->data.rh_error_union = RuntimeHintErrorUnionError;
    return result;
}

static IrInstGen *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInst *source_instr, IrInstGen *value, ZigType *wanted_type) {
    assert(wanted_type->id == ZigTypeIdOptional);
    assert(instr_is_comptime(value));

    ZigValue *val = ir_resolve_const(ira, value, UndefBad);
    assert(val != nullptr);

    IrInstGen *result = ir_const(ira, source_instr, wanted_type);
    result->value->special = ConstValSpecialStatic;

    if (get_src_ptr_type(wanted_type) != nullptr) {
        result->value->data.x_ptr.special = ConstPtrSpecialNull;
    } else if (is_opt_err_set(wanted_type)) {
        result->value->data.x_err_set = nullptr;
    } else {
        result->value->data.x_optional = nullptr;
    }
    return result;
}

static IrInstGen *ir_analyze_null_to_c_pointer(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *value, ZigType *wanted_type)
{
    assert(wanted_type->id == ZigTypeIdPointer);
    assert(wanted_type->data.pointer.ptr_len == PtrLenC);
    assert(instr_is_comptime(value));

    ZigValue *val = ir_resolve_const(ira, value, UndefBad);
    assert(val != nullptr);

    IrInstGen *result = ir_const(ira, source_instr, wanted_type);
    result->value->data.x_ptr.special = ConstPtrSpecialNull;
    result->value->data.x_ptr.mut = ConstPtrMutComptimeConst;
    return result;
}

static IrInstGen *ir_get_ref2(IrAnalyze *ira, IrInst* source_instruction, IrInstGen *value,
        ZigType *elem_type, bool is_const, bool is_volatile)
{
    Error err;

    if (type_is_invalid(elem_type))
        return ira->codegen->invalid_inst_gen;

    if (instr_is_comptime(value)) {
        ZigValue *val = ir_resolve_const(ira, value, LazyOk);
        if (!val)
            return ira->codegen->invalid_inst_gen;
        return ir_get_const_ptr(ira, source_instruction, val, elem_type,
                ConstPtrMutComptimeConst, is_const, is_volatile, 0);
    }

    ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, elem_type,
            is_const, is_volatile, PtrLenSingle, 0, 0, 0, false);

    if ((err = type_resolve(ira->codegen, ptr_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *result_loc;
    if (type_has_bits(ira->codegen, ptr_type) && !handle_is_ptr(ira->codegen, elem_type)) {
        result_loc = ir_resolve_result(ira, source_instruction, no_result_loc(), elem_type, nullptr, true, true);
    } else {
        result_loc = nullptr;
    }

    IrInstGen *new_instruction = ir_build_ref_gen(ira, source_instruction, ptr_type, value, result_loc);
    new_instruction->value->data.rh_ptr = RuntimeHintPtrStack;
    return new_instruction;
}

static IrInstGen *ir_get_ref(IrAnalyze *ira, IrInst* source_instruction, IrInstGen *value,
        bool is_const, bool is_volatile)
{
    return ir_get_ref2(ira, source_instruction, value, value->value->type, is_const, is_volatile);
}

static ZigType *ir_resolve_union_tag_type(IrAnalyze *ira, AstNode *source_node, ZigType *union_type) {
    assert(union_type->id == ZigTypeIdUnion);

    Error err;
    if ((err = type_resolve(ira->codegen, union_type, ResolveStatusSizeKnown)))
        return ira->codegen->builtin_types.entry_invalid;

    AstNode *decl_node = union_type->data.unionation.decl_node;
    if (decl_node->data.container_decl.auto_enum || decl_node->data.container_decl.init_arg_expr != nullptr) {
        assert(union_type->data.unionation.tag_type != nullptr);
        return union_type->data.unionation.tag_type;
    } else {
        ErrorMsg *msg = ir_add_error_node(ira, source_node, buf_sprintf("union '%s' has no tag",
            buf_ptr(&union_type->name)));
        add_error_note(ira->codegen, msg, decl_node, buf_sprintf("consider 'union(enum)' here"));
        return ira->codegen->builtin_types.entry_invalid;
    }
}

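// Handles enum-to-integer casts (e.g. @enumToInt), accepting either an enum
// value or a tagged union (which is first cast to its tag enum). Comptime
// values, including single-tag auto-layout enums, become constants; otherwise
// a widen-or-shorten instruction is emitted.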
static IrInstGen *ir_analyze_enum_to_int(IrAnalyze *ira, IrInst *source_instr, IrInstGen *target) {
    Error err;

    IrInstGen *enum_target;
    ZigType *enum_type;
    if (target->value->type->id == ZigTypeIdUnion) {
        enum_type = ir_resolve_union_tag_type(ira, target->base.source_node, target->value->type);
        if (type_is_invalid(enum_type))
            return ira->codegen->invalid_inst_gen;
        enum_target = ir_implicit_cast(ira, target, enum_type);
        if (type_is_invalid(enum_target->value->type))
            return ira->codegen->invalid_inst_gen;
    } else if (target->value->type->id == ZigTypeIdEnum) {
        enum_target = target;
        enum_type = target->value->type;
    } else {
        ir_add_error_node(ira, target->base.source_node,
            buf_sprintf("expected enum, found type '%s'", buf_ptr(&target->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if ((err = type_resolve(ira->codegen, enum_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    ZigType *tag_type = enum_type->data.enumeration.tag_int_type;
    assert(tag_type->id == ZigTypeIdInt || tag_type->id == ZigTypeIdComptimeInt);

    // If there is only one possible tag, then we know at comptime what it is.
    if (enum_type->data.enumeration.layout == ContainerLayoutAuto &&
        enum_type->data.enumeration.src_field_count == 1)
    {
        IrInstGen *result = ir_const(ira, source_instr, tag_type);
        init_const_bigint(result->value, tag_type,
                &enum_type->data.enumeration.fields[0].value);
        return result;
    }

    if (instr_is_comptime(enum_target)) {
        ZigValue *val = ir_resolve_const(ira, enum_target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;
        IrInstGen *result = ir_const(ira, source_instr, tag_type);
        init_const_bigint(result->value, tag_type, &val->data.x_enum_tag);
        return result;
    }

    return ir_build_widen_or_shorten(ira, source_instr->scope, source_instr->source_node, enum_target, tag_type);
}

static IrInstGen *ir_analyze_union_to_tag(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *target, ZigType *wanted_type)
{
    assert(target->value->type->id == ZigTypeIdUnion);
    assert(wanted_type->id == ZigTypeIdEnum);
    assert(wanted_type == target->value->type->data.unionation.tag_type);

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;
        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        result->value->special = ConstValSpecialStatic;
        result->value->type = wanted_type;
        bigint_init_bigint(&result->value->data.x_enum_tag, &val->data.x_union.tag);
        return result;
    }

    // If there is only one possible tag, then we know at comptime what it is.
    if (wanted_type->data.enumeration.layout == ContainerLayoutAuto &&
        wanted_type->data.enumeration.src_field_count == 1)
    {
        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        result->value->special = ConstValSpecialStatic;
        result->value->type = wanted_type;
        TypeEnumField *enum_field = target->value->type->data.unionation.fields[0].enum_field;
        bigint_init_bigint(&result->value->data.x_enum_tag, &enum_field->value);
        return result;
    }

    return ir_build_union_tag(ira, source_instr, target, wanted_type);
}

static IrInstGen *ir_analyze_undefined_to_anything(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *target, ZigType *wanted_type)
{
    IrInstGen *result = ir_const(ira, source_instr, wanted_type);
    result->value->special = ConstValSpecialUndef;
    return result;
}

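// Handles casting an enum tag to a union with that tag type. At comptime this
// only succeeds when the selected field's type has exactly one possible value
// (otherwise the payload would be left uninitialized); at runtime it is only
// allowed when every union field is zero-bit.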
static IrInstGen *ir_analyze_enum_to_union(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *uncasted_target, ZigType *wanted_type)
{
    Error err;
    assert(wanted_type->id == ZigTypeIdUnion);

    if ((err = type_resolve(ira->codegen, wanted_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *target = ir_implicit_cast(ira, uncasted_target, wanted_type->data.unionation.tag_type);
    if (type_is_invalid(target->value->type))
        return ira->codegen->invalid_inst_gen;

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;
        TypeUnionField *union_field = find_union_field_by_tag(wanted_type, &val->data.x_enum_tag);
        assert(union_field != nullptr);
        ZigType *field_type = resolve_union_field_type(ira->codegen, union_field);
        if (field_type == nullptr)
            return ira->codegen->invalid_inst_gen;
        if ((err = type_resolve(ira->codegen, field_type, ResolveStatusZeroBitsKnown)))
            return ira->codegen->invalid_inst_gen;

        switch (type_has_one_possible_value(ira->codegen, field_type)) {
            case OnePossibleValueInvalid:
                return ira->codegen->invalid_inst_gen;
            case OnePossibleValueNo: {
                AstNode *field_node = wanted_type->data.unionation.decl_node->data.container_decl.fields.at(
                        union_field->enum_field->decl_index);
                ErrorMsg *msg = ir_add_error(ira, source_instr,
                        buf_sprintf("cast to union '%s' must initialize '%s' field '%s'",
                            buf_ptr(&wanted_type->name),
                            buf_ptr(&field_type->name),
                            buf_ptr(union_field->name)));
                add_error_note(ira->codegen, msg, field_node,
                        buf_sprintf("field '%s' declared here", buf_ptr(union_field->name)));
                return ira->codegen->invalid_inst_gen;
            }
            case OnePossibleValueYes:
                break;
        }

        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        result->value->special = ConstValSpecialStatic;
        result->value->type = wanted_type;
        bigint_init_bigint(&result->value->data.x_union.tag, &val->data.x_enum_tag);
        result->value->data.x_union.payload = ira->codegen->pass1_arena->create<ZigValue>();
        result->value->data.x_union.payload->special = ConstValSpecialStatic;
        result->value->data.x_union.payload->type = field_type;
        return result;
    }

    // If every field of the union is zero-bit, the cast is allowed, and it is
    // a no-op cast because the union value is represented by the enum tag alone.
    if (wanted_type->data.unionation.gen_field_count == 0) {
        return ir_build_cast(ira, &target->base, wanted_type, target, CastOpNoop);
    }

    ErrorMsg *msg = ir_add_error(ira, source_instr,
            buf_sprintf("runtime cast to union '%s' which has non-void fields",
                buf_ptr(&wanted_type->name)));
    for (uint32_t i = 0; i < wanted_type->data.unionation.src_field_count; i += 1) {
        TypeUnionField *union_field = &wanted_type->data.unionation.fields[i];
        ZigType *field_type = resolve_union_field_type(ira->codegen, union_field);
        if (field_type == nullptr)
            return ira->codegen->invalid_inst_gen;
        bool has_bits;
        if ((err = type_has_bits2(ira->codegen, field_type, &has_bits)))
            return ira->codegen->invalid_inst_gen;
        if (has_bits) {
            AstNode *field_node = wanted_type->data.unionation.decl_node->data.container_decl.fields.at(i);
            add_error_note(ira->codegen, msg, field_node,
                    buf_sprintf("field '%s' has type '%s'",
                        buf_ptr(union_field->name),
                        buf_ptr(&field_type->name)));
        }
    }
    return ira->codegen->invalid_inst_gen;
}

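// Casts between integer types or between float types of different widths.
// Comptime operands are range-checked and folded into a constant; a zero-bit
// destination integer becomes a comptime zero guarded by a runtime assert-zero
// check on the operand.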
static IrInstGen *ir_analyze_widen_or_shorten(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *target, ZigType *wanted_type)
{
    assert(wanted_type->id == ZigTypeIdInt || wanted_type->id == ZigTypeIdFloat);

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;
        if (wanted_type->id == ZigTypeIdInt) {
            if (bigint_cmp_zero(&val->data.x_bigint) == CmpLT && !wanted_type->data.integral.is_signed) {
                ir_add_error(ira, source_instr,
                    buf_sprintf("attempt to cast negative value to unsigned integer"));
                return ira->codegen->invalid_inst_gen;
            }
            if (!bigint_fits_in_bits(&val->data.x_bigint, wanted_type->data.integral.bit_count,
                    wanted_type->data.integral.is_signed))
            {
                ir_add_error(ira, source_instr,
                    buf_sprintf("cast from '%s' to '%s' truncates bits",
                        buf_ptr(&target->value->type->name), buf_ptr(&wanted_type->name)));
                return ira->codegen->invalid_inst_gen;
            }
        }
        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        result->value->type = wanted_type;
        if (wanted_type->id == ZigTypeIdInt) {
            bigint_init_bigint(&result->value->data.x_bigint, &val->data.x_bigint);
        } else {
            float_init_float(result->value, val);
        }
        return result;
    }

    // If the destination integer type has no bits, then we can emit a comptime
    // zero. However, we still want to emit a runtime safety check to make sure
    // the target is zero.
    if (!type_has_bits(ira->codegen, wanted_type)) {
        assert(wanted_type->id == ZigTypeIdInt);
        assert(type_has_bits(ira->codegen, target->value->type));
        ir_build_assert_zero(ira, source_instr, target);
        IrInstGen *result = ir_const_unsigned(ira, source_instr, 0);
        result->value->type = wanted_type;
        return result;
    }

    return ir_build_widen_or_shorten(ira, source_instr->scope, source_instr->source_node, target, wanted_type);
}

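// Handles integer-to-enum casts (e.g. via @intToEnum). The operand must
// already be the enum's tag integer type; comptime values are checked against
// the declared tags unless the enum is non-exhaustive. Illustrative Zig source
// (hypothetical enum E) that fails this check at comptime:
//
//     const E = enum(u8) { a = 1, b = 2 };
//     const tag: u8 = 42;
//     const x = @intToEnum(E, tag); // error: no tag matching integer value 42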
static IrInstGen *ir_analyze_int_to_enum(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *target, ZigType *wanted_type)
{
    Error err;
    assert(wanted_type->id == ZigTypeIdEnum);

    ZigType *actual_type = target->value->type;

    if ((err = type_resolve(ira->codegen, wanted_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    if (actual_type != wanted_type->data.enumeration.tag_int_type) {
        ir_add_error(ira, source_instr,
                buf_sprintf("integer to enum cast from '%s' instead of its tag type, '%s'",
                    buf_ptr(&actual_type->name),
                    buf_ptr(&wanted_type->data.enumeration.tag_int_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    assert(actual_type->id == ZigTypeIdInt || actual_type->id == ZigTypeIdComptimeInt);

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        TypeEnumField *field = find_enum_field_by_tag(wanted_type, &val->data.x_bigint);
        if (field == nullptr && !wanted_type->data.enumeration.non_exhaustive) {
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, &val->data.x_bigint, 10);
            ErrorMsg *msg = ir_add_error(ira, source_instr,
                buf_sprintf("enum '%s' has no tag matching integer value %s",
                    buf_ptr(&wanted_type->name), buf_ptr(val_buf)));
            add_error_note(ira->codegen, msg, wanted_type->data.enumeration.decl_node,
                    buf_sprintf("'%s' declared here", buf_ptr(&wanted_type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        IrInstGen *result = ir_const(ira, source_instr, wanted_type);
        bigint_init_bigint(&result->value->data.x_enum_tag, &val->data.x_bigint);
        return result;
    }

    return ir_build_int_to_enum_gen(ira, source_instr->scope, source_instr->source_node, wanted_type, target);
}

static IrInstGen *ir_analyze_number_to_literal(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *target, ZigType *wanted_type)
{
    ZigValue *val = ir_resolve_const(ira, target, UndefBad);
    if (!val)
        return ira->codegen->invalid_inst_gen;

    IrInstGen *result = ir_const(ira, source_instr, wanted_type);
    if (wanted_type->id == ZigTypeIdComptimeFloat) {
        float_init_float(result->value, val);
    } else if (wanted_type->id == ZigTypeIdComptimeInt) {
        bigint_init_bigint(&result->value->data.x_bigint, &val->data.x_bigint);
    } else {
        zig_unreachable();
    }
    return result;
}

static IrInstGen *ir_analyze_int_to_err(IrAnalyze *ira, IrInst* source_instr, IrInstGen *target,
    ZigType *wanted_type)
{
    assert(target->value->type->id == ZigTypeIdInt);
    assert(!target->value->type->data.integral.is_signed);
    assert(wanted_type->id == ZigTypeIdErrorSet);

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result = ir_const(ira, source_instr, wanted_type);

        if (!resolve_inferred_error_set(ira->codegen, wanted_type, source_instr->source_node)) {
            return ira->codegen->invalid_inst_gen;
        }

        if (type_is_global_error_set(wanted_type)) {
            BigInt err_count;
            bigint_init_unsigned(&err_count, ira->codegen->errors_by_index.length);

            if (bigint_cmp_zero(&val->data.x_bigint) == CmpEQ || bigint_cmp(&val->data.x_bigint, &err_count) != CmpLT) {
                Buf *val_buf = buf_alloc();
                bigint_append_buf(val_buf, &val->data.x_bigint, 10);
                ir_add_error(ira, source_instr,
                    buf_sprintf("integer value %s represents no error", buf_ptr(val_buf)));
                return ira->codegen->invalid_inst_gen;
            }

            size_t index = bigint_as_usize(&val->data.x_bigint);
            result->value->data.x_err_set = ira->codegen->errors_by_index.at(index);
            return result;
        } else {
            ErrorTableEntry *err = nullptr;
            BigInt err_int;

            for (uint32_t i = 0, count = wanted_type->data.error_set.err_count; i < count; i += 1) {
                ErrorTableEntry *this_err = wanted_type->data.error_set.errors[i];
                bigint_init_unsigned(&err_int, this_err->value);
                if (bigint_cmp(&val->data.x_bigint, &err_int) == CmpEQ) {
                    err = this_err;
                    break;
                }
            }

            if (err == nullptr) {
                Buf *val_buf = buf_alloc();
                bigint_append_buf(val_buf, &val->data.x_bigint, 10);
                ir_add_error(ira, source_instr,
                    buf_sprintf("integer value %s represents no error in '%s'", buf_ptr(val_buf), buf_ptr(&wanted_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            result->value->data.x_err_set = err;
            return result;
        }
    }

    return ir_build_int_to_err_gen(ira, source_instr->scope, source_instr->source_node, target, wanted_type);
}

static IrInstGen *ir_analyze_err_to_int(IrAnalyze *ira, IrInst* source_instr, IrInstGen *target,
        ZigType *wanted_type)
{
    assert(wanted_type->id == ZigTypeIdInt);

    ZigType *err_type = target->value->type;

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result = ir_const(ira, source_instr, wanted_type);

        ErrorTableEntry *err;
        if (err_type->id == ZigTypeIdErrorUnion) {
            err = val->data.x_err_union.error_set->data.x_err_set;
        } else if (err_type->id == ZigTypeIdErrorSet) {
            err = val->data.x_err_set;
        } else {
            zig_unreachable();
        }
        result->value->type = wanted_type;
        uint64_t err_value = err ? err->value : 0;
        bigint_init_unsigned(&result->value->data.x_bigint, err_value);

        if (!bigint_fits_in_bits(&result->value->data.x_bigint,
            wanted_type->data.integral.bit_count, wanted_type->data.integral.is_signed))
        {
            ir_add_error_node(ira, source_instr->source_node,
                    buf_sprintf("error code '%s' does not fit in '%s'",
                        buf_ptr(&err->name), buf_ptr(&wanted_type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        return result;
    }

    ZigType *err_set_type;
    if (err_type->id == ZigTypeIdErrorUnion) {
        err_set_type = err_type->data.error_union.err_set_type;
    } else if (err_type->id == ZigTypeIdErrorSet) {
        err_set_type = err_type;
    } else {
        zig_unreachable();
    }
    if (!type_is_global_error_set(err_set_type)) {
        if (!resolve_inferred_error_set(ira->codegen, err_set_type, source_instr->source_node)) {
            return ira->codegen->invalid_inst_gen;
        }
        if (err_set_type->data.error_set.err_count == 0) {
            IrInstGen *result = ir_const(ira, source_instr, wanted_type);
            bigint_init_unsigned(&result->value->data.x_bigint, 0);
            return result;
        } else if (err_set_type->data.error_set.err_count == 1) {
            IrInstGen *result = ir_const(ira, source_instr, wanted_type);
            ErrorTableEntry *err = err_set_type->data.error_set.errors[0];
            bigint_init_unsigned(&result->value->data.x_bigint, err->value);
            return result;
        }
    }

    BigInt bn;
    bigint_init_unsigned(&bn, ira->codegen->errors_by_index.length);
    if (!bigint_fits_in_bits(&bn, wanted_type->data.integral.bit_count, wanted_type->data.integral.is_signed)) {
        ir_add_error_node(ira, source_instr->source_node,
                buf_sprintf("too many error values to fit in '%s'", buf_ptr(&wanted_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_build_err_to_int_gen(ira, source_instr->scope, source_instr->source_node, target, wanted_type);
}

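// Casts a single-item pointer '*T' to a pointer to a one-element array
// '*[1]T'. For comptime-known pointers the pointee is wrapped in a constant
// array value; at runtime this is a no-op bitcast since both pointers share
// the same representation.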
static IrInstGen *ir_analyze_ptr_to_array(IrAnalyze *ira, IrInst* source_instr, IrInstGen *target,
        ZigType *wanted_type)
{
    assert(wanted_type->id == ZigTypeIdPointer);
    Error err;
    if ((err = type_resolve(ira->codegen, target->value->type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
        return ira->codegen->invalid_inst_gen;
    assert((wanted_type->data.pointer.is_const && target->value->type->data.pointer.is_const) || !target->value->type->data.pointer.is_const);
    wanted_type = adjust_ptr_align(ira->codegen, wanted_type, get_ptr_align(ira->codegen, target->value->type));
    ZigType *array_type = wanted_type->data.pointer.child_type;
    assert(array_type->id == ZigTypeIdArray);
    assert(array_type->data.array.len == 1);

    if (instr_is_comptime(target)) {
        ZigValue *val = ir_resolve_const(ira, target, UndefBad);
        if (!val)
            return ira->codegen->invalid_inst_gen;

        assert(val->type->id == ZigTypeIdPointer);
        ZigValue *pointee = const_ptr_pointee(ira, ira->codegen, val, source_instr->source_node);
        if (pointee == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (pointee->special != ConstValSpecialRuntime) {
            ZigValue *array_val = ira->codegen->pass1_arena->create<ZigValue>();
            array_val->special = ConstValSpecialStatic;
            array_val->type = array_type;
            array_val->data.x_array.special = ConstArraySpecialNone;
            array_val->data.x_array.data.s_none.elements = pointee;
            array_val->parent.id = ConstParentIdScalar;
            array_val->parent.data.p_scalar.scalar_val = pointee;

            IrInstGenConst *const_instruction = ir_create_inst_gen<IrInstGenConst>(&ira->new_irb,
                    source_instr->scope, source_instr->source_node);
            const_instruction->base.value->type = wanted_type;
            const_instruction->base.value->special = ConstValSpecialStatic;
            const_instruction->base.value->data.x_ptr.special = ConstPtrSpecialRef;
            const_instruction->base.value->data.x_ptr.data.ref.pointee = array_val;
            const_instruction->base.value->data.x_ptr.mut = val->data.x_ptr.mut;
            return &const_instruction->base;
        }
    }

    // pointer to array and pointer to single item are represented the same way at runtime
    return ir_build_cast(ira, &target->base, wanted_type, target, CastOpBitCast);
}

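// Attaches notes to parent_msg explaining why a const cast failed, recursing
// into nested mismatches (pointer, slice, and optional children, error sets,
// error union payloads, function parameters, and so on).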
static void report_recursive_error(IrAnalyze *ira, AstNode *source_node, ConstCastOnly *cast_result,
        ErrorMsg *parent_msg)
{
    switch (cast_result->id) {
        case ConstCastResultIdOk:
            zig_unreachable();
        case ConstCastResultIdInvalid:
            zig_unreachable();
        case ConstCastResultIdOptionalChild: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("optional type child '%s' cannot cast into optional type child '%s'",
                        buf_ptr(&cast_result->data.optional->actual_child->name),
                        buf_ptr(&cast_result->data.optional->wanted_child->name)));
            report_recursive_error(ira, source_node, &cast_result->data.optional->child, msg);
            break;
        }
        case ConstCastResultIdErrorUnionErrorSet: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("error set '%s' cannot cast into error set '%s'",
                        buf_ptr(&cast_result->data.error_union_error_set->actual_err_set->name),
                        buf_ptr(&cast_result->data.error_union_error_set->wanted_err_set->name)));
            report_recursive_error(ira, source_node, &cast_result->data.error_union_error_set->child, msg);
            break;
        }
        case ConstCastResultIdErrSet: {
            ZigList<ErrorTableEntry *> *missing_errors = &cast_result->data.error_set_mismatch->missing_errors;
            for (size_t i = 0; i < missing_errors->length; i += 1) {
                ErrorTableEntry *error_entry = missing_errors->at(i);
                add_error_note(ira->codegen, parent_msg, ast_field_to_symbol_node(error_entry->decl_node),
                    buf_sprintf("'error.%s' not a member of destination error set", buf_ptr(&error_entry->name)));
            }
            break;
        }
        case ConstCastResultIdErrSetGlobal: {
            add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("cannot cast global error set into smaller set"));
            break;
        }
        case ConstCastResultIdPointerChild: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("pointer type child '%s' cannot cast into pointer type child '%s'",
                        buf_ptr(&cast_result->data.pointer_mismatch->actual_child->name),
                        buf_ptr(&cast_result->data.pointer_mismatch->wanted_child->name)));
            report_recursive_error(ira, source_node, &cast_result->data.pointer_mismatch->child, msg);
            break;
        }
        case ConstCastResultIdSliceChild: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("slice type child '%s' cannot cast into slice type child '%s'",
                        buf_ptr(&cast_result->data.slice_mismatch->actual_child->name),
                        buf_ptr(&cast_result->data.slice_mismatch->wanted_child->name)));
            report_recursive_error(ira, source_node, &cast_result->data.slice_mismatch->child, msg);
            break;
        }
        case ConstCastResultIdErrorUnionPayload: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("error union payload '%s' cannot cast into error union payload '%s'",
                        buf_ptr(&cast_result->data.error_union_payload->actual_payload->name),
                        buf_ptr(&cast_result->data.error_union_payload->wanted_payload->name)));
            report_recursive_error(ira, source_node, &cast_result->data.error_union_payload->child, msg);
            break;
        }
        case ConstCastResultIdType: {
            AstNode *wanted_decl_node = type_decl_node(cast_result->data.type_mismatch->wanted_type);
            AstNode *actual_decl_node = type_decl_node(cast_result->data.type_mismatch->actual_type);
            if (wanted_decl_node != nullptr) {
                add_error_note(ira->codegen, parent_msg, wanted_decl_node,
                    buf_sprintf("%s declared here",
                        buf_ptr(&cast_result->data.type_mismatch->wanted_type->name)));
            }
            if (actual_decl_node != nullptr) {
                add_error_note(ira->codegen, parent_msg, actual_decl_node,
                    buf_sprintf("%s declared here",
                        buf_ptr(&cast_result->data.type_mismatch->actual_type->name)));
            }
            break;
        }
        case ConstCastResultIdFnArg: {
            ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("parameter %" ZIG_PRI_usize ": '%s' cannot cast into '%s'",
                        cast_result->data.fn_arg.arg_index,
                        buf_ptr(&cast_result->data.fn_arg.actual_param_type->name),
                        buf_ptr(&cast_result->data.fn_arg.expected_param_type->name)));
            report_recursive_error(ira, source_node, cast_result->data.fn_arg.child, msg);
            break;
        }
        case ConstCastResultIdBadAllowsZero: {
            ZigType *wanted_type = cast_result->data.bad_allows_zero->wanted_type;
            ZigType *actual_type = cast_result->data.bad_allows_zero->actual_type;
            bool wanted_allows_zero = ptr_allows_addr_zero(wanted_type);
            bool actual_allows_zero = ptr_allows_addr_zero(actual_type);
            if (actual_allows_zero && !wanted_allows_zero) {
                add_error_note(ira->codegen, parent_msg, source_node,
                        buf_sprintf("'%s' could have null values which are illegal in type '%s'",
                            buf_ptr(&actual_type->name),
                            buf_ptr(&wanted_type->name)));
            } else {
                add_error_note(ira->codegen, parent_msg, source_node,
                        buf_sprintf("mutable '%s' allows illegal null values stored to type '%s'",
                            buf_ptr(&wanted_type->name),
                            buf_ptr(&actual_type->name)));
            }
            break;
        }
        case ConstCastResultIdPtrLens: {
            add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("pointer length mismatch"));
            break;
        }
        case ConstCastResultIdPtrSentinel: {
            ZigType *actual_type = cast_result->data.bad_ptr_sentinel->actual_type;
            ZigType *wanted_type = cast_result->data.bad_ptr_sentinel->wanted_type;
            {
                Buf *txt_msg = buf_sprintf("destination pointer requires a terminating '");
                render_const_value(ira->codegen, txt_msg, wanted_type->data.pointer.sentinel);
                buf_appendf(txt_msg, "' sentinel");
                if (actual_type->data.pointer.sentinel != nullptr) {
                    buf_appendf(txt_msg, ", but source pointer has a terminating '");
                    render_const_value(ira->codegen, txt_msg, actual_type->data.pointer.sentinel);
                    buf_appendf(txt_msg, "' sentinel");
                }
                add_error_note(ira->codegen, parent_msg, source_node, txt_msg);
            }
            break;
        }
        case ConstCastResultIdSentinelArrays: {
            ZigType *actual_type = cast_result->data.sentinel_arrays->actual_type;
            ZigType *wanted_type = cast_result->data.sentinel_arrays->wanted_type;
            Buf *txt_msg = buf_sprintf("destination array requires a terminating '");
            render_const_value(ira->codegen, txt_msg, wanted_type->data.array.sentinel);
            buf_appendf(txt_msg, "' sentinel");
            if (actual_type->data.array.sentinel != nullptr) {
                buf_appendf(txt_msg, ", but source array has a terminating '");
                render_const_value(ira->codegen, txt_msg, actual_type->data.array.sentinel);
                buf_appendf(txt_msg, "' sentinel");
            }
            add_error_note(ira->codegen, parent_msg, source_node, txt_msg);
            break;
        }
        case ConstCastResultIdCV: {
            ZigType *wanted_type = cast_result->data.bad_cv->wanted_type;
            ZigType *actual_type = cast_result->data.bad_cv->actual_type;
            bool ok_const = !actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const;
            bool ok_volatile = !actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile;
            if (!ok_const) {
                add_error_note(ira->codegen, parent_msg, source_node, buf_sprintf("cast discards const qualifier"));
            } else if (!ok_volatile) {
                add_error_note(ira->codegen, parent_msg, source_node, buf_sprintf("cast discards volatile qualifier"));
            } else {
                zig_unreachable();
            }
            break;
        }
        case ConstCastResultIdFnIsGeneric:
            add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("only one of the functions is generic"));
            break;
        case ConstCastResultIdFnCC:
            add_error_note(ira->codegen, parent_msg, source_node,
                    buf_sprintf("calling convention mismatch"));
            break;
        case ConstCastResultIdIntShorten: {
            ZigType *wanted_type = cast_result->data.int_shorten->wanted_type;
            ZigType *actual_type = cast_result->data.int_shorten->actual_type;
            const char *wanted_signed = wanted_type->data.integral.is_signed ? "signed" : "unsigned";
            const char *actual_signed = actual_type->data.integral.is_signed ? "signed" : "unsigned";
            add_error_note(ira->codegen, parent_msg, source_node,
                buf_sprintf("%s %" PRIu32 "-bit int cannot represent all possible %s %" PRIu32 "-bit values",
                    wanted_signed, wanted_type->data.integral.bit_count,
                    actual_signed, actual_type->data.integral.bit_count));
            break;
        }
        case ConstCastResultIdFnAlign: // TODO
        case ConstCastResultIdFnVarArgs: // TODO
        case ConstCastResultIdFnReturnType: // TODO
        case ConstCastResultIdFnArgCount: // TODO
        case ConstCastResultIdFnGenericArgCount: // TODO
        case ConstCastResultIdFnArgNoAlias: // TODO
        case ConstCastResultIdUnresolvedInferredErrSet: // TODO
        case ConstCastResultIdAsyncAllocatorType: // TODO
        case ConstCastResultIdArrayChild: // TODO
            break;
    }
}

static IrInstGen *ir_analyze_array_to_vector(IrAnalyze *ira, IrInst* source_instr,
    IrInstGen *array, ZigType *vector_type)
{
    if (instr_is_comptime(array)) {
        // arrays and vectors have the same ZigValue representation
        IrInstGen *result = ir_const(ira, source_instr, vector_type);
        copy_const_val(ira->codegen, result->value, array->value);
        result->value->type = vector_type;
        return result;
    }
    return ir_build_array_to_vector(ira, source_instr, array, vector_type);
}

static IrInstGen *ir_analyze_vector_to_array(IrAnalyze *ira, IrInst* source_instr,
    IrInstGen *vector, ZigType *array_type, ResultLoc *result_loc)
{
    if (instr_is_comptime(vector)) {
        // arrays and vectors have the same ZigValue representation
        IrInstGen *result = ir_const(ira, source_instr, array_type);
        copy_const_val(ira->codegen, result->value, vector->value);
        result->value->type = array_type;
        return result;
    }
    if (result_loc == nullptr) {
        result_loc = no_result_loc();
    }
    IrInstGen *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, array_type, nullptr, true, true);
    if (type_is_invalid(result_loc_inst->value->type) || result_loc_inst->value->type->id == ZigTypeIdUnreachable) {
        return result_loc_inst;
    }
    return ir_build_vector_to_array(ira, source_instr, array_type, vector, result_loc_inst);
}

static IrInstGen *ir_analyze_int_to_c_ptr(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *integer, ZigType *dest_type)
{
    IrInstGen *unsigned_integer;
    if (instr_is_comptime(integer)) {
        unsigned_integer = integer;
    } else {
        assert(integer->value->type->id == ZigTypeIdInt);

        if (integer->value->type->data.integral.bit_count >
            ira->codegen->builtin_types.entry_usize->data.integral.bit_count)
        {
            ir_add_error(ira, source_instr,
                buf_sprintf("integer type '%s' too big for implicit @intToPtr to type '%s'",
                    buf_ptr(&integer->value->type->name),
                    buf_ptr(&dest_type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        if (integer->value->type->data.integral.is_signed) {
            ZigType *unsigned_int_type = get_int_type(ira->codegen, false,
                    integer->value->type->data.integral.bit_count);
            unsigned_integer = ir_analyze_bit_cast(ira, source_instr, integer, unsigned_int_type);
            if (type_is_invalid(unsigned_integer->value->type))
                return ira->codegen->invalid_inst_gen;
        } else {
            unsigned_integer = integer;
        }
    }

    return ir_analyze_int_to_ptr(ira, source_instr, unsigned_integer, dest_type);
}

static bool is_pointery_and_elem_is_not_pointery(ZigType *ty) {
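    // True for pointer-ish types (*T, fn, ?*T, ?fn) whose element type is not
    // itself a pointer; used below to permit coercion to *c_void / ?*c_void
    // while rejecting double pointers.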
    if (ty->id == ZigTypeIdPointer) return ty->data.pointer.child_type->id != ZigTypeIdPointer;
    if (ty->id == ZigTypeIdFn) return true;
    if (ty->id == ZigTypeIdOptional) {
        ZigType *ptr_ty = ty->data.maybe.child_type;
        if (ptr_ty->id == ZigTypeIdPointer) return ptr_ty->data.pointer.child_type->id != ZigTypeIdPointer;
        if (ptr_ty->id == ZigTypeIdFn) return true;
    }
    return false;
}

static IrInstGen *ir_analyze_enum_literal(IrAnalyze *ira, IrInst* source_instr, IrInstGen *value,
        ZigType *enum_type)
{
    assert(enum_type->id == ZigTypeIdEnum);

    Error err;
    if ((err = type_resolve(ira->codegen, enum_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    TypeEnumField *field = find_enum_type_field(enum_type, value->value->data.x_enum_literal);
    if (field == nullptr) {
        ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("enum '%s' has no field named '%s'",
                buf_ptr(&enum_type->name), buf_ptr(value->value->data.x_enum_literal)));
        add_error_note(ira->codegen, msg, enum_type->data.enumeration.decl_node,
                buf_sprintf("'%s' declared here", buf_ptr(&enum_type->name)));
        return ira->codegen->invalid_inst_gen;
    }
    IrInstGen *result = ir_const(ira, source_instr, enum_type);
    bigint_init_bigint(&result->value->data.x_enum_tag, &field->value);

    return result;
}

static IrInstGen *ir_analyze_struct_literal_to_array(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *wanted_type)
{
    ir_add_error(ira, source_instr, buf_sprintf("TODO: type coercion of anon list literal to array"));
    return ira->codegen->invalid_inst_gen;
}

static IrInstGen *ir_analyze_struct_literal_to_struct(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *struct_operand, ZigType *wanted_type)
{
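    // Coerces an anonymous struct literal to the concrete struct `wanted_type`:
    // fields are matched by name, missing fields fall back to their default
    // values, and the result is either a comptime constant or a sequence of
    // stores into a freshly resolved result location.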
    Error err;

    IrInstGen *struct_ptr = ir_get_ref(ira, source_instr, struct_operand, true, false);
    if (type_is_invalid(struct_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    if (wanted_type->data.structure.resolve_status == ResolveStatusBeingInferred) {
        ir_add_error(ira, source_instr, buf_sprintf("type coercion of anon struct literal to inferred struct"));
        return ira->codegen->invalid_inst_gen;
    }

    if ((err = type_resolve(ira->codegen, wanted_type, ResolveStatusSizeKnown)))
        return ira->codegen->invalid_inst_gen;

    size_t actual_field_count = wanted_type->data.structure.src_field_count;
    size_t instr_field_count = struct_operand->value->type->data.structure.src_field_count;

    bool need_comptime = ir_should_inline(ira->old_irb.exec, source_instr->scope)
        || type_requires_comptime(ira->codegen, wanted_type) == ReqCompTimeYes;
    bool is_comptime = true;

    // Determine if the struct_operand will be comptime.
    // Also emit compile errors for missing fields and duplicate fields.
    AstNode **field_assign_nodes = heap::c_allocator.allocate<AstNode *>(actual_field_count);
    ZigValue **field_values = heap::c_allocator.allocate<ZigValue *>(actual_field_count);
    IrInstGen **casted_fields = heap::c_allocator.allocate<IrInstGen *>(actual_field_count);
    IrInstGen *const_result = ir_const(ira, source_instr, wanted_type);

    for (size_t i = 0; i < instr_field_count; i += 1) {
        TypeStructField *src_field = struct_operand->value->type->data.structure.fields[i];
        TypeStructField *dst_field = find_struct_type_field(wanted_type, src_field->name);
        if (dst_field == nullptr) {
            ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("no field named '%s' in struct '%s'",
                    buf_ptr(src_field->name), buf_ptr(&wanted_type->name)));
            if (wanted_type->data.structure.decl_node) {
                add_error_note(ira->codegen, msg, wanted_type->data.structure.decl_node,
                    buf_sprintf("struct '%s' declared here", buf_ptr(&wanted_type->name)));
            }
            add_error_note(ira->codegen, msg, src_field->decl_node,
                buf_sprintf("field '%s' declared here", buf_ptr(src_field->name)));
            return ira->codegen->invalid_inst_gen;
        }

        ir_assert(src_field->decl_node != nullptr, source_instr);
        AstNode *existing_assign_node = field_assign_nodes[dst_field->src_index];
        if (existing_assign_node != nullptr) {
            ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("duplicate field"));
            add_error_note(ira->codegen, msg, existing_assign_node, buf_sprintf("other field here"));
            return ira->codegen->invalid_inst_gen;
        }
        field_assign_nodes[dst_field->src_index] = src_field->decl_node;

        IrInstGen *field_ptr = ir_analyze_struct_field_ptr(ira, source_instr, src_field, struct_ptr,
                struct_operand->value->type, false);
        if (type_is_invalid(field_ptr->value->type))
            return ira->codegen->invalid_inst_gen;
        IrInstGen *field_value = ir_get_deref(ira, source_instr, field_ptr, nullptr);
        if (type_is_invalid(field_value->value->type))
            return ira->codegen->invalid_inst_gen;
        IrInstGen *casted_value = ir_implicit_cast(ira, field_value, dst_field->type_entry);
        if (type_is_invalid(casted_value->value->type))
            return ira->codegen->invalid_inst_gen;

        casted_fields[dst_field->src_index] = casted_value;
        if (need_comptime || instr_is_comptime(casted_value)) {
            ZigValue *field_val = ir_resolve_const(ira, casted_value, UndefOk);
            if (field_val == nullptr)
                return ira->codegen->invalid_inst_gen;
            field_val->parent.id = ConstParentIdStruct;
            field_val->parent.data.p_struct.struct_val = const_result->value;
            field_val->parent.data.p_struct.field_index = dst_field->src_index;
            field_values[dst_field->src_index] = field_val;
        } else {
            is_comptime = false;
        }
    }

    bool any_missing = false;
    for (size_t i = 0; i < actual_field_count; i += 1) {
        if (field_assign_nodes[i] != nullptr) continue;

        // look for a default field value
        TypeStructField *field = wanted_type->data.structure.fields[i];
        memoize_field_init_val(ira->codegen, wanted_type, field);
        if (field->init_val == nullptr) {
            ir_add_error(ira, source_instr,
                buf_sprintf("missing field: '%s'", buf_ptr(field->name)));
            any_missing = true;
            continue;
        }
        if (type_is_invalid(field->init_val->type))
            return ira->codegen->invalid_inst_gen;
        ZigValue *init_val_copy = ira->codegen->pass1_arena->create<ZigValue>();
        copy_const_val(ira->codegen, init_val_copy, field->init_val);
        init_val_copy->parent.id = ConstParentIdStruct;
        init_val_copy->parent.data.p_struct.struct_val = const_result->value;
        init_val_copy->parent.data.p_struct.field_index = i;
        field_values[i] = init_val_copy;
        casted_fields[i] = ir_const_move(ira, source_instr, init_val_copy);
    }
    if (any_missing)
        return ira->codegen->invalid_inst_gen;

    if (is_comptime) {
        heap::c_allocator.deallocate(field_assign_nodes, actual_field_count);
        IrInstGen *const_result = ir_const(ira, source_instr, wanted_type);
        const_result->value->data.x_struct.fields = field_values;
        return const_result;
    }

    IrInstGen *result_loc_inst = ir_resolve_result(ira, source_instr, no_result_loc(),
        wanted_type, nullptr, true, true);
    if (type_is_invalid(result_loc_inst->value->type) || result_loc_inst->value->type->id == ZigTypeIdUnreachable) {
        return ira->codegen->invalid_inst_gen;
    }

    for (size_t i = 0; i < actual_field_count; i += 1) {
        TypeStructField *field = wanted_type->data.structure.fields[i];
        IrInstGen *field_ptr = ir_analyze_struct_field_ptr(ira, source_instr, field, result_loc_inst, wanted_type, true);
        if (type_is_invalid(field_ptr->value->type))
            return ira->codegen->invalid_inst_gen;
        IrInstGen *store_ptr_inst = ir_analyze_store_ptr(ira, source_instr, field_ptr, casted_fields[i], true);
        if (type_is_invalid(store_ptr_inst->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    heap::c_allocator.deallocate(field_assign_nodes, actual_field_count);
    heap::c_allocator.deallocate(field_values, actual_field_count);
    heap::c_allocator.deallocate(casted_fields, actual_field_count);

    return ir_get_deref(ira, source_instr, result_loc_inst, nullptr);
}

static IrInstGen *ir_analyze_struct_literal_to_union(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *value, ZigType *union_type)
{
    Error err;
    ZigType *struct_type = value->value->type;

    assert(struct_type->id == ZigTypeIdStruct);
    assert(union_type->id == ZigTypeIdUnion);
    assert(struct_type->data.structure.src_field_count == 1);

    TypeStructField *only_field = struct_type->data.structure.fields[0];

    if ((err = type_resolve(ira->codegen, union_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    TypeUnionField *union_field = find_union_type_field(union_type, only_field->name);
    if (union_field == nullptr) {
        ir_add_error_node(ira, only_field->decl_node,
            buf_sprintf("no field named '%s' in union '%s'",
                buf_ptr(only_field->name), buf_ptr(&union_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *payload_type = resolve_union_field_type(ira->codegen, union_field);
    if (payload_type == nullptr)
        return ira->codegen->invalid_inst_gen;

    IrInstGen *field_value = ir_analyze_struct_value_field_value(ira, source_instr, value, only_field);
    if (type_is_invalid(field_value->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_value = ir_implicit_cast(ira, field_value, payload_type);
    if (type_is_invalid(casted_value->value->type))
        return ira->codegen->invalid_inst_gen;

    if (instr_is_comptime(casted_value)) {
        ZigValue *val = ir_resolve_const(ira, casted_value, UndefBad);
        if (val == nullptr)
            return ira->codegen->invalid_inst_gen;

        IrInstGen *result = ir_const(ira, source_instr, union_type);
        bigint_init_bigint(&result->value->data.x_union.tag, &union_field->enum_field->value);
        result->value->data.x_union.payload = val;

        val->parent.id = ConstParentIdUnion;
        val->parent.data.p_union.union_val = result->value;

        return result;
    }

    IrInstGen *result_loc_inst = ir_resolve_result(ira, source_instr, no_result_loc(),
        union_type, nullptr, true, true);
    if (type_is_invalid(result_loc_inst->value->type) || result_loc_inst->value->type->id == ZigTypeIdUnreachable) {
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *payload_ptr = ir_analyze_container_field_ptr(ira, only_field->name, source_instr,
        result_loc_inst, source_instr, union_type, true);
    if (type_is_invalid(payload_ptr->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *store_ptr_inst = ir_analyze_store_ptr(ira, source_instr, payload_ptr, casted_value, false);
    if (type_is_invalid(store_ptr_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_get_deref(ira, source_instr, result_loc_inst, nullptr);
}

// Adds a compile error and returns ErrorSemanticAnalyzeFail if the cast would increase the pointer
// alignment; otherwise returns ErrorNone. Does not emit any instructions.
// Assumes that the pointer types have element types with the same ABI alignment. Avoids resolving the
// pointer types' alignments if both of the pointer types are ABI aligned.
static Error ir_cast_ptr_align(IrAnalyze *ira, IrInst* source_instr, ZigType *dest_ptr_type,
        ZigType *src_ptr_type, AstNode *src_source_node)
{
    Error err;

    ir_assert(dest_ptr_type->id == ZigTypeIdPointer, source_instr);
    ir_assert(src_ptr_type->id == ZigTypeIdPointer, source_instr);

    if (dest_ptr_type->data.pointer.explicit_alignment == 0 &&
        src_ptr_type->data.pointer.explicit_alignment == 0)
    {
        return ErrorNone;
    }

    if ((err = type_resolve(ira->codegen, dest_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
        return ErrorSemanticAnalyzeFail;

    if ((err = type_resolve(ira->codegen, src_ptr_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
        return ErrorSemanticAnalyzeFail;

    uint32_t wanted_align = get_ptr_align(ira->codegen, dest_ptr_type);
    uint32_t actual_align = get_ptr_align(ira->codegen, src_ptr_type);
    if (wanted_align > actual_align) {
        ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment"));
        add_error_note(ira->codegen, msg, src_source_node,
                buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&src_ptr_type->name), actual_align));
        add_error_note(ira->codegen, msg, source_instr->source_node,
                buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&dest_ptr_type->name), wanted_align));
        return ErrorSemanticAnalyzeFail;
    }

    return ErrorNone;
}

static IrInstGen *ir_analyze_struct_value_field_value(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *struct_operand, TypeStructField *field)
{
    IrInstGen *struct_ptr = ir_get_ref(ira, source_instr, struct_operand, true, false);
    if (type_is_invalid(struct_ptr->value->type))
        return ira->codegen->invalid_inst_gen;
    IrInstGen *field_ptr = ir_analyze_struct_field_ptr(ira, source_instr, field, struct_ptr,
            struct_operand->value->type, false);
    if (type_is_invalid(field_ptr->value->type))
        return ira->codegen->invalid_inst_gen;
    return ir_get_deref(ira, source_instr, field_ptr, nullptr);
}

static IrInstGen *ir_analyze_optional_value_payload_value(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *optional_operand, bool safety_check_on)
{
    IrInstGen *opt_ptr = ir_get_ref(ira, source_instr, optional_operand, true, false);
    IrInstGen *payload_ptr = ir_analyze_unwrap_optional_payload(ira, source_instr, opt_ptr,
            safety_check_on, false);
    return ir_get_deref(ira, source_instr, payload_ptr, nullptr);
}

static IrInstGen *ir_analyze_cast(IrAnalyze *ira, IrInst *source_instr,
    ZigType *wanted_type, IrInstGen *value)
{
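    // Implements the implicit coercion rules: each allowed coercion from the
    // type of `value` to `wanted_type` is tried in turn; if none applies, an
    // "expected type ..., found ..." error is reported along with recursive
    // notes from the const-cast analysis.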
    Error err;
    ZigType *actual_type = value->value->type;
    AstNode *source_node = source_instr->source_node;

    if (type_is_invalid(wanted_type) || type_is_invalid(actual_type)) {
        return ira->codegen->invalid_inst_gen;
    }

    // The `var` type as the wanted type means any type is acceptable.
    if (wanted_type == ira->codegen->builtin_types.entry_var) {
        return value;
    }

    // perfect match or non-const to const
    ConstCastOnly const_cast_result = types_match_const_cast_only(ira, wanted_type, actual_type,
            source_node, false);
    if (const_cast_result.id == ConstCastResultIdInvalid)
        return ira->codegen->invalid_inst_gen;
    if (const_cast_result.id == ConstCastResultIdOk) {
        return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop);
    }

    if (const_cast_result.id == ConstCastResultIdFnCC) {
        ir_assert(value->value->type->id == ZigTypeIdFn, source_instr);
        // ConstCastResultIdFnCC is guaranteed to be the last one reported, meaning everything else is ok.
        if (wanted_type->data.fn.fn_type_id.cc == CallingConventionAsync &&
            actual_type->data.fn.fn_type_id.cc == CallingConventionUnspecified)
        {
            ir_assert(value->value->data.x_ptr.special == ConstPtrSpecialFunction, source_instr);
            ZigFn *fn = value->value->data.x_ptr.data.fn.fn_entry;
            if (fn->inferred_async_node == nullptr) {
                fn->inferred_async_node = source_instr->source_node;
            }
            return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop);
        }
    }

    // cast from T to ?T
    // note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism
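    // For illustration, in Zig this covers coercions such as:
    //     var x: i32 = 1234;
    //     var y: ?i32 = x; // T wraps into ?T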
    if (wanted_type->id == ZigTypeIdOptional) {
        ZigType *wanted_child_type = wanted_type->data.maybe.child_type;
        if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node,
            false).id == ConstCastResultIdOk)
        {
            return ir_analyze_optional_wrap(ira, source_instr, value, wanted_type, nullptr);
        } else if (actual_type->id == ZigTypeIdComptimeInt ||
                   actual_type->id == ZigTypeIdComptimeFloat)
        {
            if (ir_num_lit_fits_in_other_type(ira, value, wanted_child_type, true)) {
                return ir_analyze_optional_wrap(ira, source_instr, value, wanted_type, nullptr);
            } else {
                return ira->codegen->invalid_inst_gen;
            }
        } else if (
            wanted_child_type->id == ZigTypeIdPointer &&
            wanted_child_type->data.pointer.ptr_len == PtrLenUnknown &&
            actual_type->id == ZigTypeIdPointer &&
            actual_type->data.pointer.ptr_len == PtrLenSingle &&
            actual_type->data.pointer.child_type->id == ZigTypeIdArray)
        {
            if ((err = type_resolve(ira->codegen, actual_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
                return ira->codegen->invalid_inst_gen;
            if ((err = type_resolve(ira->codegen, wanted_child_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
                return ira->codegen->invalid_inst_gen;
            if (get_ptr_align(ira->codegen, actual_type) >= get_ptr_align(ira->codegen, wanted_child_type) &&
                types_match_const_cast_only(ira, wanted_child_type->data.pointer.child_type,
                actual_type->data.pointer.child_type->data.array.child_type, source_node,
                !wanted_child_type->data.pointer.is_const).id == ConstCastResultIdOk)
            {
                IrInstGen *cast1 = ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value,
                        wanted_child_type);
                if (type_is_invalid(cast1->value->type))
                    return ira->codegen->invalid_inst_gen;
                return ir_analyze_optional_wrap(ira, source_instr, cast1, wanted_type, nullptr);
            }
        }
    }

    // T to E!T
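    // For illustration (Zig): a payload value wraps into an error union, e.g.
    //     fn answer() anyerror!i32 { return 42; } // i32 wraps into anyerror!i32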
    if (wanted_type->id == ZigTypeIdErrorUnion) {
        if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type,
            source_node, false).id == ConstCastResultIdOk)
        {
            return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type, nullptr);
        } else if (actual_type->id == ZigTypeIdComptimeInt ||
                   actual_type->id == ZigTypeIdComptimeFloat)
        {
            if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.error_union.payload_type, true)) {
                return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type, nullptr);
            } else {
                return ira->codegen->invalid_inst_gen;
            }
        }
    }

    // cast from T to E!?T
    if (wanted_type->id == ZigTypeIdErrorUnion &&
        wanted_type->data.error_union.payload_type->id == ZigTypeIdOptional &&
        actual_type->id != ZigTypeIdOptional)
    {
        ZigType *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type;
        if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node, false).id == ConstCastResultIdOk ||
            actual_type->id == ZigTypeIdNull ||
            actual_type->id == ZigTypeIdComptimeInt ||
            actual_type->id == ZigTypeIdComptimeFloat)
        {
            IrInstGen *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value);
            if (type_is_invalid(cast1->value->type))
                return ira->codegen->invalid_inst_gen;

            IrInstGen *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
            if (type_is_invalid(cast2->value->type))
                return ira->codegen->invalid_inst_gen;

            return cast2;
        }
    }


    // cast from comptime-known number to another number type
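    // For illustration (Zig): comptime-known numbers convert freely between
    // numeric types as long as the value is representable, e.g.
    //     const a: f64 = 3;   // comptime_int -> f64
    //     const b: u8 = 255;  // fits
    // while `const c: u8 = 256;` is rejected by ir_num_lit_fits_in_other_type.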
    if (instr_is_comptime(value) &&
        (actual_type->id == ZigTypeIdInt || actual_type->id == ZigTypeIdComptimeInt ||
        actual_type->id == ZigTypeIdFloat || actual_type->id == ZigTypeIdComptimeFloat) &&
        (wanted_type->id == ZigTypeIdInt || wanted_type->id == ZigTypeIdComptimeInt ||
        wanted_type->id == ZigTypeIdFloat || wanted_type->id == ZigTypeIdComptimeFloat))
    {
        if (value->value->special == ConstValSpecialUndef) {
            IrInstGen *result = ir_const(ira, source_instr, wanted_type);
            result->value->special = ConstValSpecialUndef;
            return result;
        }
        if (ir_num_lit_fits_in_other_type(ira, value, wanted_type, true)) {
            if (wanted_type->id == ZigTypeIdComptimeInt || wanted_type->id == ZigTypeIdInt) {
                IrInstGen *result = ir_const(ira, source_instr, wanted_type);
                if (actual_type->id == ZigTypeIdComptimeInt || actual_type->id == ZigTypeIdInt) {
                    copy_const_val(ira->codegen, result->value, value->value);
                    result->value->type = wanted_type;
                } else {
                    float_init_bigint(&result->value->data.x_bigint, value->value);
                }
                return result;
            } else if (wanted_type->id == ZigTypeIdComptimeFloat || wanted_type->id == ZigTypeIdFloat) {
                IrInstGen *result = ir_const(ira, source_instr, wanted_type);
                if (actual_type->id == ZigTypeIdComptimeInt || actual_type->id == ZigTypeIdInt) {
                    BigFloat bf;
                    bigfloat_init_bigint(&bf, &value->value->data.x_bigint);
                    float_init_bigfloat(result->value, &bf);
                } else {
                    float_init_float(result->value, value->value);
                }
                return result;
            }
            zig_unreachable();
        } else {
            return ira->codegen->invalid_inst_gen;
        }
    }

    // widening conversion
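    // For illustration (Zig): same-signedness widening is always lossless, e.g.
    //     var a: u8 = 200;
    //     var b: u32 = a; // u8 -> u32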
    if (wanted_type->id == ZigTypeIdInt &&
        actual_type->id == ZigTypeIdInt &&
        wanted_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
        wanted_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
    {
        return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
    }

    // a small enough unsigned int can be implicitly cast to a large enough signed int
    if (wanted_type->id == ZigTypeIdInt && wanted_type->data.integral.is_signed &&
        actual_type->id == ZigTypeIdInt && !actual_type->data.integral.is_signed &&
        wanted_type->data.integral.bit_count > actual_type->data.integral.bit_count)
    {
        return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
    }

    // float widening conversion
    if (wanted_type->id == ZigTypeIdFloat &&
        actual_type->id == ZigTypeIdFloat &&
        wanted_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
    {
        return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
    }

    // *[N]T to ?[]T
    if (wanted_type->id == ZigTypeIdOptional &&
        is_slice(wanted_type->data.maybe.child_type) &&
        actual_type->id == ZigTypeIdPointer &&
        actual_type->data.pointer.ptr_len == PtrLenSingle &&
        actual_type->data.pointer.child_type->id == ZigTypeIdArray)
    {
        IrInstGen *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.maybe.child_type, value);
        if (type_is_invalid(cast1->value->type))
            return ira->codegen->invalid_inst_gen;

        IrInstGen *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
        if (type_is_invalid(cast2->value->type))
            return ira->codegen->invalid_inst_gen;

        return cast2;
    }

    // *[N]T to [*]T and [*c]T
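    // For illustration (Zig):
    //     var buf = [4]u8{ 1, 2, 3, 4 };
    //     const p: [*]u8 = &buf;  // length information is dropped
    //     const q: [*c]u8 = &buf; // same, but as a C pointer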
    if (wanted_type->id == ZigTypeIdPointer &&
        (wanted_type->data.pointer.ptr_len == PtrLenUnknown || wanted_type->data.pointer.ptr_len == PtrLenC) &&
        actual_type->id == ZigTypeIdPointer &&
        actual_type->data.pointer.ptr_len == PtrLenSingle &&
        actual_type->data.pointer.child_type->id == ZigTypeIdArray &&
        (!actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const) &&
        (!actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile))
    {
        ZigType *actual_array_type = actual_type->data.pointer.child_type;
        if (wanted_type->data.pointer.sentinel == nullptr ||
            (actual_array_type->data.array.sentinel != nullptr &&
             const_values_equal(ira->codegen, wanted_type->data.pointer.sentinel,
                 actual_array_type->data.array.sentinel)))
        {
            if ((err = type_resolve(ira->codegen, actual_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
                return ira->codegen->invalid_inst_gen;
            if ((err = type_resolve(ira->codegen, wanted_type->data.pointer.child_type, ResolveStatusAlignmentKnown)))
                return ira->codegen->invalid_inst_gen;
            if (get_ptr_align(ira->codegen, actual_type) >= get_ptr_align(ira->codegen, wanted_type) &&
                types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
                    actual_type->data.pointer.child_type->data.array.child_type, source_node,
                    !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
            {
                return ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_type);
            }
        }
    }

    // *[N]T to []T
    // *[N]T to E![]T
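    // For illustration (Zig):
    //     var buf = [4]u8{ 1, 2, 3, 4 };
    //     const s: []u8 = &buf; // *[4]u8 -> []u8, the length becomes 4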
    if ((is_slice(wanted_type) ||
            (wanted_type->id == ZigTypeIdErrorUnion &&
            is_slice(wanted_type->data.error_union.payload_type))) &&
        actual_type->id == ZigTypeIdPointer &&
        actual_type->data.pointer.ptr_len == PtrLenSingle &&
        actual_type->data.pointer.child_type->id == ZigTypeIdArray)
    {
        ZigType *slice_type = (wanted_type->id == ZigTypeIdErrorUnion) ?
            wanted_type->data.error_union.payload_type : wanted_type;
        ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index]->type_entry;
        assert(slice_ptr_type->id == ZigTypeIdPointer);
        ZigType *array_type = actual_type->data.pointer.child_type;
        bool const_ok = (slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0
                || !actual_type->data.pointer.is_const);
        if (const_ok && types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
            array_type->data.array.child_type, source_node,
            !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
        {
            // If both pointers use the ABI alignment, it works.
            // Or if the array length is 0, alignment doesn't matter.
            bool ok_align = array_type->data.array.len == 0 ||
                (slice_ptr_type->data.pointer.explicit_alignment == 0 &&
                actual_type->data.pointer.explicit_alignment == 0);
            if (!ok_align) {
                // If either one has a non-ABI alignment, we have to resolve both alignments
                if ((err = type_resolve(ira->codegen, actual_type->data.pointer.child_type,
                                ResolveStatusAlignmentKnown)))
                {
                    return ira->codegen->invalid_inst_gen;
                }
                if ((err = type_resolve(ira->codegen, slice_ptr_type->data.pointer.child_type,
                                ResolveStatusAlignmentKnown)))
                {
                    return ira->codegen->invalid_inst_gen;
                }
                ok_align = get_ptr_align(ira->codegen, actual_type) >= get_ptr_align(ira->codegen, slice_ptr_type);
            }
            if (ok_align) {
                if (wanted_type->id == ZigTypeIdErrorUnion) {
                    IrInstGen *cast1 = ir_analyze_cast(ira, source_instr, slice_type, value);
                    if (type_is_invalid(cast1->value->type))
                        return ira->codegen->invalid_inst_gen;

                    IrInstGen *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
                    if (type_is_invalid(cast2->value->type))
                        return ira->codegen->invalid_inst_gen;

                    return cast2;
                } else {
                    return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, slice_type, nullptr);
                }
            }
        }
    }

    // @Vector(N,T1) to @Vector(N,T2)
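    // This only fires when the lengths match and the element types differ at
    // most by const-cast-compatible qualifiers; the conversion itself is a
    // bitcast.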
    if (actual_type->id == ZigTypeIdVector && wanted_type->id == ZigTypeIdVector) {
        if (actual_type->data.vector.len == wanted_type->data.vector.len &&
            types_match_const_cast_only(ira, wanted_type->data.vector.elem_type,
                actual_type->data.vector.elem_type, source_node, false).id == ConstCastResultIdOk)
        {
            return ir_analyze_bit_cast(ira, source_instr, value, wanted_type);
        }
    }

    // *@Frame(func) to anyframe->T or anyframe
    // *@Frame(func) to ?anyframe->T or ?anyframe
    // *@Frame(func) to E!anyframe->T or E!anyframe
    if (actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle &&
        !actual_type->data.pointer.is_const &&
        actual_type->data.pointer.child_type->id == ZigTypeIdFnFrame)
    {
        ZigType *anyframe_type;
        if (wanted_type->id == ZigTypeIdAnyFrame) {
            anyframe_type = wanted_type;
        } else if (wanted_type->id == ZigTypeIdOptional &&
                wanted_type->data.maybe.child_type->id == ZigTypeIdAnyFrame)
        {
            anyframe_type = wanted_type->data.maybe.child_type;
        } else if (wanted_type->id == ZigTypeIdErrorUnion &&
                wanted_type->data.error_union.payload_type->id == ZigTypeIdAnyFrame)
        {
            anyframe_type = wanted_type->data.error_union.payload_type;
        } else {
            anyframe_type = nullptr;
        }
        if (anyframe_type != nullptr) {
            bool ok = true;
            if (anyframe_type->data.any_frame.result_type != nullptr) {
                ZigFn *fn = actual_type->data.pointer.child_type->data.frame.fn;
                ZigType *fn_return_type = fn->type_entry->data.fn.fn_type_id.return_type;
                if (anyframe_type->data.any_frame.result_type != fn_return_type) {
                    ok = false;
                }
            }
            if (ok) {
                IrInstGen *cast1 = ir_analyze_frame_ptr_to_anyframe(ira, source_instr, value, anyframe_type);
                if (anyframe_type == wanted_type)
                    return cast1;
                return ir_analyze_cast(ira, source_instr, wanted_type, cast1);
            }
        }
    }

    // anyframe->T to anyframe
    if (actual_type->id == ZigTypeIdAnyFrame && actual_type->data.any_frame.result_type != nullptr &&
        wanted_type->id == ZigTypeIdAnyFrame && wanted_type->data.any_frame.result_type == nullptr)
    {
        return ir_analyze_anyframe_to_anyframe(ira, source_instr, value, wanted_type);
    }

    // cast from null literal to maybe type
    if (wanted_type->id == ZigTypeIdOptional &&
        actual_type->id == ZigTypeIdNull)
    {
        return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type);
    }

    // cast from null literal to C pointer
    if (wanted_type->id == ZigTypeIdPointer && wanted_type->data.pointer.ptr_len == PtrLenC &&
        actual_type->id == ZigTypeIdNull)
    {
        return ir_analyze_null_to_c_pointer(ira, source_instr, value, wanted_type);
    }

    // cast from E to E!T
    if (wanted_type->id == ZigTypeIdErrorUnion &&
        actual_type->id == ZigTypeIdErrorSet)
    {
        return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type, nullptr);
    }

    // cast from a typed number to a comptime integer or float literal;
    // only possible when the number is known at compile time
    if (instr_is_comptime(value) &&
        ((actual_type->id == ZigTypeIdInt && wanted_type->id == ZigTypeIdComptimeInt) ||
        (actual_type->id == ZigTypeIdFloat && wanted_type->id == ZigTypeIdComptimeFloat)))
    {
        return ir_analyze_number_to_literal(ira, source_instr, value, wanted_type);
    }

    // cast from enum literal to enum with matching field name
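    // For illustration (Zig): an enum literal coerces to any enum that has a
    // field of that name, e.g.
    //     const Color = enum { red, green, blue };
    //     const c: Color = .green;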
    if (actual_type->id == ZigTypeIdEnumLiteral && wanted_type->id == ZigTypeIdEnum)
    {
        return ir_analyze_enum_literal(ira, source_instr, value, wanted_type);
    }

    // cast from enum literal to optional enum
    if (actual_type->id == ZigTypeIdEnumLiteral &&
        (wanted_type->id == ZigTypeIdOptional && wanted_type->data.maybe.child_type->id == ZigTypeIdEnum))
    {
        IrInstGen *result = ir_analyze_enum_literal(ira, source_instr, value, wanted_type->data.maybe.child_type);
        if (type_is_invalid(result->value->type))
            return result;

        return ir_analyze_optional_wrap(ira, source_instr, value, wanted_type, nullptr);
    }

    // cast from enum literal to error union when payload is an enum
    if (actual_type->id == ZigTypeIdEnumLiteral &&
        (wanted_type->id == ZigTypeIdErrorUnion && wanted_type->data.error_union.payload_type->id == ZigTypeIdEnum))
    {
        IrInstGen *result = ir_analyze_enum_literal(ira, source_instr, value, wanted_type->data.error_union.payload_type);
        if (type_is_invalid(result->value->type))
            return result;

        return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type, nullptr);
    }

    // cast from union to the enum type of the union
    if (actual_type->id == ZigTypeIdUnion && wanted_type->id == ZigTypeIdEnum) {
        if ((err = type_resolve(ira->codegen, actual_type, ResolveStatusZeroBitsKnown)))
            return ira->codegen->invalid_inst_gen;

        if (actual_type->data.unionation.tag_type == wanted_type) {
            return ir_analyze_union_to_tag(ira, source_instr, value, wanted_type);
        }
    }

    // enum to union which has the enum as the tag type, or
    // enum literal to union which has a matching enum as the tag type
    if (is_tagged_union(wanted_type) && (actual_type->id == ZigTypeIdEnum ||
                actual_type->id == ZigTypeIdEnumLiteral))
    {
        return ir_analyze_enum_to_union(ira, source_instr, value, wanted_type);
    }

    // cast from *T to *[1]T
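    // For illustration (Zig), per the case below:
    //     var x: i32 = 7;
    //     const p: *[1]i32 = &x; // *i32 -> *[1]i32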
    if (wanted_type->id == ZigTypeIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
        actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle)
    {
        ZigType *array_type = wanted_type->data.pointer.child_type;
        if (array_type->id == ZigTypeIdArray && array_type->data.array.len == 1 &&
            types_match_const_cast_only(ira, array_type->data.array.child_type,
            actual_type->data.pointer.child_type, source_node,
            !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk &&
            // `types_match_const_cast_only` only checks the child types; the outer qualifiers are checked here
            (!actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const) &&
            (!actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile))
        {
            if ((err = ir_cast_ptr_align(ira, source_instr, wanted_type, actual_type, value->base.source_node)))
                return ira->codegen->invalid_inst_gen;

            return ir_analyze_ptr_to_array(ira, source_instr, value, wanted_type);
        }
    }

    // [:x]T to [*:x]T
    // [:x]T to [*c]T
    if (wanted_type->id == ZigTypeIdPointer && is_slice(actual_type) &&
        ((wanted_type->data.pointer.ptr_len == PtrLenUnknown && wanted_type->data.pointer.sentinel != nullptr) ||
         wanted_type->data.pointer.ptr_len == PtrLenC))
    {
        ZigType *slice_ptr_type = resolve_struct_field_type(ira->codegen,
                actual_type->data.structure.fields[slice_ptr_index]);
        if (types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
                slice_ptr_type->data.pointer.child_type, source_node,
                !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk &&
            (slice_ptr_type->data.pointer.sentinel != nullptr &&
                (wanted_type->data.pointer.ptr_len == PtrLenC ||
                    const_values_equal(ira->codegen, wanted_type->data.pointer.sentinel,
                        slice_ptr_type->data.pointer.sentinel))))
        {
            TypeStructField *ptr_field = actual_type->data.structure.fields[slice_ptr_index];
            IrInstGen *slice_ptr = ir_analyze_struct_value_field_value(ira, source_instr, value, ptr_field);
            return ir_implicit_cast2(ira, source_instr, slice_ptr, wanted_type);
        }
    }

    // cast from *T and [*]T to *c_void and ?*c_void
    // but don't do it if the actual type is a double pointer
    if (is_pointery_and_elem_is_not_pointery(actual_type)) {
        ZigType *dest_ptr_type = nullptr;
        if (wanted_type->id == ZigTypeIdPointer &&
            actual_type->id != ZigTypeIdOptional &&
            wanted_type->data.pointer.child_type == ira->codegen->builtin_types.entry_c_void)
        {
            dest_ptr_type = wanted_type;
        } else if (wanted_type->id == ZigTypeIdOptional &&
            wanted_type->data.maybe.child_type->id == ZigTypeIdPointer &&
            wanted_type->data.maybe.child_type->data.pointer.child_type == ira->codegen->builtin_types.entry_c_void)
        {
            dest_ptr_type = wanted_type->data.maybe.child_type;
        }
        if (dest_ptr_type != nullptr) {
            return ir_analyze_ptr_cast(ira, source_instr, value, source_instr, wanted_type, source_instr, true,
                    false);
        }
    }

    // cast from T to *T where T is zero bits
    if (wanted_type->id == ZigTypeIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
        types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
            actual_type, source_node, !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
    {
        bool has_bits;
        if ((err = type_has_bits2(ira->codegen, actual_type, &has_bits)))
            return ira->codegen->invalid_inst_gen;
        if (!has_bits) {
            return ir_get_ref(ira, source_instr, value, false, false);
        }
    }

    // cast from @Vector(N, T) to [N]T
    if (wanted_type->id == ZigTypeIdArray && actual_type->id == ZigTypeIdVector &&
        wanted_type->data.array.len == actual_type->data.vector.len &&
        types_match_const_cast_only(ira, wanted_type->data.array.child_type,
            actual_type->data.vector.elem_type, source_node, false).id == ConstCastResultIdOk)
    {
        return ir_analyze_vector_to_array(ira, source_instr, value, wanted_type, nullptr);
    }

    // cast from [N]T to @Vector(N, T)
    if (actual_type->id == ZigTypeIdArray && wanted_type->id == ZigTypeIdVector &&
        actual_type->data.array.len == wanted_type->data.vector.len &&
        types_match_const_cast_only(ira, actual_type->data.array.child_type,
            wanted_type->data.vector.elem_type, source_node, false).id == ConstCastResultIdOk)
    {
        return ir_analyze_array_to_vector(ira, source_instr, value, wanted_type);
    }

    // casting between C pointers and normal pointers
    if (wanted_type->id == ZigTypeIdPointer && actual_type->id == ZigTypeIdPointer &&
        (wanted_type->data.pointer.ptr_len == PtrLenC || actual_type->data.pointer.ptr_len == PtrLenC) &&
        types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
            actual_type->data.pointer.child_type, source_node,
            !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
    {
        return ir_analyze_ptr_cast(ira, source_instr, value, source_instr, wanted_type, source_instr, true, false);
    }

    // cast from integer to C pointer
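    // For illustration (Zig): integers coerce to C pointers, mirroring the
    // permissiveness of C, e.g.
    //     const p: [*c]const u8 = 0x1000;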
    if (wanted_type->id == ZigTypeIdPointer && wanted_type->data.pointer.ptr_len == PtrLenC &&
        (actual_type->id == ZigTypeIdInt || actual_type->id == ZigTypeIdComptimeInt))
    {
        return ir_analyze_int_to_c_ptr(ira, source_instr, value, wanted_type);
    }

    // cast from inferred struct type to array, union, or struct
    if (is_anon_container(actual_type)) {
        const bool is_array_init =
            actual_type->data.structure.special == StructSpecialInferredTuple;
        const uint32_t field_count = actual_type->data.structure.src_field_count;

        if (wanted_type->id == ZigTypeIdArray && (is_array_init || field_count == 0) &&
            wanted_type->data.array.len == field_count)
        {
            return ir_analyze_struct_literal_to_array(ira, source_instr, value, wanted_type);
        } else if (wanted_type->id == ZigTypeIdStruct &&
                (!is_array_init || field_count == 0))
        {
            return ir_analyze_struct_literal_to_struct(ira, source_instr, value, wanted_type);
        } else if (wanted_type->id == ZigTypeIdUnion && !is_array_init && field_count == 1) {
            return ir_analyze_struct_literal_to_union(ira, source_instr, value, wanted_type);
        }
    }

    // cast from undefined to anything
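    // For illustration (Zig): `undefined` coerces to any type, e.g.
    //     var x: u32 = undefined;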
    if (actual_type->id == ZigTypeIdUndefined) {
        return ir_analyze_undefined_to_anything(ira, source_instr, value, wanted_type);
    }

    // T to ?U, where T implicitly casts to U
    if (wanted_type->id == ZigTypeIdOptional && actual_type->id != ZigTypeIdOptional) {
        IrInstGen *cast1 = ir_implicit_cast2(ira, source_instr, value, wanted_type->data.maybe.child_type);
        if (type_is_invalid(cast1->value->type))
            return ira->codegen->invalid_inst_gen;
        return ir_implicit_cast2(ira, source_instr, cast1, wanted_type);
    }

    // T to E!U, where T implicitly casts to U
    if (wanted_type->id == ZigTypeIdErrorUnion && actual_type->id != ZigTypeIdErrorUnion &&
        actual_type->id != ZigTypeIdErrorSet)
    {
        IrInstGen *cast1 = ir_implicit_cast2(ira, source_instr, value, wanted_type->data.error_union.payload_type);
        if (type_is_invalid(cast1->value->type))
            return ira->codegen->invalid_inst_gen;
        return ir_implicit_cast2(ira, source_instr, cast1, wanted_type);
    }

    ErrorMsg *parent_msg = ir_add_error_node(ira, source_instr->source_node,
        buf_sprintf("expected type '%s', found '%s'",
            buf_ptr(&wanted_type->name),
            buf_ptr(&actual_type->name)));
    report_recursive_error(ira, source_instr->source_node, &const_cast_result, parent_msg);
    return ira->codegen->invalid_inst_gen;
}

static IrInstGen *ir_implicit_cast2(IrAnalyze *ira, IrInst *value_source_instr,
        IrInstGen *value, ZigType *expected_type)
{
    assert(value);
    assert(!expected_type || !type_is_invalid(expected_type));
    assert(value->value->type);
    assert(!type_is_invalid(value->value->type));
    if (expected_type == nullptr)
        return value; // anything will do
    if (expected_type == value->value->type)
        return value; // match
    if (value->value->type->id == ZigTypeIdUnreachable)
        return value;

    return ir_analyze_cast(ira, value_source_instr, expected_type, value);
}

static IrInstGen *ir_implicit_cast(IrAnalyze *ira, IrInstGen *value, ZigType *expected_type) {
    return ir_implicit_cast2(ira, &value->base, value, expected_type);
}

static ZigType *get_ptr_elem_type(CodeGen *g, IrInstGen *ptr) {
    ir_assert_gen(ptr->value->type->id == ZigTypeIdPointer, ptr);
    ZigType *elem_type = ptr->value->type->data.pointer.child_type;
    if (elem_type != g->builtin_types.entry_var)
        return elem_type;

    if (ir_resolve_lazy(g, ptr->base.source_node, ptr->value))
        return g->builtin_types.entry_invalid;

    assert(value_is_comptime(ptr->value));
    ZigValue *pointee = const_ptr_pointee_unchecked(g, ptr->value);
    return pointee->type;
}

static IrInstGen *ir_get_deref(IrAnalyze *ira, IrInst* source_instruction, IrInstGen *ptr,
        ResultLoc *result_loc)
{
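    // Loads the value `ptr` points to. Comptime-known loads fold to constants,
    // a load through a ref instruction reuses the ref's operand directly, and a
    // runtime load of a vector element becomes a vector extract instruction.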
    Error err;
    ZigType *ptr_type = ptr->value->type;
    if (type_is_invalid(ptr_type))
        return ira->codegen->invalid_inst_gen;

    if (ptr_type->id != ZigTypeIdPointer) {
        ir_add_error_node(ira, source_instruction->source_node,
            buf_sprintf("attempt to dereference non-pointer type '%s'",
                buf_ptr(&ptr_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *child_type = ptr_type->data.pointer.child_type;
    if (type_is_invalid(child_type))
        return ira->codegen->invalid_inst_gen;
    // if the child type has one possible value, the deref is comptime
    switch (type_has_one_possible_value(ira->codegen, child_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            return ir_const_move(ira, source_instruction,
                     get_the_one_possible_value(ira->codegen, child_type));
        case OnePossibleValueNo:
            break;
    }
    if (instr_is_comptime(ptr)) {
        if (ptr->value->special == ConstValSpecialUndef) {
            // If we are in a TypeOf call, we return an undefined value instead of erroring
            // since we know the type.
            if (get_scope_typeof(source_instruction->scope)) {
                return ir_const_undef(ira, source_instruction, child_type);
            }

            ir_add_error(ira, &ptr->base, buf_sprintf("attempt to dereference undefined value"));
            return ira->codegen->invalid_inst_gen;
        }
        if (ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
            ZigValue *pointee = const_ptr_pointee_unchecked(ira->codegen, ptr->value);
            if (child_type == ira->codegen->builtin_types.entry_var) {
                child_type = pointee->type;
            }
            if (pointee->special != ConstValSpecialRuntime) {
                IrInstGen *result = ir_const(ira, source_instruction, child_type);

                if ((err = ir_read_const_ptr(ira, ira->codegen, source_instruction->source_node, result->value,
                                ptr->value)))
                {
                    return ira->codegen->invalid_inst_gen;
                }
                result->value->type = child_type;
                return result;
            }
        }
    }

    // if the pointer came from a ref instruction, skip the load and use the ref's operand directly
    if (ptr->id == IrInstGenIdRef) {
        IrInstGenRef *ref_inst = reinterpret_cast<IrInstGenRef *>(ptr);
        return ref_inst->operand;
    }

    // If the instruction is an element pointer instruction into a vector, we
    // emit a vector element extract instruction rather than a pointer load. If
    // the pointer type has a non-VECTOR_INDEX_RUNTIME value, this could have
    // been implemented in the codegen for IrInstGenLoadPtr. However, if it has
    // VECTOR_INDEX_RUNTIME then we must emit a compile error when the vector
    // index cannot be determined right here, right now, because the type
    // information does not contain enough information to actually perform a
    // dereference.
    if (ptr_type->data.pointer.vector_index == VECTOR_INDEX_RUNTIME) {
        if (ptr->id == IrInstGenIdElemPtr) {
            IrInstGenElemPtr *elem_ptr = (IrInstGenElemPtr *)ptr;
            IrInstGen *vector_loaded = ir_get_deref(ira, &elem_ptr->array_ptr->base,
                    elem_ptr->array_ptr, nullptr);
            IrInstGen *elem_index = elem_ptr->elem_index;
            return ir_build_vector_extract_elem(ira, source_instruction, vector_loaded, elem_index);
        }
        ir_add_error(ira, &ptr->base,
            buf_sprintf("unable to determine vector element index of type '%s'", buf_ptr(&ptr_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *result_loc_inst;
    if (ptr_type->data.pointer.host_int_bytes != 0 && handle_is_ptr(ira->codegen, child_type)) {
        if (result_loc == nullptr) result_loc = no_result_loc();
        result_loc_inst = ir_resolve_result(ira, source_instruction, result_loc, child_type, nullptr, true, true);
        if (type_is_invalid(result_loc_inst->value->type) || result_loc_inst->value->type->id == ZigTypeIdUnreachable) {
            return result_loc_inst;
        }
    } else {
        result_loc_inst = nullptr;
    }

    return ir_build_load_ptr_gen(ira, source_instruction, ptr, child_type, result_loc_inst);
}

static bool ir_resolve_const_align(CodeGen *codegen, IrExecutableGen *exec, AstNode *source_node,
        ZigValue *const_val, uint32_t *out)
{
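    // Resolves a comptime alignment value and validates that it is at least 1
    // and a power of two.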
    Error err;
    if ((err = ir_resolve_const_val(codegen, exec, source_node, const_val, UndefBad)))
        return false;

    uint32_t align_bytes = bigint_as_u32(&const_val->data.x_bigint);
    if (align_bytes == 0) {
        exec_add_error_node_gen(codegen, exec, source_node, buf_sprintf("alignment must be >= 1"));
        return false;
    }

    if (!is_power_of_2(align_bytes)) {
        exec_add_error_node_gen(codegen, exec, source_node,
                buf_sprintf("alignment value %" PRIu32 " is not a power of 2", align_bytes));
        return false;
    }

    *out = align_bytes;
    return true;
}

static bool ir_resolve_align(IrAnalyze *ira, IrInstGen *value, ZigType *elem_type, uint32_t *out) {
    if (type_is_invalid(value->value->type))
        return false;

    // Look for this pattern: `*align(@alignOf(T)) T`.
    // This can be resolved to `*out = 0` without resolving the actual alignment.
    if (elem_type != nullptr && value->value->special == ConstValSpecialLazy &&
        value->value->data.x_lazy->id == LazyValueIdAlignOf)
    {
        LazyValueAlignOf *lazy_align_of = reinterpret_cast<LazyValueAlignOf *>(value->value->data.x_lazy);

        ZigType *lazy_elem_type = ir_resolve_type(lazy_align_of->ira, lazy_align_of->target_type);
        if (type_is_invalid(lazy_elem_type))
            return false;

        if (elem_type == lazy_elem_type) {
            *out = 0;
            return true;
        }
    }

    IrInstGen *casted_value = ir_implicit_cast(ira, value, get_align_amt_type(ira->codegen));
    if (type_is_invalid(casted_value->value->type))
        return false;

    return ir_resolve_const_align(ira->codegen, ira->new_irb.exec, value->base.source_node,
            casted_value->value, out);
}

static bool ir_resolve_unsigned(IrAnalyze *ira, IrInstGen *value, ZigType *int_type, uint64_t *out) {
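    // Implicitly casts the value to int_type and resolves it to a
    // comptime-known unsigned integer.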
    if (type_is_invalid(value->value->type))
        return false;

    IrInstGen *casted_value = ir_implicit_cast(ira, value, int_type);
    if (type_is_invalid(casted_value->value->type))
        return false;

    ZigValue *const_val = ir_resolve_const(ira, casted_value, UndefBad);
    if (!const_val)
        return false;

    *out = bigint_as_u64(&const_val->data.x_bigint);
    return true;
}

static bool ir_resolve_usize(IrAnalyze *ira, IrInstGen *value, uint64_t *out) {
    return ir_resolve_unsigned(ira, value, ira->codegen->builtin_types.entry_usize, out);
}

static bool ir_resolve_bool(IrAnalyze *ira, IrInstGen *value, bool *out) {
    if (type_is_invalid(value->value->type))
        return false;

    IrInstGen *casted_value = ir_implicit_cast(ira, value, ira->codegen->builtin_types.entry_bool);
    if (type_is_invalid(casted_value->value->type))
        return false;

    ZigValue *const_val = ir_resolve_const(ira, casted_value, UndefBad);
    if (!const_val)
        return false;

    *out = const_val->data.x_bool;
    return true;
}

static bool ir_resolve_comptime(IrAnalyze *ira, IrInstGen *value, bool *out) {
    if (!value) {
        *out = false;
        return true;
    }
    return ir_resolve_bool(ira, value, out);
}

static bool ir_resolve_atomic_order(IrAnalyze *ira, IrInstGen *value, AtomicOrder *out) {
    if (type_is_invalid(value->value->type))
        return false;

    ZigType *atomic_order_type = get_builtin_type(ira->codegen, "AtomicOrder");

    IrInstGen *casted_value = ir_implicit_cast(ira, value, atomic_order_type);
    if (type_is_invalid(casted_value->value->type))
        return false;

    ZigValue *const_val = ir_resolve_const(ira, casted_value, UndefBad);
    if (!const_val)
        return false;

    *out = (AtomicOrder)bigint_as_u32(&const_val->data.x_enum_tag);
    return true;
}

static bool ir_resolve_atomic_rmw_op(IrAnalyze *ira, IrInstGen *value, AtomicRmwOp *out) {
    if (type_is_invalid(value->value->type))
        return false;

    ZigType *atomic_rmw_op_type = get_builtin_type(ira->codegen, "AtomicRmwOp");

    IrInstGen *casted_value = ir_implicit_cast(ira, value, atomic_rmw_op_type);
    if (type_is_invalid(casted_value->value->type))
        return false;

    ZigValue *const_val = ir_resolve_const(ira, casted_value, UndefBad);
    if (!const_val)
        return false;

    *out = (AtomicRmwOp)bigint_as_u32(&const_val->data.x_enum_tag);
    return true;
}

static bool ir_resolve_global_linkage(IrAnalyze *ira, IrInstGen *value, GlobalLinkageId *out) {
    if (type_is_invalid(value->value->type))
        return false;

    ZigType *global_linkage_type = get_builtin_type(ira->codegen, "GlobalLinkage");

    IrInstGen *casted_value = ir_implicit_cast(ira, value, global_linkage_type);
    if (type_is_invalid(casted_value->value->type))
        return false;

    ZigValue *const_val = ir_resolve_const(ira, casted_value, UndefBad);
    if (!const_val)
        return false;

    *out = (GlobalLinkageId)bigint_as_u32(&const_val->data.x_enum_tag);
    return true;
}

static bool ir_resolve_float_mode(IrAnalyze *ira, IrInstGen *value, FloatMode *out) {
    if (type_is_invalid(value->value->type))
        return false;

    ZigType *float_mode_type = get_builtin_type(ira->codegen, "FloatMode");

    IrInstGen *casted_value = ir_implicit_cast(ira, value, float_mode_type);
    if (type_is_invalid(casted_value->value->type))
        return false;

    ZigValue *const_val = ir_resolve_const(ira, casted_value, UndefBad);
    if (!const_val)
        return false;

    *out = (FloatMode)bigint_as_u32(&const_val->data.x_enum_tag);
    return true;
}

static Buf *ir_resolve_str(IrAnalyze *ira, IrInstGen *value) {
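    // Resolves a comptime-known `[]const u8` value and returns its bytes as a
    // Buf, reusing the backing buffer when the slice covers it exactly.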
    if (type_is_invalid(value->value->type))
        return nullptr;

    ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
            true, false, PtrLenUnknown, 0, 0, 0, false);
    ZigType *str_type = get_slice_type(ira->codegen, ptr_type);
    IrInstGen *casted_value = ir_implicit_cast(ira, value, str_type);
    if (type_is_invalid(casted_value->value->type))
        return nullptr;

    ZigValue *const_val = ir_resolve_const(ira, casted_value, UndefBad);
    if (!const_val)
        return nullptr;

    ZigValue *ptr_field = const_val->data.x_struct.fields[slice_ptr_index];
    ZigValue *len_field = const_val->data.x_struct.fields[slice_len_index];

    assert(ptr_field->data.x_ptr.special == ConstPtrSpecialBaseArray);
    ZigValue *array_val = ptr_field->data.x_ptr.data.base_array.array_val;
    expand_undef_array(ira->codegen, array_val);
    size_t len = bigint_as_usize(&len_field->data.x_bigint);
    if (array_val->data.x_array.special == ConstArraySpecialBuf && len == buf_len(array_val->data.x_array.data.s_buf)) {
        return array_val->data.x_array.data.s_buf;
    }
    Buf *result = buf_alloc();
    buf_resize(result, len);
    for (size_t i = 0; i < len; i += 1) {
        size_t new_index = ptr_field->data.x_ptr.data.base_array.elem_index + i;
        ZigValue *char_val = &array_val->data.x_array.data.s_none.elements[new_index];
        if (char_val->special == ConstValSpecialUndef) {
            ir_add_error(ira, &casted_value->base, buf_sprintf("use of undefined value"));
            return nullptr;
        }
        uint64_t big_c = bigint_as_u64(&char_val->data.x_bigint);
        assert(big_c <= UINT8_MAX);
        uint8_t c = (uint8_t)big_c;
        buf_ptr(result)[i] = c;
    }
    return result;
}

static IrInstGen *ir_analyze_instruction_add_implicit_return_type(IrAnalyze *ira,
        IrInstSrcAddImplicitReturnType *instruction)
{
    IrInstGen *value = instruction->value->child;
    if (type_is_invalid(value->value->type))
        return ir_unreach_error(ira);

    if (instruction->result_loc_ret == nullptr || !instruction->result_loc_ret->implicit_return_type_done) {
        ira->src_implicit_return_type_list.append(value);
    }

    return ir_const_void(ira, &instruction->base.base);
}

static IrInstGen *ir_analyze_instruction_return(IrAnalyze *ira, IrInstSrcReturn *instruction) {
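    // Analyzes a return instruction: the operand is implicitly cast to the
    // function's declared return type, and returning the address of a stack
    // local is reported as an error.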
    if (instruction->operand == nullptr) {
        // result location mechanism took care of it.
        IrInstGen *result = ir_build_return_gen(ira, &instruction->base.base, nullptr);
        return ir_finish_anal(ira, result);
    }

    IrInstGen *operand = instruction->operand->child;
    if (type_is_invalid(operand->value->type))
        return ir_unreach_error(ira);

    IrInstGen *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
    if (type_is_invalid(casted_operand->value->type)) {
        AstNode *source_node = ira->explicit_return_type_source_node;
        if (source_node != nullptr) {
            ErrorMsg *msg = ira->codegen->errors.last();
            add_error_note(ira->codegen, msg, source_node,
                buf_sprintf("return type declared here"));
        }
        return ir_unreach_error(ira);
    }

    if (!instr_is_comptime(operand) && ira->explicit_return_type != nullptr &&
            handle_is_ptr(ira->codegen, ira->explicit_return_type))
    {
        // result location mechanism took care of it.
        IrInstGen *result = ir_build_return_gen(ira, &instruction->base.base, nullptr);
        return ir_finish_anal(ira, result);
    }

    if (casted_operand->value->special == ConstValSpecialRuntime &&
        casted_operand->value->type->id == ZigTypeIdPointer &&
        casted_operand->value->data.rh_ptr == RuntimeHintPtrStack)
    {
        ir_add_error(ira, &instruction->operand->base, buf_sprintf("function returns address of local variable"));
        return ir_unreach_error(ira);
    }

    IrInstGen *result = ir_build_return_gen(ira, &instruction->base.base, casted_operand);
    return ir_finish_anal(ira, result);
}

static IrInstGen *ir_analyze_instruction_const(IrAnalyze *ira, IrInstSrcConst *instruction) {
    return ir_const_move(ira, &instruction->base.base, instruction->value);
}

static IrInstGen *ir_analyze_bin_op_bool(IrAnalyze *ira, IrInstSrcBinOp *bin_op_instruction) {
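    // Analyzes a boolean binary operation: both operands are implicitly cast to
    // bool, and the result is folded when both are comptime-known.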
    IrInstGen *op1 = bin_op_instruction->op1->child;
    if (type_is_invalid(op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = bin_op_instruction->op2->child;
    if (type_is_invalid(op2->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *bool_type = ira->codegen->builtin_types.entry_bool;

    IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, bool_type);
    if (type_is_invalid(casted_op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, bool_type);
    if (type_is_invalid(casted_op2->value->type))
        return ira->codegen->invalid_inst_gen;

    if (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2)) {
        ZigValue *op1_val = ir_resolve_const(ira, casted_op1, UndefBad);
        if (op1_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        assert(casted_op1->value->type->id == ZigTypeIdBool);
        assert(casted_op2->value->type->id == ZigTypeIdBool);
        bool result_bool;
        if (bin_op_instruction->op_id == IrBinOpBoolOr) {
            result_bool = op1_val->data.x_bool || op2_val->data.x_bool;
        } else if (bin_op_instruction->op_id == IrBinOpBoolAnd) {
            result_bool = op1_val->data.x_bool && op2_val->data.x_bool;
        } else {
            zig_unreachable();
        }
        return ir_const_bool(ira, &bin_op_instruction->base.base, result_bool);
    }

    return ir_build_bin_op_gen(ira, &bin_op_instruction->base.base, bool_type,
            bin_op_instruction->op_id, casted_op1, casted_op2, bin_op_instruction->safety_check_on);
}

static bool resolve_cmp_op_id(IrBinOp op_id, Cmp cmp) {
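    // Maps a three-way comparison result to the boolean answer for the given
    // comparison operator.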
    switch (op_id) {
        case IrBinOpCmpEq:
            return cmp == CmpEQ;
        case IrBinOpCmpNotEq:
            return cmp != CmpEQ;
        case IrBinOpCmpLessThan:
            return cmp == CmpLT;
        case IrBinOpCmpGreaterThan:
            return cmp == CmpGT;
        case IrBinOpCmpLessOrEq:
            return cmp != CmpGT;
        case IrBinOpCmpGreaterOrEq:
            return cmp != CmpLT;
        default:
            zig_unreachable();
    }
}

static void set_optional_value_to_null(ZigValue *val) {
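    // Writes the null representation appropriate for the optional's payload kind
    // (pointer-like, error set, or boxed payload).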
    assert(val->special == ConstValSpecialStatic);
    if (val->type->id == ZigTypeIdNull) return; // nothing to do
    assert(val->type->id == ZigTypeIdOptional);
    if (get_src_ptr_type(val->type) != nullptr) {
        val->data.x_ptr.special = ConstPtrSpecialNull;
    } else if (is_opt_err_set(val->type)) {
        val->data.x_err_set = nullptr;
    } else {
        val->data.x_optional = nullptr;
    }
}

static void set_optional_payload(ZigValue *opt_val, ZigValue *payload) {
    assert(opt_val->special == ConstValSpecialStatic);
    assert(opt_val->type->id == ZigTypeIdOptional);
    if (payload == nullptr) {
        set_optional_value_to_null(opt_val);
    } else if (is_opt_err_set(opt_val->type)) {
        assert(payload->type->id == ZigTypeIdErrorSet);
        opt_val->data.x_err_set = payload->data.x_err_set;
    } else {
        opt_val->data.x_optional = payload;
    }
}

static IrInstGen *ir_evaluate_bin_op_cmp(IrAnalyze *ira, ZigType *resolved_type,
    ZigValue *op1_val, ZigValue *op2_val, IrInst *source_instr, IrBinOp op_id,
    bool one_possible_value)
{
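    // Folds a comparison between two comptime-known values. Ordering of pointers
    // is only folded for hard-coded addresses and null; every other case is
    // handled as an equality/inequality check.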
    if (op1_val->special == ConstValSpecialUndef ||
        op2_val->special == ConstValSpecialUndef)
        return ir_const_undef(ira, source_instr, resolved_type);
    if (resolved_type->id == ZigTypeIdPointer && op_id != IrBinOpCmpEq && op_id != IrBinOpCmpNotEq) {
        if ((op1_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr ||
                op1_val->data.x_ptr.special == ConstPtrSpecialNull) &&
            (op2_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr ||
                op2_val->data.x_ptr.special == ConstPtrSpecialNull))
        {
            uint64_t op1_addr = op1_val->data.x_ptr.special == ConstPtrSpecialNull ?
                0 : op1_val->data.x_ptr.data.hard_coded_addr.addr;
            uint64_t op2_addr = op2_val->data.x_ptr.special == ConstPtrSpecialNull ?
                0 : op2_val->data.x_ptr.data.hard_coded_addr.addr;
            Cmp cmp_result;
            if (op1_addr > op2_addr) {
                cmp_result = CmpGT;
            } else if (op1_addr < op2_addr) {
                cmp_result = CmpLT;
            } else {
                cmp_result = CmpEQ;
            }
            bool answer = resolve_cmp_op_id(op_id, cmp_result);
            return ir_const_bool(ira, source_instr, answer);
        }
    } else {
        bool are_equal = one_possible_value || const_values_equal(ira->codegen, op1_val, op2_val);
        bool answer;
        if (op_id == IrBinOpCmpEq) {
            answer = are_equal;
        } else if (op_id == IrBinOpCmpNotEq) {
            answer = !are_equal;
        } else {
            zig_unreachable();
        }
        return ir_const_bool(ira, source_instr, answer);
    }
    zig_unreachable();
}

static IrInstGen *ir_try_evaluate_bin_op_cmp_const(IrAnalyze *ira, IrInst *source_instr, IrInstGen *op1, IrInstGen *op2,
        ZigType *resolved_type, IrBinOp op_id)
{
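    // Attempts to fold the comparison at comptime. Returns nullptr when the
    // operands are not comptime-known; vector comparisons are folded
    // element-wise into a vector of bool.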
    assert(op1->value->type == resolved_type && op2->value->type == resolved_type);
    bool one_possible_value;
    switch (type_has_one_possible_value(ira->codegen, resolved_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            one_possible_value = true;
            break;
        case OnePossibleValueNo:
            one_possible_value = false;
            break;
    }

    if (one_possible_value || (instr_is_comptime(op1) && instr_is_comptime(op2))) {
        ZigValue *op1_val = one_possible_value ? op1->value : ir_resolve_const(ira, op1, UndefBad);
        if (op1_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        ZigValue *op2_val = one_possible_value ? op2->value : ir_resolve_const(ira, op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (resolved_type->id != ZigTypeIdVector)
            return ir_evaluate_bin_op_cmp(ira, resolved_type, op1_val, op2_val, source_instr, op_id, one_possible_value);
        IrInstGen *result = ir_const(ira, source_instr,
                                     get_vector_type(ira->codegen, resolved_type->data.vector.len, ira->codegen->builtin_types.entry_bool));
        result->value->data.x_array.data.s_none.elements =
                ira->codegen->pass1_arena->allocate<ZigValue>(resolved_type->data.vector.len);

        expand_undef_array(ira->codegen, result->value);
        for (size_t i = 0; i < resolved_type->data.vector.len; i += 1) {
            IrInstGen *cur_res = ir_evaluate_bin_op_cmp(ira, resolved_type->data.vector.elem_type,
                                                        &op1_val->data.x_array.data.s_none.elements[i],
                                                        &op2_val->data.x_array.data.s_none.elements[i],
                                                        source_instr, op_id, one_possible_value);
            copy_const_val(ira->codegen, &result->value->data.x_array.data.s_none.elements[i], cur_res->value);
        }
        return result;
    } else {
        return nullptr;
    }
}

// Returns ErrorNotLazy when the comparison against zero cannot be determined
// without fully resolving the value.
static Error lazy_cmp_zero(CodeGen *codegen, AstNode *source_node, ZigValue *val, Cmp *result) {
    Error err;

    switch (type_has_one_possible_value(codegen, val->type)) {
        case OnePossibleValueInvalid:
            return ErrorSemanticAnalyzeFail;
        case OnePossibleValueNo:
            break;
        case OnePossibleValueYes:
            switch (val->type->id) {
                case ZigTypeIdInt:
                    src_assert(val->type->data.integral.bit_count == 0, source_node);
                    *result = CmpEQ;
                    return ErrorNone;
                case ZigTypeIdUndefined:
                    return ErrorNotLazy;
                default:
                    zig_unreachable();
            }
    }

    switch (val->special) {
        case ConstValSpecialRuntime:
        case ConstValSpecialUndef:
            return ErrorNotLazy;
        case ConstValSpecialStatic:
            switch (val->type->id) {
                case ZigTypeIdComptimeInt:
                case ZigTypeIdInt:
                    *result = bigint_cmp_zero(&val->data.x_bigint);
                    return ErrorNone;
                case ZigTypeIdComptimeFloat:
                case ZigTypeIdFloat:
                    if (float_is_nan(val))
                        return ErrorNotLazy;
                    *result = float_cmp_zero(val);
                    return ErrorNone;
                default:
                    return ErrorNotLazy;
            }
        case ConstValSpecialLazy:
            switch (val->data.x_lazy->id) {
                case LazyValueIdInvalid:
                    zig_unreachable();
                case LazyValueIdAlignOf: {
                    LazyValueAlignOf *lazy_align_of = reinterpret_cast<LazyValueAlignOf *>(val->data.x_lazy);
                    IrAnalyze *ira = lazy_align_of->ira;

                    bool is_zero_bits;
                    if ((err = type_val_resolve_zero_bits(ira->codegen, lazy_align_of->target_type->value,
                                    nullptr, nullptr, &is_zero_bits)))
                    {
                        return err;
                    }

                    *result = is_zero_bits ? CmpEQ : CmpGT;
                    return ErrorNone;
                }
                case LazyValueIdSizeOf: {
                    LazyValueSizeOf *lazy_size_of = reinterpret_cast<LazyValueSizeOf *>(val->data.x_lazy);
                    IrAnalyze *ira = lazy_size_of->ira;
                    bool is_zero_bits;
                    if ((err = type_val_resolve_zero_bits(ira->codegen, lazy_size_of->target_type->value,
                        nullptr, nullptr, &is_zero_bits)))
                    {
                        return err;
                    }
                    *result = is_zero_bits ? CmpEQ : CmpGT;
                    return ErrorNone;
                }
                default:
                    return ErrorNotLazy;
            }
    }
    zig_unreachable();
}

static ErrorMsg *ir_eval_bin_op_cmp_scalar(IrAnalyze *ira, IrInst* source_instr,
    ZigValue *op1_val, IrBinOp op_id, ZigValue *op2_val, ZigValue *out_val)
{
    Error err;
    {
        // Before resolving the values, we special-case comparisons against zero. These can often
        // be done without resolving lazy values, preventing potential dependency loops.
        Cmp op1_cmp_zero;
        if ((err = lazy_cmp_zero(ira->codegen, source_instr->source_node, op1_val, &op1_cmp_zero))) {
            if (err == ErrorNotLazy) goto never_mind_just_calculate_it_normally;
            return ira->codegen->trace_err;
        }
        Cmp op2_cmp_zero;
        if ((err = lazy_cmp_zero(ira->codegen, source_instr->source_node, op2_val, &op2_cmp_zero))) {
            if (err == ErrorNotLazy) goto never_mind_just_calculate_it_normally;
            return ira->codegen->trace_err;
        }
        bool can_cmp_zero = false;
        Cmp cmp_result;
        if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpEQ) {
            can_cmp_zero = true;
            cmp_result = CmpEQ;
        } else if (op1_cmp_zero == CmpGT && op2_cmp_zero == CmpEQ) {
            can_cmp_zero = true;
            cmp_result = CmpGT;
        } else if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpGT) {
            can_cmp_zero = true;
            cmp_result = CmpLT;
        } else if (op1_cmp_zero == CmpLT && op2_cmp_zero == CmpEQ) {
            can_cmp_zero = true;
            cmp_result = CmpLT;
        } else if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpLT) {
            can_cmp_zero = true;
            cmp_result = CmpGT;
        } else if (op1_cmp_zero == CmpLT && op2_cmp_zero == CmpGT) {
            can_cmp_zero = true;
            cmp_result = CmpLT;
        } else if (op1_cmp_zero == CmpGT && op2_cmp_zero == CmpLT) {
            can_cmp_zero = true;
            cmp_result = CmpGT;
        }
        if (can_cmp_zero) {
            bool answer = resolve_cmp_op_id(op_id, cmp_result);
            out_val->special = ConstValSpecialStatic;
            out_val->data.x_bool = answer;
            return nullptr;
        }
    }
never_mind_just_calculate_it_normally:

    if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec, source_instr->source_node,
                    op1_val, UndefOk)))
    {
        return ira->codegen->trace_err;
    }
    if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec, source_instr->source_node,
                    op2_val, UndefOk)))
    {
        return ira->codegen->trace_err;
    }


    if (op1_val->special == ConstValSpecialUndef || op2_val->special == ConstValSpecialUndef ||
        op1_val->type->id == ZigTypeIdUndefined || op2_val->type->id == ZigTypeIdUndefined)
    {
        out_val->special = ConstValSpecialUndef;
        return nullptr;
    }

    bool op1_is_float = op1_val->type->id == ZigTypeIdFloat || op1_val->type->id == ZigTypeIdComptimeFloat;
    bool op2_is_float = op2_val->type->id == ZigTypeIdFloat || op2_val->type->id == ZigTypeIdComptimeFloat;
    if (op1_is_float && op2_is_float) {
        if (float_is_nan(op1_val) || float_is_nan(op2_val)) {
            out_val->special = ConstValSpecialStatic;
            out_val->data.x_bool = op_id == IrBinOpCmpNotEq;
            return nullptr;
        }
        if (op1_val->type->id == ZigTypeIdComptimeFloat) {
            IrInstGen *tmp = ir_const_noval(ira, source_instr);
            tmp->value = op1_val;
            IrInstGen *casted = ir_implicit_cast(ira, tmp, op2_val->type);
            op1_val = casted->value;
        } else if (op2_val->type->id == ZigTypeIdComptimeFloat) {
            IrInstGen *tmp = ir_const_noval(ira, source_instr);
            tmp->value = op2_val;
            IrInstGen *casted = ir_implicit_cast(ira, tmp, op1_val->type);
            op2_val = casted->value;
        }
        Cmp cmp_result = float_cmp(op1_val, op2_val);
        out_val->special = ConstValSpecialStatic;
        out_val->data.x_bool = resolve_cmp_op_id(op_id, cmp_result);
        return nullptr;
    }

    bool op1_is_int = op1_val->type->id == ZigTypeIdInt || op1_val->type->id == ZigTypeIdComptimeInt;
    bool op2_is_int = op2_val->type->id == ZigTypeIdInt || op2_val->type->id == ZigTypeIdComptimeInt;

    if (op1_is_int && op2_is_int) {
        Cmp cmp_result = bigint_cmp(&op1_val->data.x_bigint, &op2_val->data.x_bigint);
        out_val->special = ConstValSpecialStatic;
        out_val->data.x_bool = resolve_cmp_op_id(op_id, cmp_result);

        return nullptr;
    }

    // Handle the case where one of the two operands is a fp value and the other
    // is an integer value
    ZigValue *float_val;
    if (op1_is_int && op2_is_float) {
        float_val = op2_val;
    } else if (op1_is_float && op2_is_int) {
        float_val = op1_val;
    } else {
        zig_unreachable();
    }

    // They can never be equal if the fp value has a non-zero fractional part
    if (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq) {
        if (float_has_fraction(float_val)) {
            out_val->special = ConstValSpecialStatic;
            out_val->data.x_bool = op_id == IrBinOpCmpNotEq;
            return nullptr;
        }
    }

    // Cast the integer operand into a fp value to perform the comparison
    BigFloat op1_bigfloat;
    BigFloat op2_bigfloat;
    value_to_bigfloat(&op1_bigfloat, op1_val);
    value_to_bigfloat(&op2_bigfloat, op2_val);

    Cmp cmp_result = bigfloat_cmp(&op1_bigfloat, &op2_bigfloat);
    out_val->special = ConstValSpecialStatic;
    out_val->data.x_bool = resolve_cmp_op_id(op_id, cmp_result);

    return nullptr;
}

static IrInstGen *ir_analyze_bin_op_cmp_numeric(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *op1, IrInstGen *op2, IrBinOp op_id)
{
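    // Analyzes a numeric comparison (scalar or vector): fold comptime-known
    // operands, use comptime-known sign information against zero to answer
    // trivially true/false comparisons, and otherwise cast both operands to a
    // common type wide enough for either and emit a runtime comparison.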
    Error err;

    ZigType *scalar_result_type = ira->codegen->builtin_types.entry_bool;
    ZigType *result_type = scalar_result_type;
    ZigType *op1_scalar_type = op1->value->type;
    ZigType *op2_scalar_type = op2->value->type;
    if (op1->value->type->id == ZigTypeIdVector && op2->value->type->id == ZigTypeIdVector) {
        if (op1->value->type->data.vector.len != op2->value->type->data.vector.len) {
            ir_add_error(ira, source_instr,
                buf_sprintf("vector length mismatch: %" PRIu64 " and %" PRIu64,
                    op1->value->type->data.vector.len, op2->value->type->data.vector.len));
            return ira->codegen->invalid_inst_gen;
        }
        result_type = get_vector_type(ira->codegen, op1->value->type->data.vector.len, scalar_result_type);
        op1_scalar_type = op1->value->type->data.vector.elem_type;
        op2_scalar_type = op2->value->type->data.vector.elem_type;
    } else if (op1->value->type->id == ZigTypeIdVector || op2->value->type->id == ZigTypeIdVector) {
        ir_add_error(ira, source_instr,
            buf_sprintf("mixed scalar and vector operands to comparison operator: '%s' and '%s'",
                buf_ptr(&op1->value->type->name), buf_ptr(&op2->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    bool opv_op1;
    switch (type_has_one_possible_value(ira->codegen, op1->value->type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            opv_op1 = true;
            break;
        case OnePossibleValueNo:
            opv_op1 = false;
            break;
    }
    bool opv_op2;
    switch (type_has_one_possible_value(ira->codegen, op2->value->type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            opv_op2 = true;
            break;
        case OnePossibleValueNo:
            opv_op2 = false;
            break;
    }
    Cmp op1_cmp_zero;
    bool have_op1_cmp_zero = false;
    if ((err = lazy_cmp_zero(ira->codegen, source_instr->source_node, op1->value, &op1_cmp_zero))) {
        if (err != ErrorNotLazy) return ira->codegen->invalid_inst_gen;
    } else {
        have_op1_cmp_zero = true;
    }
    Cmp op2_cmp_zero;
    bool have_op2_cmp_zero = false;
    if ((err = lazy_cmp_zero(ira->codegen, source_instr->source_node, op2->value, &op2_cmp_zero))) {
        if (err != ErrorNotLazy) return ira->codegen->invalid_inst_gen;
    } else {
        have_op2_cmp_zero = true;
    }
    if (((opv_op1 || instr_is_comptime(op1)) && (opv_op2 || instr_is_comptime(op2))) ||
        (have_op1_cmp_zero && have_op2_cmp_zero))
    {
        IrInstGen *result_instruction = ir_const(ira, source_instr, result_type);
        ZigValue *out_val = result_instruction->value;
        if (result_type->id == ZigTypeIdVector) {
            size_t len = result_type->data.vector.len;
            expand_undef_array(ira->codegen, op1->value);
            expand_undef_array(ira->codegen, op2->value);
            out_val->special = ConstValSpecialUndef;
            expand_undef_array(ira->codegen, out_val);
            for (size_t i = 0; i < len; i += 1) {
                ZigValue *scalar_op1_val = &op1->value->data.x_array.data.s_none.elements[i];
                ZigValue *scalar_op2_val = &op2->value->data.x_array.data.s_none.elements[i];
                ZigValue *scalar_out_val = &out_val->data.x_array.data.s_none.elements[i];
                assert(scalar_out_val->type == scalar_result_type);
                ErrorMsg *msg = ir_eval_bin_op_cmp_scalar(ira, source_instr,
                        scalar_op1_val, op_id, scalar_op2_val, scalar_out_val);
                if (msg != nullptr) {
                    add_error_note(ira->codegen, msg, source_instr->source_node,
                        buf_sprintf("when computing vector element at index %" ZIG_PRI_usize, i));
                    return ira->codegen->invalid_inst_gen;
                }
            }
            out_val->type = result_type;
            out_val->special = ConstValSpecialStatic;
        } else {
            if (ir_eval_bin_op_cmp_scalar(ira, source_instr, op1->value, op_id,
                        op2->value, out_val) != nullptr)
            {
                return ira->codegen->invalid_inst_gen;
            }
        }
        return result_instruction;
    }

    // If one operand has a comptime-known comparison with 0, and the other operand is unsigned, we might
    // know the answer, depending on the operator.
    // TODO make this work with vectors
    if (have_op1_cmp_zero && op2_scalar_type->id == ZigTypeIdInt && !op2_scalar_type->data.integral.is_signed) {
        if (op1_cmp_zero == CmpEQ) {
            // 0 <= unsigned_x    // true
            // 0 >  unsigned_x    // false
            switch (op_id) {
                case IrBinOpCmpLessOrEq:
                    return ir_const_bool(ira, source_instr, true);
                case IrBinOpCmpGreaterThan:
                    return ir_const_bool(ira, source_instr, false);
                default:
                    break;
            }
        } else if (op1_cmp_zero == CmpLT) {
            // -1 != unsigned_x   // true
            // -1 <= unsigned_x   // true
            // -1 <  unsigned_x   // true
            // -1 == unsigned_x   // false
            // -1 >= unsigned_x   // false
            // -1 >  unsigned_x   // false
            switch (op_id) {
                case IrBinOpCmpNotEq:
                case IrBinOpCmpLessOrEq:
                case IrBinOpCmpLessThan:
                    return ir_const_bool(ira, source_instr, true);
                case IrBinOpCmpEq:
                case IrBinOpCmpGreaterOrEq:
                case IrBinOpCmpGreaterThan:
                    return ir_const_bool(ira, source_instr, false);
                default:
                    break;
            }
        }
    }
    if (have_op2_cmp_zero && op1_scalar_type->id == ZigTypeIdInt && !op1_scalar_type->data.integral.is_signed) {
        if (op2_cmp_zero == CmpEQ) {
            // unsigned_x <  0    // false
            // unsigned_x >= 0    // true
            switch (op_id) {
                case IrBinOpCmpLessThan:
                    return ir_const_bool(ira, source_instr, false);
                case IrBinOpCmpGreaterOrEq:
                    return ir_const_bool(ira, source_instr, true);
                default:
                    break;
            }
        } else if (op2_cmp_zero == CmpLT) {
            // unsigned_x != -1   // true
            // unsigned_x >= -1   // true
            // unsigned_x >  -1   // true
            // unsigned_x == -1   // false
            // unsigned_x <  -1   // false
            // unsigned_x <= -1   // false
            switch (op_id) {
                case IrBinOpCmpNotEq:
                case IrBinOpCmpGreaterOrEq:
                case IrBinOpCmpGreaterThan:
                    return ir_const_bool(ira, source_instr, true);
                case IrBinOpCmpEq:
                case IrBinOpCmpLessThan:
                case IrBinOpCmpLessOrEq:
                    return ir_const_bool(ira, source_instr, false);
                default:
                    break;
            }
        }
    }

    // It must be a runtime comparison.
    // For floats, emit a float comparison instruction.
    bool op1_is_float = op1_scalar_type->id == ZigTypeIdFloat || op1_scalar_type->id == ZigTypeIdComptimeFloat;
    bool op2_is_float = op2_scalar_type->id == ZigTypeIdFloat || op2_scalar_type->id == ZigTypeIdComptimeFloat;
    if (op1_is_float && op2_is_float) {
        // Implicitly cast the smaller type to the larger one.
        ZigType *dest_scalar_type;
        if (op1_scalar_type->id == ZigTypeIdComptimeFloat) {
            dest_scalar_type = op2_scalar_type;
        } else if (op2_scalar_type->id == ZigTypeIdComptimeFloat) {
            dest_scalar_type = op1_scalar_type;
        } else if (op1_scalar_type->data.floating.bit_count >= op2_scalar_type->data.floating.bit_count) {
            dest_scalar_type = op1_scalar_type;
        } else {
            dest_scalar_type = op2_scalar_type;
        }
        ZigType *dest_type = (result_type->id == ZigTypeIdVector) ?
            get_vector_type(ira->codegen, result_type->data.vector.len, dest_scalar_type) : dest_scalar_type;
        IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, dest_type);
        IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, dest_type);
        if (type_is_invalid(casted_op1->value->type) || type_is_invalid(casted_op2->value->type))
            return ira->codegen->invalid_inst_gen;
        return ir_build_bin_op_gen(ira, source_instr, result_type, op_id, casted_op1, casted_op2, true);
    }

    // For mixed unsigned integer sizes, implicitly cast both operands to the larger integer.
    // For mixed signed and unsigned integers, implicitly cast both operands to a signed
    // integer with one extra bit.
    // For mixed floats and integers, extract the integer part from the float, cast that to
    // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
    // add/subtract 1.
    bool dest_int_is_signed = false;
    if (have_op1_cmp_zero) {
        if (op1_cmp_zero == CmpLT) dest_int_is_signed = true;
    } else if (op1_is_float) {
        dest_int_is_signed = true;
    } else if (op1_scalar_type->id == ZigTypeIdInt && op1_scalar_type->data.integral.is_signed) {
        dest_int_is_signed = true;
    }
    if (have_op2_cmp_zero) {
        if (op2_cmp_zero == CmpLT) dest_int_is_signed = true;
    } else if (op2_is_float) {
        dest_int_is_signed = true;
    } else if (op2->value->type->id == ZigTypeIdInt && op2->value->type->data.integral.is_signed) {
        dest_int_is_signed = true;
    }
    ZigType *dest_float_type = nullptr;
    uint32_t op1_bits;
    if (instr_is_comptime(op1)) {
        ZigValue *op1_val = ir_resolve_const(ira, op1, UndefOk);
        if (op1_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (op1_val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, source_instr, ira->codegen->builtin_types.entry_bool);
        if (result_type->id == ZigTypeIdVector) {
            ir_add_error(ira, &op1->base, buf_sprintf("compiler bug: TODO: support comptime vector here"));
            return ira->codegen->invalid_inst_gen;
        }
        bool is_unsigned;
        if (op1_is_float) {
            BigInt bigint = {};
            float_init_bigint(&bigint, op1_val);
            Cmp zcmp = float_cmp_zero(op1_val);
            if (float_has_fraction(op1_val)) {
                if (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq) {
                    return ir_const_bool(ira, source_instr, op_id == IrBinOpCmpNotEq);
                }
                if (zcmp == CmpLT) {
                    bigint_decr(&bigint);
                } else {
                    bigint_incr(&bigint);
                }
            }
            op1_bits = bigint_bits_needed(&bigint);
            is_unsigned = zcmp != CmpLT;
        } else {
            op1_bits = bigint_bits_needed(&op1_val->data.x_bigint);
            is_unsigned = bigint_cmp_zero(&op1_val->data.x_bigint) != CmpLT;
        }
        if (is_unsigned && dest_int_is_signed) {
            op1_bits += 1;
        }
    } else if (op1_is_float) {
        dest_float_type = op1_scalar_type;
    } else {
        ir_assert(op1_scalar_type->id == ZigTypeIdInt, source_instr);
        op1_bits = op1_scalar_type->data.integral.bit_count;
        if (!op1_scalar_type->data.integral.is_signed && dest_int_is_signed) {
            op1_bits += 1;
        }
    }
    uint32_t op2_bits;
    if (instr_is_comptime(op2)) {
        ZigValue *op2_val = ir_resolve_const(ira, op2, UndefOk);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        if (op2_val->special == ConstValSpecialUndef)
            return ir_const_undef(ira, source_instr, ira->codegen->builtin_types.entry_bool);
        if (result_type->id == ZigTypeIdVector) {
            ir_add_error(ira, &op2->base, buf_sprintf("compiler bug: TODO: support comptime vector here"));
            return ira->codegen->invalid_inst_gen;
        }
        bool is_unsigned;
        if (op2_is_float) {
            BigInt bigint = {};
            float_init_bigint(&bigint, op2_val);
            Cmp zcmp = float_cmp_zero(op2_val);
            if (float_has_fraction(op2_val)) {
                if (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq) {
                    return ir_const_bool(ira, source_instr, op_id == IrBinOpCmpNotEq);
                }
                if (zcmp == CmpLT) {
                    bigint_decr(&bigint);
                } else {
                    bigint_incr(&bigint);
                }
            }
            op2_bits = bigint_bits_needed(&bigint);
            is_unsigned = zcmp != CmpLT;
        } else {
            op2_bits = bigint_bits_needed(&op2_val->data.x_bigint);
            is_unsigned = bigint_cmp_zero(&op2_val->data.x_bigint) != CmpLT;
        }
        if (is_unsigned && dest_int_is_signed) {
            op2_bits += 1;
        }
    } else if (op2_is_float) {
        dest_float_type = op2_scalar_type;
    } else {
        ir_assert(op2_scalar_type->id == ZigTypeIdInt, source_instr);
        op2_bits = op2_scalar_type->data.integral.bit_count;
        if (!op2_scalar_type->data.integral.is_signed && dest_int_is_signed) {
            op2_bits += 1;
        }
    }
    ZigType *dest_scalar_type = (dest_float_type == nullptr) ?
        get_int_type(ira->codegen, dest_int_is_signed, (op1_bits > op2_bits) ? op1_bits : op2_bits) :
        dest_float_type;
    ZigType *dest_type = (result_type->id == ZigTypeIdVector) ?
        get_vector_type(ira->codegen, result_type->data.vector.len, dest_scalar_type) : dest_scalar_type;

    IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, dest_type);
    if (type_is_invalid(casted_op1->value->type))
        return ira->codegen->invalid_inst_gen;
    IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, dest_type);
    if (type_is_invalid(casted_op2->value->type))
        return ira->codegen->invalid_inst_gen;
    return ir_build_bin_op_gen(ira, source_instr, result_type, op_id, casted_op1, casted_op2, true);
}

static bool type_is_self_comparable(ZigType *ty, bool is_equality_cmp) {
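    // Reports whether two values of type `ty` can be compared with the given
    // class of operator (equality only, or ordering as well).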
    if (type_is_numeric(ty)) {
        return true;
    }
    switch (ty->id) {
        case ZigTypeIdInvalid:
            zig_unreachable();

        case ZigTypeIdComptimeFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdInt:
        case ZigTypeIdFloat:
            zig_unreachable(); // handled with the type_is_numeric check above

        case ZigTypeIdVector:
            // Not every vector case is handled by the type_is_numeric check above;
            // vectors of bool reach this code path.
        case ZigTypeIdBool:
        case ZigTypeIdMetaType:
        case ZigTypeIdVoid:
        case ZigTypeIdErrorSet:
        case ZigTypeIdFn:
        case ZigTypeIdOpaque:
        case ZigTypeIdBoundFn:
        case ZigTypeIdEnum:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdAnyFrame:
            return is_equality_cmp;

        case ZigTypeIdPointer:
            return is_equality_cmp || (ty->data.pointer.ptr_len == PtrLenC);

        case ZigTypeIdUnreachable:
        case ZigTypeIdArray:
        case ZigTypeIdStruct:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
        case ZigTypeIdErrorUnion:
        case ZigTypeIdUnion:
        case ZigTypeIdFnFrame:
            return false;

        case ZigTypeIdOptional:
            return is_equality_cmp && get_src_ptr_type(ty) != nullptr;
    }
    zig_unreachable();
}

static IrInstGen *ir_try_evaluate_cmp_optional_non_optional_const(IrAnalyze *ira, IrInst *source_instr, ZigType *child_type,
        IrInstGen *optional, IrInstGen *non_optional, IrBinOp op_id)
{
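    // Comptime fold of optional vs. payload comparison: a null optional compares
    // unequal, otherwise the unwrapped payload is compared against the
    // non-optional operand. Returns nullptr if the operands are not
    // comptime-known.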
    assert(optional->value->type->id == ZigTypeIdOptional);
    assert(optional->value->type->data.maybe.child_type == non_optional->value->type);
    assert(non_optional->value->type == child_type);
    assert(op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);

    if (instr_is_comptime(optional) && instr_is_comptime(non_optional)) {
        ZigValue *optional_val = ir_resolve_const(ira, optional, UndefBad);
        if (!optional_val) {
            return ira->codegen->invalid_inst_gen;
        }

        ZigValue *non_optional_val = ir_resolve_const(ira, non_optional, UndefBad);
        if (!non_optional_val) {
            return ira->codegen->invalid_inst_gen;
        }

        if (!optional_value_is_null(optional_val)) {
            IrInstGen *optional_unwrapped = ir_analyze_optional_value_payload_value(ira, source_instr, optional, false);
            if (type_is_invalid(optional_unwrapped->value->type)) {
                return ira->codegen->invalid_inst_gen;
            }

            IrInstGen *ret = ir_try_evaluate_bin_op_cmp_const(ira, source_instr, optional_unwrapped, non_optional, child_type, op_id);
            assert(ret != nullptr);
            return ret;
        }
        return ir_const_bool(ira, source_instr, (op_id != IrBinOpCmpEq));
    } else {
        return nullptr;
    }
}

static IrInstGen *ir_evaluate_cmp_optional_non_optional(IrAnalyze *ira, IrInst *source_instr, ZigType *child_type,
        IrInstGen *optional, IrInstGen *non_optional, IrBinOp op_id)
{
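    // Runtime lowering: branch on whether the optional holds a value, compare
    // the unwrapped payload in the non-null block, and merge the two results
    // with a phi.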
    assert(optional->value->type->id == ZigTypeIdOptional);
    assert(optional->value->type->data.maybe.child_type == non_optional->value->type);
    assert(non_optional->value->type == child_type);
    assert(op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);

    ZigType *result_type = ira->codegen->builtin_types.entry_bool;
    ir_append_basic_block_gen(&ira->new_irb, ira->new_irb.current_basic_block);

    IrBasicBlockGen *null_block = ir_create_basic_block_gen(ira, source_instr->scope, "CmpOptionalNonOptionalOptionalNull");
    IrBasicBlockGen *non_null_block = ir_create_basic_block_gen(ira, source_instr->scope, "CmpOptionalNonOptionalOptionalNotNull");
    IrBasicBlockGen *end_block = ir_create_basic_block_gen(ira, source_instr->scope, "CmpOptionalNonOptionalEnd");

    IrInstGen *is_non_null = ir_build_test_non_null_gen(ira, source_instr, optional);
    ir_build_cond_br_gen(ira, source_instr, is_non_null, non_null_block, null_block);

    ir_set_cursor_at_end_and_append_block_gen(&ira->new_irb, non_null_block);
    IrInstGen *optional_unwrapped = ir_analyze_optional_value_payload_value(ira, source_instr, optional, false);
    if (type_is_invalid(optional_unwrapped->value->type)) {
        return ira->codegen->invalid_inst_gen;
    }
    IrInstGen *non_null_cmp_result = ir_build_bin_op_gen(ira, source_instr, result_type, op_id,
            optional_unwrapped, non_optional, false); // safety check unnecessary for comparison operators
    ir_build_br_gen(ira, source_instr, end_block);


    ir_set_cursor_at_end_and_append_block_gen(&ira->new_irb, null_block);
    IrInstGen *null_result = ir_const_bool(ira, source_instr, (op_id != IrBinOpCmpEq));
    ir_build_br_gen(ira, source_instr, end_block);

    ir_set_cursor_at_end_gen(&ira->new_irb, end_block);
    int incoming_count = 2;
    IrBasicBlockGen **incoming_blocks = heap::c_allocator.allocate_nonzero<IrBasicBlockGen *>(incoming_count);
    incoming_blocks[0] = null_block;
    incoming_blocks[1] = non_null_block;
    IrInstGen **incoming_values = heap::c_allocator.allocate_nonzero<IrInstGen *>(incoming_count);
    incoming_values[0] = null_result;
    incoming_values[1] = non_null_cmp_result;

    return ir_build_phi_gen(ira, source_instr, incoming_count, incoming_blocks, incoming_values, result_type);
}

static IrInstGen *ir_analyze_cmp_optional_non_optional(IrAnalyze *ira, IrInst *source_instr,
        IrInstGen *op1, IrInstGen *op2, IrInstGen *optional, IrBinOp op_id)
{
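    // Analyzes == / != between a non-pointer-like optional and a value of its
    // child type, with dedicated error messages for mismatched or
    // non-comparable child types.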
    assert(op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);
    assert(optional->value->type->id == ZigTypeIdOptional);
    assert(get_src_ptr_type(optional->value->type) == nullptr);

    IrInstGen *non_optional;
    if (op1 == optional) {
        non_optional = op2;
    } else if (op2 == optional) {
        non_optional = op1;
    } else {
        zig_unreachable();
    }

    ZigType *child_type = optional->value->type->data.maybe.child_type;
    bool child_type_matches = (child_type == non_optional->value->type);
    if (!child_type_matches || !type_is_self_comparable(child_type, true)) {
        ErrorMsg *msg = ir_add_error_node(ira, source_instr->source_node, buf_sprintf("cannot compare types '%s' and '%s'",
            buf_ptr(&op1->value->type->name),
            buf_ptr(&op2->value->type->name)));

        if (!child_type_matches) {
            if (non_optional->value->type->id == ZigTypeIdOptional) {
                add_error_note(ira->codegen, msg, source_instr->source_node, buf_sprintf(
                        "optional to optional comparison is only supported for optional pointer types"));
            } else {
                add_error_note(ira->codegen, msg, source_instr->source_node,
                        buf_sprintf("optional child type '%s' must be the same as non-optional type '%s'",
                                buf_ptr(&child_type->name),
                                buf_ptr(&non_optional->value->type->name)));
            }
        } else {
            add_error_note(ira->codegen, msg, source_instr->source_node,
                    buf_sprintf("operator not supported for type '%s'",
                            buf_ptr(&child_type->name)));
        }
        return ira->codegen->invalid_inst_gen;
    }

    if (child_type->id == ZigTypeIdVector) {
        ir_add_error_node(ira, source_instr->source_node, buf_sprintf("TODO add comparison of optional vector"));
        return ira->codegen->invalid_inst_gen;
    }

    if (IrInstGen *const_result = ir_try_evaluate_cmp_optional_non_optional_const(ira, source_instr, child_type,
            optional, non_optional, op_id))
    {
        return const_result;
    }

    return ir_evaluate_cmp_optional_non_optional(ira, source_instr, child_type, optional, non_optional, op_id);
}

static IrInstGen *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstSrcBinOp *bin_op_instruction) {
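    // Analyzes a comparison, dispatching on the operand types: comparisons
    // against null, optional vs. payload, union tag vs. enum literal, error
    // sets, and so on.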
    IrInstGen *op1 = bin_op_instruction->op1->child;
    if (type_is_invalid(op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = bin_op_instruction->op2->child;
    if (type_is_invalid(op2->value->type))
        return ira->codegen->invalid_inst_gen;

    AstNode *source_node = bin_op_instruction->base.base.source_node;

    IrBinOp op_id = bin_op_instruction->op_id;
    bool is_equality_cmp = (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);
    if (is_equality_cmp && op1->value->type->id == ZigTypeIdNull && op2->value->type->id == ZigTypeIdNull) {
        return ir_const_bool(ira, &bin_op_instruction->base.base, (op_id == IrBinOpCmpEq));
    } else if (is_equality_cmp &&
        ((op1->value->type->id == ZigTypeIdNull && op2->value->type->id == ZigTypeIdOptional) ||
        (op2->value->type->id == ZigTypeIdNull && op1->value->type->id == ZigTypeIdOptional)))
    {
        IrInstGen *maybe_op;
        if (op1->value->type->id == ZigTypeIdNull) {
            maybe_op = op2;
        } else if (op2->value->type->id == ZigTypeIdNull) {
            maybe_op = op1;
        } else {
            zig_unreachable();
        }
        if (instr_is_comptime(maybe_op)) {
            ZigValue *maybe_val = ir_resolve_const(ira, maybe_op, UndefBad);
            if (!maybe_val)
                return ira->codegen->invalid_inst_gen;
            bool is_null = optional_value_is_null(maybe_val);
            bool bool_result = (op_id == IrBinOpCmpEq) ? is_null : !is_null;
            return ir_const_bool(ira, &bin_op_instruction->base.base, bool_result);
        }

        IrInstGen *is_non_null = ir_build_test_non_null_gen(ira, &bin_op_instruction->base.base, maybe_op);

        if (op_id == IrBinOpCmpEq) {
            return ir_build_bool_not_gen(ira, &bin_op_instruction->base.base, is_non_null);
        } else {
            return is_non_null;
        }
    } else if (is_equality_cmp &&
        ((op1->value->type->id == ZigTypeIdNull && op2->value->type->id == ZigTypeIdPointer &&
            op2->value->type->data.pointer.ptr_len == PtrLenC) ||
        (op2->value->type->id == ZigTypeIdNull && op1->value->type->id == ZigTypeIdPointer &&
            op1->value->type->data.pointer.ptr_len == PtrLenC)))
    {
        IrInstGen *c_ptr_op;
        if (op1->value->type->id == ZigTypeIdNull) {
            c_ptr_op = op2;
        } else if (op2->value->type->id == ZigTypeIdNull) {
            c_ptr_op = op1;
        } else {
            zig_unreachable();
        }
        if (instr_is_comptime(c_ptr_op)) {
            ZigValue *c_ptr_val = ir_resolve_const(ira, c_ptr_op, UndefOk);
            if (!c_ptr_val)
                return ira->codegen->invalid_inst_gen;
            if (c_ptr_val->special == ConstValSpecialUndef)
                return ir_const_undef(ira, &bin_op_instruction->base.base, ira->codegen->builtin_types.entry_bool);
            bool is_null = c_ptr_val->data.x_ptr.special == ConstPtrSpecialNull ||
                (c_ptr_val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
                    c_ptr_val->data.x_ptr.data.hard_coded_addr.addr == 0);
            bool bool_result = (op_id == IrBinOpCmpEq) ? is_null : !is_null;
            return ir_const_bool(ira, &bin_op_instruction->base.base, bool_result);
        }
        IrInstGen *is_non_null = ir_build_test_non_null_gen(ira, &bin_op_instruction->base.base, c_ptr_op);

        if (op_id == IrBinOpCmpEq) {
            return ir_build_bool_not_gen(ira, &bin_op_instruction->base.base, is_non_null);
        } else {
            return is_non_null;
        }
    } else if (is_equality_cmp &&
        (op1->value->type->id == ZigTypeIdOptional && get_src_ptr_type(op1->value->type) == nullptr))
    {
        return ir_analyze_cmp_optional_non_optional(ira, &bin_op_instruction->base.base, op1, op2, op1, op_id);
    } else if(is_equality_cmp &&
        (op2->value->type->id == ZigTypeIdOptional && get_src_ptr_type(op2->value->type) == nullptr))
    {
        return ir_analyze_cmp_optional_non_optional(ira, &bin_op_instruction->base.base, op1, op2, op2, op_id);
    } else if (op1->value->type->id == ZigTypeIdNull || op2->value->type->id == ZigTypeIdNull) {
        ZigType *non_null_type = (op1->value->type->id == ZigTypeIdNull) ? op2->value->type : op1->value->type;
        ir_add_error_node(ira, source_node, buf_sprintf("comparison of '%s' with null",
            buf_ptr(&non_null_type->name)));
        return ira->codegen->invalid_inst_gen;
    } else if (is_equality_cmp && (
        (op1->value->type->id == ZigTypeIdEnumLiteral && op2->value->type->id == ZigTypeIdUnion) ||
        (op2->value->type->id == ZigTypeIdEnumLiteral && op1->value->type->id == ZigTypeIdUnion)))
    {
        // Support equality comparison between a union's tag value and an enum literal
        IrInstGen *union_val = op1->value->type->id == ZigTypeIdUnion ? op1 : op2;
        IrInstGen *enum_val = op1->value->type->id == ZigTypeIdUnion ? op2 : op1;

        if (!is_tagged_union(union_val->value->type)) {
            ErrorMsg *msg = ir_add_error_node(ira, source_node,
                buf_sprintf("comparison of union and enum literal is only valid for tagged union types"));
            add_error_note(ira->codegen, msg, union_val->value->type->data.unionation.decl_node,
                buf_sprintf("type %s is not a tagged union",
                    buf_ptr(&union_val->value->type->name)));
            return ira->codegen->invalid_inst_gen;
        }

        ZigType *tag_type = union_val->value->type->data.unionation.tag_type;
        assert(tag_type != nullptr);

        IrInstGen *casted_union = ir_implicit_cast(ira, union_val, tag_type);
        if (type_is_invalid(casted_union->value->type))
            return ira->codegen->invalid_inst_gen;

        IrInstGen *casted_val = ir_implicit_cast(ira, enum_val, tag_type);
        if (type_is_invalid(casted_val->value->type))
            return ira->codegen->invalid_inst_gen;

        if (instr_is_comptime(casted_union)) {
            ZigValue *const_union_val = ir_resolve_const(ira, casted_union, UndefBad);
            if (!const_union_val)
                return ira->codegen->invalid_inst_gen;

            ZigValue *const_enum_val = ir_resolve_const(ira, casted_val, UndefBad);
            if (!const_enum_val)
                return ira->codegen->invalid_inst_gen;

            Cmp cmp_result = bigint_cmp(&const_union_val->data.x_union.tag, &const_enum_val->data.x_enum_tag);
            bool bool_result = (op_id == IrBinOpCmpEq) ? cmp_result == CmpEQ : cmp_result != CmpEQ;

            return ir_const_bool(ira, &bin_op_instruction->base.base, bool_result);
        }

        return ir_build_bin_op_gen(ira, &bin_op_instruction->base.base, ira->codegen->builtin_types.entry_bool,
            op_id, casted_union, casted_val, bin_op_instruction->safety_check_on);
    }

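    // Error set comparison: only == and != are allowed. The result is folded at comptime when
    // possible (an empty operand error set, two single-error sets, or comptime-known operands);
    // error sets with no errors in common are a compile error.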
    if (op1->value->type->id == ZigTypeIdErrorSet && op2->value->type->id == ZigTypeIdErrorSet) {
        if (!is_equality_cmp) {
            ir_add_error_node(ira, source_node, buf_sprintf("operator not allowed for errors"));
            return ira->codegen->invalid_inst_gen;
        }
        ZigType *intersect_type = get_error_set_intersection(ira, op1->value->type, op2->value->type, source_node);
        if (type_is_invalid(intersect_type)) {
            return ira->codegen->invalid_inst_gen;
        }

        if (!resolve_inferred_error_set(ira->codegen, intersect_type, source_node)) {
            return ira->codegen->invalid_inst_gen;
        }

        // Exception: if one of the operands has an empty error set type, we allow the comparison
        // (and make it comptime-known). This happens, for example, when a function that returns
        // an inferred error set is evaluated at comptime and turns out to return no errors: its
        // error set is then empty.
        if (op1->value->type->data.error_set.err_count == 0 || op2->value->type->data.error_set.err_count == 0) {
            bool are_equal = false;
            bool answer;
            if (op_id == IrBinOpCmpEq) {
                answer = are_equal;
            } else if (op_id == IrBinOpCmpNotEq) {
                answer = !are_equal;
            } else {
                zig_unreachable();
            }
            return ir_const_bool(ira, &bin_op_instruction->base.base, answer);
        }

        if (!type_is_global_error_set(intersect_type)) {
            if (intersect_type->data.error_set.err_count == 0) {
                ir_add_error_node(ira, source_node,
                    buf_sprintf("error sets '%s' and '%s' have no common errors",
                        buf_ptr(&op1->value->type->name), buf_ptr(&op2->value->type->name)));
                return ira->codegen->invalid_inst_gen;
            }
            if (op1->value->type->data.error_set.err_count == 1 && op2->value->type->data.error_set.err_count == 1) {
                bool are_equal = true;
                bool answer;
                if (op_id == IrBinOpCmpEq) {
                    answer = are_equal;
                } else if (op_id == IrBinOpCmpNotEq) {
                    answer = !are_equal;
                } else {
                    zig_unreachable();
                }
                return ir_const_bool(ira, &bin_op_instruction->base.base, answer);
            }
        }

        if (instr_is_comptime(op1) && instr_is_comptime(op2)) {
            ZigValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
            if (op1_val == nullptr)
                return ira->codegen->invalid_inst_gen;
            ZigValue *op2_val = ir_resolve_const(ira, op2, UndefBad);
            if (op2_val == nullptr)
                return ira->codegen->invalid_inst_gen;

            bool answer;
            bool are_equal = op1_val->data.x_err_set->value == op2_val->data.x_err_set->value;
            if (op_id == IrBinOpCmpEq) {
                answer = are_equal;
            } else if (op_id == IrBinOpCmpNotEq) {
                answer = !are_equal;
            } else {
                zig_unreachable();
            }

            return ir_const_bool(ira, &bin_op_instruction->base.base, answer);
        }

        return ir_build_bin_op_gen(ira, &bin_op_instruction->base.base, ira->codegen->builtin_types.entry_bool,
                op_id, op1, op2, bin_op_instruction->safety_check_on);
    }

    if (type_is_numeric(op1->value->type) && type_is_numeric(op2->value->type)) {
        // This operation allows any combination of integer and float types, regardless of the
        // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
        // numeric types.
        return ir_analyze_bin_op_cmp_numeric(ira, &bin_op_instruction->base.base, op1, op2, op_id);
    }

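    // General case: resolve a peer type for both operands, verify that it supports the requested
    // comparison, cast the operands, and fold the result when both are comptime-known; otherwise
    // emit a runtime comparison producing bool (or a vector of bool for vector operands).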
    IrInstGen *instructions[] = {op1, op2};
    ZigType *resolved_type = ir_resolve_peer_types(ira, source_node, nullptr, instructions, 2);
    if (type_is_invalid(resolved_type))
        return ira->codegen->invalid_inst_gen;

    bool operator_allowed = type_is_self_comparable(resolved_type, is_equality_cmp);

    if (!operator_allowed) {
        ir_add_error_node(ira, source_node,
            buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, resolved_type);
    if (type_is_invalid(casted_op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, resolved_type);
    if (type_is_invalid(casted_op2->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *resolve_const_result = ir_try_evaluate_bin_op_cmp_const(ira, &bin_op_instruction->base.base, casted_op1,
                                                                       casted_op2, resolved_type, op_id);
    if (resolve_const_result != nullptr) {
        return resolve_const_result;
    }

    ZigType *res_type = (resolved_type->id == ZigTypeIdVector) ?
        get_vector_type(ira->codegen, resolved_type->data.vector.len, ira->codegen->builtin_types.entry_bool) :
        ira->codegen->builtin_types.entry_bool;
    return ir_build_bin_op_gen(ira, &bin_op_instruction->base.base, res_type,
            op_id, casted_op1, casted_op2, bin_op_instruction->safety_check_on);
}

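// Evaluates a comptime binary math operation on two scalar int/float values, writing the result
// into out_val. Returns the emitted ErrorMsg on failure (division by zero, negative denominator,
// exact operations that lose bits or leave a remainder, integer overflow), or nullptr on success.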
static ErrorMsg *ir_eval_math_op_scalar(IrAnalyze *ira, IrInst* source_instr, ZigType *type_entry,
        ZigValue *op1_val, IrBinOp op_id, ZigValue *op2_val, ZigValue *out_val)
{
    bool is_int;
    bool is_float;
    Cmp op2_zcmp;
    if (type_entry->id == ZigTypeIdInt || type_entry->id == ZigTypeIdComptimeInt) {
        is_int = true;
        is_float = false;
        op2_zcmp = bigint_cmp_zero(&op2_val->data.x_bigint);
    } else if (type_entry->id == ZigTypeIdFloat ||
                type_entry->id == ZigTypeIdComptimeFloat)
    {
        is_int = false;
        is_float = true;
        op2_zcmp = float_cmp_zero(op2_val);
    } else {
        zig_unreachable();
    }

    if ((op_id == IrBinOpDivUnspecified || op_id == IrBinOpRemRem || op_id == IrBinOpRemMod ||
        op_id == IrBinOpDivTrunc || op_id == IrBinOpDivFloor) && op2_zcmp == CmpEQ)
    {
        return ir_add_error(ira, source_instr, buf_sprintf("division by zero"));
    }
    if ((op_id == IrBinOpRemRem || op_id == IrBinOpRemMod) && op2_zcmp == CmpLT) {
        return ir_add_error(ira, source_instr, buf_sprintf("negative denominator"));
    }

    switch (op_id) {
        case IrBinOpInvalid:
        case IrBinOpBoolOr:
        case IrBinOpBoolAnd:
        case IrBinOpCmpEq:
        case IrBinOpCmpNotEq:
        case IrBinOpCmpLessThan:
        case IrBinOpCmpGreaterThan:
        case IrBinOpCmpLessOrEq:
        case IrBinOpCmpGreaterOrEq:
        case IrBinOpArrayCat:
        case IrBinOpArrayMult:
        case IrBinOpRemUnspecified:
            zig_unreachable();
        case IrBinOpBinOr:
            assert(is_int);
            bigint_or(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            break;
        case IrBinOpBinXor:
            assert(is_int);
            bigint_xor(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            break;
        case IrBinOpBinAnd:
            assert(is_int);
            bigint_and(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            break;
        case IrBinOpBitShiftLeftExact:
            assert(is_int);
            bigint_shl(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            break;
        case IrBinOpBitShiftLeftLossy:
            assert(type_entry->id == ZigTypeIdInt);
            bigint_shl_trunc(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint,
                    type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
            break;
        case IrBinOpBitShiftRightExact:
            {
                assert(is_int);
                bigint_shr(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
                BigInt orig_bigint;
                bigint_shl(&orig_bigint, &out_val->data.x_bigint, &op2_val->data.x_bigint);
                if (bigint_cmp(&op1_val->data.x_bigint, &orig_bigint) != CmpEQ) {
                    return ir_add_error(ira, source_instr, buf_sprintf("exact shift shifted out 1 bits"));
                }
                break;
            }
        case IrBinOpBitShiftRightLossy:
            assert(is_int);
            bigint_shr(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            break;
        case IrBinOpAdd:
            if (is_int) {
                bigint_add(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_add(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpAddWrap:
            assert(type_entry->id == ZigTypeIdInt);
            bigint_add_wrap(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint,
                    type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
            break;
        case IrBinOpSub:
            if (is_int) {
                bigint_sub(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_sub(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpSubWrap:
            assert(type_entry->id == ZigTypeIdInt);
            bigint_sub_wrap(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint,
                    type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
            break;
        case IrBinOpMult:
            if (is_int) {
                bigint_mul(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_mul(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpMultWrap:
            assert(type_entry->id == ZigTypeIdInt);
            bigint_mul_wrap(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint,
                    type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
            break;
        case IrBinOpDivUnspecified:
            assert(is_float);
            float_div(out_val, op1_val, op2_val);
            break;
        case IrBinOpDivTrunc:
            if (is_int) {
                bigint_div_trunc(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_div_trunc(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpDivFloor:
            if (is_int) {
                bigint_div_floor(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_div_floor(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpDivExact:
            if (is_int) {
                bigint_div_trunc(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
                BigInt remainder;
                bigint_rem(&remainder, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
                if (bigint_cmp_zero(&remainder) != CmpEQ) {
                    return ir_add_error(ira, source_instr, buf_sprintf("exact division had a remainder"));
                }
            } else {
                float_div_trunc(out_val, op1_val, op2_val);
                ZigValue remainder = {};
                float_rem(&remainder, op1_val, op2_val);
                if (float_cmp_zero(&remainder) != CmpEQ) {
                    return ir_add_error(ira, source_instr, buf_sprintf("exact division had a remainder"));
                }
            }
            break;
        case IrBinOpRemRem:
            if (is_int) {
                bigint_rem(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_rem(out_val, op1_val, op2_val);
            }
            break;
        case IrBinOpRemMod:
            if (is_int) {
                bigint_mod(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
            } else {
                float_mod(out_val, op1_val, op2_val);
            }
            break;
    }

    if (type_entry->id == ZigTypeIdInt) {
        if (!bigint_fits_in_bits(&out_val->data.x_bigint, type_entry->data.integral.bit_count,
                type_entry->data.integral.is_signed))
        {
            return ir_add_error(ira, source_instr, buf_sprintf("operation caused overflow"));
        }
    }

    out_val->type = type_entry;
    out_val->special = ConstValSpecialStatic;
    return nullptr;
}

// This works on operands that have already been checked to be comptime known.
static IrInstGen *ir_analyze_math_op(IrAnalyze *ira, IrInst* source_instr,
        ZigType *type_entry, ZigValue *op1_val, IrBinOp op_id, ZigValue *op2_val)
{
    IrInstGen *result_instruction = ir_const(ira, source_instr, type_entry);
    ZigValue *out_val = result_instruction->value;
    if (type_entry->id == ZigTypeIdVector) {
        expand_undef_array(ira->codegen, op1_val);
        expand_undef_array(ira->codegen, op2_val);
        out_val->special = ConstValSpecialUndef;
        expand_undef_array(ira->codegen, out_val);
        size_t len = type_entry->data.vector.len;
        ZigType *scalar_type = type_entry->data.vector.elem_type;
        for (size_t i = 0; i < len; i += 1) {
            ZigValue *scalar_op1_val = &op1_val->data.x_array.data.s_none.elements[i];
            ZigValue *scalar_op2_val = &op2_val->data.x_array.data.s_none.elements[i];
            ZigValue *scalar_out_val = &out_val->data.x_array.data.s_none.elements[i];
            assert(scalar_op1_val->type == scalar_type);
            assert(scalar_out_val->type == scalar_type);
            ErrorMsg *msg = ir_eval_math_op_scalar(ira, source_instr, scalar_type,
                    scalar_op1_val, op_id, scalar_op2_val, scalar_out_val);
            if (msg != nullptr) {
                add_error_note(ira->codegen, msg, source_instr->source_node,
                    buf_sprintf("when computing vector element at index %" ZIG_PRI_usize, i));
                return ira->codegen->invalid_inst_gen;
            }
        }
        out_val->type = type_entry;
        out_val->special = ConstValSpecialStatic;
    } else {
        if (ir_eval_math_op_scalar(ira, source_instr, type_entry, op1_val, op_id, op2_val, out_val) != nullptr) {
            return ira->codegen->invalid_inst_gen;
        }
    }
    return ir_implicit_cast(ira, result_instruction, type_entry);
}

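// Analyzes bit shift operations (e.g. `x << 3` or `x >> y` in Zig source). For a fixed-width
// integer LHS the shift amount is implicitly cast to the smallest unsigned integer type that can
// hold bit_count - 1 (lifted to a vector type when the operands are vectors); a comptime_int LHS
// requires a comptime-known RHS. Comptime-known operands are folded via ir_analyze_math_op.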
static IrInstGen *ir_analyze_bit_shift(IrAnalyze *ira, IrInstSrcBinOp *bin_op_instruction) {
    IrInstGen *op1 = bin_op_instruction->op1->child;
    if (type_is_invalid(op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = bin_op_instruction->op2->child;
    if (type_is_invalid(op2->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *op1_type = op1->value->type;
    ZigType *op2_type = op2->value->type;

    if (op1_type->id == ZigTypeIdVector && op2_type->id != ZigTypeIdVector) {
        ir_add_error(ira, &bin_op_instruction->op2->base,
            buf_sprintf("bit shifting operation expected vector type, found '%s'",
                buf_ptr(&op2_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (op1_type->id != ZigTypeIdVector && op2_type->id == ZigTypeIdVector) {
        ir_add_error(ira, &bin_op_instruction->op1->base,
            buf_sprintf("bit shifting operation expected vector type, found '%s'",
                buf_ptr(&op1_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *op1_scalar_type = (op1_type->id == ZigTypeIdVector) ?
        op1_type->data.vector.elem_type : op1_type;
    ZigType *op2_scalar_type = (op2_type->id == ZigTypeIdVector) ?
        op2_type->data.vector.elem_type : op2_type;

    if (op1_scalar_type->id != ZigTypeIdInt && op1_scalar_type->id != ZigTypeIdComptimeInt) {
        ir_add_error(ira, &bin_op_instruction->op1->base,
            buf_sprintf("bit shifting operation expected integer type, found '%s'",
                buf_ptr(&op1_scalar_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    if (op2_scalar_type->id != ZigTypeIdInt && op2_scalar_type->id != ZigTypeIdComptimeInt) {
        ir_add_error(ira, &bin_op_instruction->op2->base,
            buf_sprintf("shift amount has to be an integer type, but found '%s'",
                buf_ptr(&op2_scalar_type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *casted_op2;
    IrBinOp op_id = bin_op_instruction->op_id;
    if (op1_scalar_type->id == ZigTypeIdComptimeInt) {
        // comptime_int has no finite bit width
        casted_op2 = op2;

        if (op_id == IrBinOpBitShiftLeftLossy) {
            op_id = IrBinOpBitShiftLeftExact;
        }

        if (!instr_is_comptime(op2)) {
            ir_add_error(ira, &bin_op_instruction->base.base,
                buf_sprintf("LHS of shift must be a fixed-width integer type, or RHS must be compile-time known"));
            return ira->codegen->invalid_inst_gen;
        }

        ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        if (op2_val->data.x_bigint.is_negative) {
            Buf *val_buf = buf_alloc();
            bigint_append_buf(val_buf, &op2_val->data.x_bigint, 10);
            ir_add_error(ira, &casted_op2->base,
                buf_sprintf("shift by negative value %s", buf_ptr(val_buf)));
            return ira->codegen->invalid_inst_gen;
        }
    } else {
        const unsigned bit_count = op1_scalar_type->data.integral.bit_count;
        ZigType *shift_amt_type = get_smallest_unsigned_int_type(ira->codegen,
            bit_count > 0 ? bit_count - 1 : 0);

        if (op1_type->id == ZigTypeIdVector) {
            shift_amt_type = get_vector_type(ira->codegen, op1_type->data.vector.len,
                shift_amt_type);
        }

        casted_op2 = ir_implicit_cast(ira, op2, shift_amt_type);
        if (type_is_invalid(casted_op2->value->type))
            return ira->codegen->invalid_inst_gen;

        // This check is only valid if op1 has at least one bit
        if (bit_count > 0 && instr_is_comptime(casted_op2)) {
            ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
            if (op2_val == nullptr)
                return ira->codegen->invalid_inst_gen;

            ZigValue bit_count_value = {};
            init_const_usize(ira->codegen, &bit_count_value, bit_count);

            if (!value_cmp_numeric_val_all(op2_val, CmpLT, &bit_count_value)) {
                ErrorMsg* msg = ir_add_error(ira,
                    &bin_op_instruction->base.base,
                    buf_sprintf("RHS of shift is too large for LHS type"));
                add_error_note(ira->codegen, msg, op1->base.source_node,
                    buf_sprintf("type %s has only %u bits",
                        buf_ptr(&op1->value->type->name), bit_count));

                return ira->codegen->invalid_inst_gen;
            }
        }
    }

    // Fast path for zero RHS
    if (instr_is_comptime(casted_op2)) {
        ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        if (value_cmp_numeric_val_all(op2_val, CmpEQ, nullptr))
            return ir_analyze_cast(ira, &bin_op_instruction->base.base, op1->value->type, op1);
    }

    if (instr_is_comptime(op1) && instr_is_comptime(casted_op2)) {
        ZigValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
        if (op1_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        return ir_analyze_math_op(ira, &bin_op_instruction->base.base, op1_type, op1_val, op_id, op2_val);
    }

    return ir_build_bin_op_gen(ira, &bin_op_instruction->base.base, op1->value->type,
            op_id, op1, casted_op2, bin_op_instruction->safety_check_on);
}

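// Returns whether the given binary operation is defined for floating point operands.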
static bool ok_float_op(IrBinOp op) {
    switch (op) {
        case IrBinOpInvalid:
            zig_unreachable();
        case IrBinOpAdd:
        case IrBinOpSub:
        case IrBinOpMult:
        case IrBinOpDivUnspecified:
        case IrBinOpDivTrunc:
        case IrBinOpDivFloor:
        case IrBinOpDivExact:
        case IrBinOpRemRem:
        case IrBinOpRemMod:
        case IrBinOpRemUnspecified:
            return true;

        case IrBinOpBoolOr:
        case IrBinOpBoolAnd:
        case IrBinOpCmpEq:
        case IrBinOpCmpNotEq:
        case IrBinOpCmpLessThan:
        case IrBinOpCmpGreaterThan:
        case IrBinOpCmpLessOrEq:
        case IrBinOpCmpGreaterOrEq:
        case IrBinOpBinOr:
        case IrBinOpBinXor:
        case IrBinOpBinAnd:
        case IrBinOpBitShiftLeftLossy:
        case IrBinOpBitShiftLeftExact:
        case IrBinOpBitShiftRightLossy:
        case IrBinOpBitShiftRightExact:
        case IrBinOpAddWrap:
        case IrBinOpSubWrap:
        case IrBinOpMultWrap:
        case IrBinOpArrayCat:
        case IrBinOpArrayMult:
            return false;
    }
    zig_unreachable();
}

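// Pointer arithmetic is only allowed for + and -, and only on unknown-length pointers, C pointers,
// and single-item pointers to arrays.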
static bool is_pointer_arithmetic_allowed(ZigType *lhs_type, IrBinOp op) {
    switch (op) {
        case IrBinOpAdd:
        case IrBinOpSub:
            break;
        default:
            return false;
    }
    if (lhs_type->id != ZigTypeIdPointer)
        return false;
    switch (lhs_type->data.pointer.ptr_len) {
        case PtrLenSingle:
            return lhs_type->data.pointer.child_type->id == ZigTypeIdArray;
        case PtrLenUnknown:
        case PtrLenC:
            return true;
    }
    zig_unreachable();
}

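// Returns whether `left` compares to `right` according to `predicate`; a null `right` means
// compare against zero. NaN operands never satisfy any predicate. When `left` is a vector, each
// element is compared against the (scalar) `right`: `any` selects whether a single matching
// element suffices or every element must match.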
static bool value_cmp_numeric_val(ZigValue *left, Cmp predicate, ZigValue *right, bool any) {
    assert(left->special == ConstValSpecialStatic);
    assert(right == nullptr || right->special == ConstValSpecialStatic);

    switch (left->type->id) {
        case ZigTypeIdComptimeInt:
        case ZigTypeIdInt: {
            const Cmp result = right ?
                bigint_cmp(&left->data.x_bigint, &right->data.x_bigint) :
                bigint_cmp_zero(&left->data.x_bigint);
            return result == predicate;
        }
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdFloat: {
            if (float_is_nan(left))
                return false;
            if (right != nullptr && float_is_nan(right))
                return false;

            const Cmp result = right ? float_cmp(left, right) : float_cmp_zero(left);
            return result == predicate;
        }
        case ZigTypeIdVector: {
            for (size_t i = 0; i < left->type->data.vector.len; i++) {
                ZigValue *scalar_val = &left->data.x_array.data.s_none.elements[i];
                const bool result = value_cmp_numeric_val(scalar_val, predicate, right, any);

                if (any && result)
                    return true; // This element satisfies the predicate
                else if (!any && !result)
                    return false; // This element doesn't satisfy the predicate
            }
            return !any;
        }
        default:
            zig_unreachable();
    }
}

static bool value_cmp_numeric_val_any(ZigValue *left, Cmp predicate, ZigValue *right) {
    return value_cmp_numeric_val(left, predicate, right, true);
}

static bool value_cmp_numeric_val_all(ZigValue *left, Cmp predicate, ZigValue *right) {
    return value_cmp_numeric_val(left, predicate, right, false);
}

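// Analyzes arithmetic and bitwise binary operations (but not shifts, which go through
// ir_analyze_bit_shift). Pointer arithmetic on the LHS is handled first; otherwise the operands
// are cast to their peer-resolved type and comptime-known operands are folded. Unspecified `/`
// and `%` on signed integers or floats are only accepted when the result is unambiguous,
// otherwise the user is told to use @divTrunc/@divFloor/@divExact or @rem/@mod.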
static IrInstGen *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstSrcBinOp *instruction) {
    Error err;

    IrInstGen *op1 = instruction->op1->child;
    if (type_is_invalid(op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = instruction->op2->child;
    if (type_is_invalid(op2->value->type))
        return ira->codegen->invalid_inst_gen;

    IrBinOp op_id = instruction->op_id;

    // look for pointer math
    if (is_pointer_arithmetic_allowed(op1->value->type, op_id)) {
        IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, ira->codegen->builtin_types.entry_usize);
        if (type_is_invalid(casted_op2->value->type))
            return ira->codegen->invalid_inst_gen;

        // If either operand is undef, result is undef.
        ZigValue *op1_val = nullptr;
        ZigValue *op2_val = nullptr;
        if (instr_is_comptime(op1)) {
            op1_val = ir_resolve_const(ira, op1, UndefOk);
            if (op1_val == nullptr)
                return ira->codegen->invalid_inst_gen;
            if (op1_val->special == ConstValSpecialUndef)
                return ir_const_undef(ira, &instruction->base.base, op1->value->type);
        }
        if (instr_is_comptime(casted_op2)) {
            op2_val = ir_resolve_const(ira, casted_op2, UndefOk);
            if (op2_val == nullptr)
                return ira->codegen->invalid_inst_gen;
            if (op2_val->special == ConstValSpecialUndef)
                return ir_const_undef(ira, &instruction->base.base, op1->value->type);
        }

        ZigType *elem_type = op1->value->type->data.pointer.child_type;
        if ((err = type_resolve(ira->codegen, elem_type, ResolveStatusSizeKnown)))
            return ira->codegen->invalid_inst_gen;

        // NOTE: this variable is meaningful iff op2_val is not null!
        uint64_t byte_offset;
        if (op2_val != nullptr) {
            uint64_t elem_offset;
            if (!ir_resolve_usize(ira, casted_op2, &elem_offset))
                return ira->codegen->invalid_inst_gen;

            byte_offset = type_size(ira->codegen, elem_type) * elem_offset;
        }

        // Fast path for cases where the RHS is zero
        if (op2_val != nullptr && byte_offset == 0) {
            return op1;
        }

        ZigType *result_type = op1->value->type;
        // Calculate the new alignment of the pointer
        {
            uint32_t align_bytes;
            if ((err = resolve_ptr_align(ira, op1->value->type, &align_bytes)))
                return ira->codegen->invalid_inst_gen;

            // If the addend is not a comptime-known value we can still count on
            // it being a multiple of the type size
            uint32_t addend = op2_val ? byte_offset : type_size(ira->codegen, elem_type);

            // The resulting pointer is aligned to the greatest power of two that divides both
            // the offset (an arbitrary number) and the alignment factor (always a power of two,
            // non-zero)
            uint32_t new_align = 1 << ctzll(addend | align_bytes);
            // Rough guard to prevent overflows
            assert(new_align);
            result_type = adjust_ptr_align(ira->codegen, result_type, new_align);
        }

        if (op2_val != nullptr && op1_val != nullptr &&
            (op1->value->data.x_ptr.special == ConstPtrSpecialHardCodedAddr ||
            op1->value->data.x_ptr.special == ConstPtrSpecialNull))
        {
            uint64_t start_addr = (op1_val->data.x_ptr.special == ConstPtrSpecialNull) ?
                0 : op1_val->data.x_ptr.data.hard_coded_addr.addr;
            uint64_t new_addr;
            if (op_id == IrBinOpAdd) {
                new_addr = start_addr + byte_offset;
            } else if (op_id == IrBinOpSub) {
                new_addr = start_addr - byte_offset;
            } else {
                zig_unreachable();
            }
            IrInstGen *result = ir_const(ira, &instruction->base.base, result_type);
            result->value->data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
            result->value->data.x_ptr.mut = ConstPtrMutRuntimeVar;
            result->value->data.x_ptr.data.hard_coded_addr.addr = new_addr;
            return result;
        }

        return ir_build_bin_op_gen(ira, &instruction->base.base, result_type, op_id, op1, casted_op2, true);
    }

    IrInstGen *instructions[] = {op1, op2};
    ZigType *resolved_type = ir_resolve_peer_types(ira, instruction->base.base.source_node, nullptr, instructions, 2);
    if (type_is_invalid(resolved_type))
        return ira->codegen->invalid_inst_gen;

    ZigType *scalar_type = (resolved_type->id == ZigTypeIdVector) ?
        resolved_type->data.vector.elem_type : resolved_type;

    bool is_int = scalar_type->id == ZigTypeIdInt || scalar_type->id == ZigTypeIdComptimeInt;
    bool is_float = scalar_type->id == ZigTypeIdFloat || scalar_type->id == ZigTypeIdComptimeFloat;

    if (!is_int && !(is_float && ok_float_op(op_id))) {
        AstNode *source_node = instruction->base.base.source_node;
        ir_add_error_node(ira, source_node,
            buf_sprintf("invalid operands to binary expression: '%s' and '%s'",
                buf_ptr(&op1->value->type->name),
                buf_ptr(&op2->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *casted_op1 = ir_implicit_cast(ira, op1, resolved_type);
    if (type_is_invalid(casted_op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *casted_op2 = ir_implicit_cast(ira, op2, resolved_type);
    if (type_is_invalid(casted_op2->value->type))
        return ira->codegen->invalid_inst_gen;

    // Comptime integers have no fixed size
    if (scalar_type->id == ZigTypeIdComptimeInt) {
        if (op_id == IrBinOpAddWrap) {
            op_id = IrBinOpAdd;
        } else if (op_id == IrBinOpSubWrap) {
            op_id = IrBinOpSub;
        } else if (op_id == IrBinOpMultWrap) {
            op_id = IrBinOpMult;
        }
    }

    if (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2)) {
        ZigValue *op1_val = ir_resolve_const(ira, casted_op1, UndefBad);
        if (op1_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        ZigValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
        if (op2_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        // If either comptime operand is negative, the operation must be treated as a signed
        // division/remainder
        bool is_signed_div = value_cmp_numeric_val_any(op1_val, CmpLT, nullptr) ||
            value_cmp_numeric_val_any(op2_val, CmpLT, nullptr);

        if (op_id == IrBinOpDivUnspecified && is_int) {
            // Default to truncating division and check if it's valid for the
            // given operands if signed
            op_id = IrBinOpDivTrunc;

            if (is_signed_div) {
                bool ok = false;

                if (value_cmp_numeric_val_any(op2_val, CmpEQ, nullptr)) {
                    // the division by zero error will be caught later, but we don't have a
                    // division function ambiguity problem.
                    ok = true;
                } else {
                    IrInstGen *trunc_val = ir_analyze_math_op(ira, &instruction->base.base, resolved_type,
                        op1_val, IrBinOpDivTrunc, op2_val);
                    if (type_is_invalid(trunc_val->value->type))
                        return ira->codegen->invalid_inst_gen;

                    IrInstGen *floor_val = ir_analyze_math_op(ira, &instruction->base.base, resolved_type,
                        op1_val, IrBinOpDivFloor, op2_val);
                    if (type_is_invalid(floor_val->value->type))
                        return ira->codegen->invalid_inst_gen;

                    IrInstGen *cmp_val = ir_analyze_bin_op_cmp_numeric(ira, &instruction->base.base,
                        trunc_val, floor_val, IrBinOpCmpEq);
                    if (type_is_invalid(cmp_val->value->type))
                        return ira->codegen->invalid_inst_gen;

                    // We can "upgrade" the operator only if trunc(a/b) == floor(a/b)
                    if (!ir_resolve_bool(ira, cmp_val, &ok))
                        return ira->codegen->invalid_inst_gen;
                }

                if (!ok) {
                    ir_add_error(ira, &instruction->base.base,
                        buf_sprintf("division with '%s' and '%s': signed integers must use @divTrunc, @divFloor, or @divExact",
                            buf_ptr(&op1->value->type->name),
                            buf_ptr(&op2->value->type->name)));
                    return ira->codegen->invalid_inst_gen;
                }
            }
        } else if (op_id == IrBinOpRemUnspecified) {
            op_id = IrBinOpRemRem;

            if (is_signed_div) {
                bool ok = false;

                if (value_cmp_numeric_val_any(op2_val, CmpEQ, nullptr)) {
                    // the division by zero error will be caught later, but we don't have a
                    // division function ambiguity problem.
                    ok = true;
                } else {
                    IrInstGen *rem_val = ir_analyze_math_op(ira, &instruction->base.base, resolved_type,
                        op1_val, IrBinOpRemRem, op2_val);
                    if (type_is_invalid(rem_val->value->type))
                        return ira->codegen->invalid_inst_gen;

                    IrInstGen *mod_val = ir_analyze_math_op(ira, &instruction->base.base, resolved_type,
                        op1_val, IrBinOpRemMod, op2_val);
                    if (type_is_invalid(mod_val->value->type))
                        return ira->codegen->invalid_inst_gen;

                    IrInstGen *cmp_val = ir_analyze_bin_op_cmp_numeric(ira, &instruction->base.base,
                        rem_val, mod_val, IrBinOpCmpEq);
                    if (type_is_invalid(cmp_val->value->type))
                        return ira->codegen->invalid_inst_gen;

                    // We can "upgrade" the operator only if mod(a,b) == rem(a,b)
                    if (!ir_resolve_bool(ira, cmp_val, &ok))
                        return ira->codegen->invalid_inst_gen;
                }

                if (!ok) {
                    ir_add_error(ira, &instruction->base.base,
                        buf_sprintf("remainder division with '%s' and '%s': signed integers and floats must use @rem or @mod",
                            buf_ptr(&op1->value->type->name),
                            buf_ptr(&op2->value->type->name)));
                    return ira->codegen->invalid_inst_gen;
                }
            }
        }

        return ir_analyze_math_op(ira, &instruction->base.base, resolved_type, op1_val, op_id, op2_val);
    }

    const bool is_signed_div =
        (scalar_type->id == ZigTypeIdInt && scalar_type->data.integral.is_signed) ||
        scalar_type->id == ZigTypeIdFloat;

    // Report an error telling the user to use the proper operators here
    if (op_id == IrBinOpDivUnspecified && is_int) {
        op_id = IrBinOpDivTrunc;

        if (is_signed_div) {
            ir_add_error(ira, &instruction->base.base,
                buf_sprintf("division with '%s' and '%s': signed integers must use @divTrunc, @divFloor, or @divExact",
                    buf_ptr(&op1->value->type->name),
                    buf_ptr(&op2->value->type->name)));
            return ira->codegen->invalid_inst_gen;
        }
    } else if (op_id == IrBinOpRemUnspecified) {
        op_id = IrBinOpRemRem;

        if (is_signed_div) {
            ir_add_error(ira, &instruction->base.base,
                buf_sprintf("remainder division with '%s' and '%s': signed integers and floats must use @rem or @mod",
                    buf_ptr(&op1->value->type->name),
                    buf_ptr(&op2->value->type->name)));
            return ira->codegen->invalid_inst_gen;
        }
    }

    return ir_build_bin_op_gen(ira, &instruction->base.base, resolved_type,
            op_id, casted_op1, casted_op2, instruction->safety_check_on);
}

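// Concatenates two tuples (e.g. `.{ 1, 2 } ++ .{ 3 }` in Zig source) by building a new inferred
// tuple type with op1's fields followed by op2's fields and copying each field value into a
// freshly resolved result location.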
static IrInstGen *ir_analyze_tuple_cat(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *op1, IrInstGen *op2)
{
    Error err;
    ZigType *op1_type = op1->value->type;
    ZigType *op2_type = op2->value->type;

    uint32_t op1_field_count = op1_type->data.structure.src_field_count;
    uint32_t op2_field_count = op2_type->data.structure.src_field_count;

    Buf *bare_name = buf_alloc();
    Buf *name = get_anon_type_name(ira->codegen, nullptr, container_string(ContainerKindStruct),
            source_instr->scope, source_instr->source_node, bare_name);
    ZigType *new_type = get_partial_container_type(ira->codegen, source_instr->scope,
        ContainerKindStruct, source_instr->source_node, buf_ptr(name), bare_name, ContainerLayoutAuto);
    new_type->data.structure.special = StructSpecialInferredTuple;
    new_type->data.structure.resolve_status = ResolveStatusBeingInferred;
    uint32_t new_field_count = op1_field_count + op2_field_count;

    new_type->data.structure.src_field_count = new_field_count;
    new_type->data.structure.fields = realloc_type_struct_fields(new_type->data.structure.fields,
            0, new_field_count);

    IrInstGen *new_struct_ptr = ir_resolve_result(ira, source_instr, no_result_loc(),
            new_type, nullptr, false, true);

    for (uint32_t i = 0; i < new_field_count; i += 1) {
        TypeStructField *src_field;
        if (i < op1_field_count) {
            src_field = op1_type->data.structure.fields[i];
        } else {
            src_field = op2_type->data.structure.fields[i - op1_field_count];
        }
        TypeStructField *new_field = new_type->data.structure.fields[i];
        new_field->name = buf_sprintf("%" PRIu32, i);
        new_field->type_entry = src_field->type_entry;
        new_field->type_val = src_field->type_val;
        new_field->src_index = i;
        new_field->decl_node = src_field->decl_node;
        new_field->init_val = src_field->init_val;
        new_field->is_comptime = src_field->is_comptime;
    }
    if ((err = type_resolve(ira->codegen, new_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    ZigList<IrInstGen *> const_ptrs = {};
    for (uint32_t i = 0; i < new_field_count; i += 1) {
        TypeStructField *dst_field = new_type->data.structure.fields[i];
        IrInstGen *src_struct_op;
        TypeStructField *src_field;
        if (i < op1_field_count) {
            src_field = op1_type->data.structure.fields[i];
            src_struct_op = op1;
        } else {
            src_field = op2_type->data.structure.fields[i - op1_field_count];
            src_struct_op = op2;
        }
        IrInstGen *field_value = ir_analyze_struct_value_field_value(ira, source_instr,
                src_struct_op, src_field);
        if (type_is_invalid(field_value->value->type))
            return ira->codegen->invalid_inst_gen;
        IrInstGen *dest_ptr = ir_analyze_struct_field_ptr(ira, source_instr, dst_field,
                new_struct_ptr, new_type, true);
        if (type_is_invalid(dest_ptr->value->type))
            return ira->codegen->invalid_inst_gen;
        if (instr_is_comptime(field_value)) {
            const_ptrs.append(dest_ptr);
        }
        IrInstGen *store_ptr_inst = ir_analyze_store_ptr(ira, source_instr, dest_ptr, field_value,
                true);
        if (type_is_invalid(store_ptr_inst->value->type))
            return ira->codegen->invalid_inst_gen;
    }
    if (const_ptrs.length != new_field_count) {
        new_struct_ptr->value->special = ConstValSpecialRuntime;
        for (size_t i = 0; i < const_ptrs.length; i += 1) {
            IrInstGen *elem_result_loc = const_ptrs.at(i);
            assert(elem_result_loc->value->special == ConstValSpecialStatic);
            if (elem_result_loc->value->type->data.pointer.inferred_struct_field != nullptr) {
                // This field will be generated comptime; no need to do this.
                continue;
            }
            IrInstGen *deref = ir_get_deref(ira, &elem_result_loc->base, elem_result_loc, nullptr);
            if (!type_requires_comptime(ira->codegen, elem_result_loc->value->type->data.pointer.child_type)) {
                elem_result_loc->value->special = ConstValSpecialRuntime;
            }
            ir_analyze_store_ptr(ira, &elem_result_loc->base, elem_result_loc, deref, true);
        }
    }

    const_ptrs.deinit();

    return ir_get_deref(ira, source_instr, new_struct_ptr, nullptr);
}

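// Analyzes the ++ operator (e.g. `"foo" ++ "bar"` or `a ++ b` in Zig source). Both operands must
// be comptime-known; arrays, slices, sentinel-terminated pointers, and single-item pointers to
// arrays are accepted, and the result carries a sentinel only when the operand sentinels agree.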
static IrInstGen *ir_analyze_array_cat(IrAnalyze *ira, IrInstSrcBinOp *instruction) {
    IrInstGen *op1 = instruction->op1->child;
    ZigType *op1_type = op1->value->type;
    if (type_is_invalid(op1_type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = instruction->op2->child;
    ZigType *op2_type = op2->value->type;
    if (type_is_invalid(op2_type))
        return ira->codegen->invalid_inst_gen;

    if (is_tuple(op1_type) && is_tuple(op2_type)) {
        return ir_analyze_tuple_cat(ira, &instruction->base.base, op1, op2);
    }

    ZigValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
    if (!op1_val)
        return ira->codegen->invalid_inst_gen;

    ZigValue *op2_val = ir_resolve_const(ira, op2, UndefBad);
    if (!op2_val)
        return ira->codegen->invalid_inst_gen;

    ZigValue *sentinel1 = nullptr;
    ZigValue *op1_array_val;
    size_t op1_array_index;
    size_t op1_array_end;
    ZigType *child_type;
    if (op1_type->id == ZigTypeIdArray) {
        child_type = op1_type->data.array.child_type;
        op1_array_val = op1_val;
        op1_array_index = 0;
        op1_array_end = op1_type->data.array.len;
        sentinel1 = op1_type->data.array.sentinel;
    } else if (op1_type->id == ZigTypeIdPointer &&
        op1_type->data.pointer.child_type == ira->codegen->builtin_types.entry_u8 &&
        op1_type->data.pointer.sentinel != nullptr &&
        op1_val->data.x_ptr.special == ConstPtrSpecialBaseArray)
    {
        child_type = op1_type->data.pointer.child_type;
        op1_array_val = op1_val->data.x_ptr.data.base_array.array_val;
        op1_array_index = op1_val->data.x_ptr.data.base_array.elem_index;
        op1_array_end = op1_array_val->type->data.array.len;
        sentinel1 = op1_type->data.pointer.sentinel;
    } else if (is_slice(op1_type)) {
        ZigType *ptr_type = op1_type->data.structure.fields[slice_ptr_index]->type_entry;
        child_type = ptr_type->data.pointer.child_type;
        ZigValue *ptr_val = op1_val->data.x_struct.fields[slice_ptr_index];
        assert(ptr_val->data.x_ptr.special == ConstPtrSpecialBaseArray);
        op1_array_val = ptr_val->data.x_ptr.data.base_array.array_val;
        op1_array_index = ptr_val->data.x_ptr.data.base_array.elem_index;
        ZigValue *len_val = op1_val->data.x_struct.fields[slice_len_index];
        op1_array_end = op1_array_index + bigint_as_usize(&len_val->data.x_bigint);
        sentinel1 = ptr_type->data.pointer.sentinel;
    } else if (op1_type->id == ZigTypeIdPointer &&
               op1_type->data.pointer.ptr_len == PtrLenSingle &&
               op1_type->data.pointer.child_type->id == ZigTypeIdArray)
    {
        ZigType *array_type = op1_type->data.pointer.child_type;
        child_type = array_type->data.array.child_type;
        op1_array_val = const_ptr_pointee(ira, ira->codegen, op1_val, op1->base.source_node);
        if (op1_array_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        op1_array_index = 0;
        op1_array_end = array_type->data.array.len;
        sentinel1 = array_type->data.array.sentinel;
    } else {
        ir_add_error(ira, &op1->base, buf_sprintf("expected array, found '%s'", buf_ptr(&op1->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ZigValue *sentinel2 = nullptr;
    ZigValue *op2_array_val;
    size_t op2_array_index;
    size_t op2_array_end;
    bool op2_type_valid;
    if (op2_type->id == ZigTypeIdArray) {
        op2_type_valid = op2_type->data.array.child_type == child_type;
        op2_array_val = op2_val;
        op2_array_index = 0;
        op2_array_end = op2_array_val->type->data.array.len;
        sentinel2 = op2_type->data.array.sentinel;
    } else if (op2_type->id == ZigTypeIdPointer &&
        op2_type->data.pointer.sentinel != nullptr &&
        op2_val->data.x_ptr.special == ConstPtrSpecialBaseArray)
    {
        op2_type_valid = op2_type->data.pointer.child_type == child_type;
        op2_array_val = op2_val->data.x_ptr.data.base_array.array_val;
        op2_array_index = op2_val->data.x_ptr.data.base_array.elem_index;
        op2_array_end = op2_array_val->type->data.array.len;

        sentinel2 = op2_type->data.pointer.sentinel;
    } else if (is_slice(op2_type)) {
        ZigType *ptr_type = op2_type->data.structure.fields[slice_ptr_index]->type_entry;
        op2_type_valid = ptr_type->data.pointer.child_type == child_type;
        ZigValue *ptr_val = op2_val->data.x_struct.fields[slice_ptr_index];
        assert(ptr_val->data.x_ptr.special == ConstPtrSpecialBaseArray);
        op2_array_val = ptr_val->data.x_ptr.data.base_array.array_val;
        op2_array_index = ptr_val->data.x_ptr.data.base_array.elem_index;
        ZigValue *len_val = op2_val->data.x_struct.fields[slice_len_index];
        op2_array_end = op2_array_index + bigint_as_usize(&len_val->data.x_bigint);

        sentinel2 = ptr_type->data.pointer.sentinel;
    } else if (op2_type->id == ZigTypeIdPointer && op2_type->data.pointer.ptr_len == PtrLenSingle &&
            op2_type->data.pointer.child_type->id == ZigTypeIdArray)
    {
        ZigType *array_type = op2_type->data.pointer.child_type;
        op2_type_valid = array_type->data.array.child_type == child_type;
        op2_array_val = const_ptr_pointee(ira, ira->codegen, op2_val, op2->base.source_node);
        if (op2_array_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        op2_array_index = 0;
        op2_array_end = array_type->data.array.len;

        sentinel2 = array_type->data.array.sentinel;
    } else {
        ir_add_error(ira, &op2->base,
            buf_sprintf("expected array or C string literal, found '%s'", buf_ptr(&op2->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }
    if (!op2_type_valid) {
        ir_add_error(ira, &op2->base, buf_sprintf("expected array of type '%s', found '%s'",
                    buf_ptr(&child_type->name),
                    buf_ptr(&op2->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    ZigValue *sentinel;
    if (sentinel1 != nullptr && sentinel2 != nullptr) {
        // When there is a sentinel mismatch, no sentinel on the result. The type system
        // will catch this if it is a problem.
        sentinel = const_values_equal(ira->codegen, sentinel1, sentinel2) ? sentinel1 : nullptr;
    } else if (sentinel1 != nullptr) {
        sentinel = sentinel1;
    } else if (sentinel2 != nullptr) {
        sentinel = sentinel2;
    } else {
        sentinel = nullptr;
    }

    // The type of the result is populated in the following if blocks
    IrInstGen *result = ir_const(ira, &instruction->base.base, nullptr);
    ZigValue *out_val = result->value;

    ZigValue *out_array_val;
    size_t new_len = (op1_array_end - op1_array_index) + (op2_array_end - op2_array_index);
    if (op1_type->id == ZigTypeIdPointer || op2_type->id == ZigTypeIdPointer) {
        out_array_val = ira->codegen->pass1_arena->create<ZigValue>();
        out_array_val->special = ConstValSpecialStatic;
        out_array_val->type = get_array_type(ira->codegen, child_type, new_len, sentinel);

        out_val->data.x_ptr.special = ConstPtrSpecialRef;
        out_val->data.x_ptr.data.ref.pointee = out_array_val;
        out_val->type = get_pointer_to_type(ira->codegen, out_array_val->type, true);
    } else if (is_slice(op1_type) || is_slice(op2_type)) {
        ZigType *ptr_type = get_pointer_to_type_extra2(ira->codegen, child_type,
                true, false, PtrLenUnknown, 0, 0, 0, false,
                VECTOR_INDEX_NONE, nullptr, sentinel);
        result->value->type = get_slice_type(ira->codegen, ptr_type);
        out_array_val = ira->codegen->pass1_arena->create<ZigValue>();
        out_array_val->special = ConstValSpecialStatic;
        out_array_val->type = get_array_type(ira->codegen, child_type, new_len, sentinel);

        out_val->data.x_struct.fields = alloc_const_vals_ptrs(ira->codegen, 2);

        out_val->data.x_struct.fields[slice_ptr_index]->type = ptr_type;
        out_val->data.x_struct.fields[slice_ptr_index]->special = ConstValSpecialStatic;
        out_val->data.x_struct.fields[slice_ptr_index]->data.x_ptr.special = ConstPtrSpecialBaseArray;
        out_val->data.x_struct.fields[slice_ptr_index]->data.x_ptr.data.base_array.array_val = out_array_val;
        out_val->data.x_struct.fields[slice_ptr_index]->data.x_ptr.data.base_array.elem_index = 0;

        out_val->data.x_struct.fields[slice_len_index]->type = ira->codegen->builtin_types.entry_usize;
        out_val->data.x_struct.fields[slice_len_index]->special = ConstValSpecialStatic;
        bigint_init_unsigned(&out_val->data.x_struct.fields[slice_len_index]->data.x_bigint, new_len);
    } else if (op1_type->id == ZigTypeIdArray || op2_type->id == ZigTypeIdArray) {
        result->value->type = get_array_type(ira->codegen, child_type, new_len, sentinel);
        out_array_val = out_val;
    } else {
        result->value->type = get_pointer_to_type_extra2(ira->codegen, child_type, true, false, PtrLenUnknown,
                0, 0, 0, false, VECTOR_INDEX_NONE, nullptr, sentinel);
        out_array_val = ira->codegen->pass1_arena->create<ZigValue>();
        out_array_val->special = ConstValSpecialStatic;
        out_array_val->type = get_array_type(ira->codegen, child_type, new_len, sentinel);
        out_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
        out_val->data.x_ptr.data.base_array.array_val = out_array_val;
        out_val->data.x_ptr.data.base_array.elem_index = 0;
    }

    if (op1_array_val->data.x_array.special == ConstArraySpecialUndef &&
        op2_array_val->data.x_array.special == ConstArraySpecialUndef)
    {
        out_array_val->data.x_array.special = ConstArraySpecialUndef;
        return result;
    }

    uint64_t full_len = new_len + ((sentinel != nullptr) ? 1 : 0);
    out_array_val->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(full_len);
    // TODO handle the buf case here for an optimization
    expand_undef_array(ira->codegen, op1_array_val);
    expand_undef_array(ira->codegen, op2_array_val);

    size_t next_index = 0;
    for (size_t i = op1_array_index; i < op1_array_end; i += 1, next_index += 1) {
        ZigValue *elem_dest_val = &out_array_val->data.x_array.data.s_none.elements[next_index];
        copy_const_val(ira->codegen, elem_dest_val, &op1_array_val->data.x_array.data.s_none.elements[i]);
        elem_dest_val->parent.id = ConstParentIdArray;
        elem_dest_val->parent.data.p_array.array_val = out_array_val;
        elem_dest_val->parent.data.p_array.elem_index = next_index;
    }
    for (size_t i = op2_array_index; i < op2_array_end; i += 1, next_index += 1) {
        ZigValue *elem_dest_val = &out_array_val->data.x_array.data.s_none.elements[next_index];
        copy_const_val(ira->codegen, elem_dest_val, &op2_array_val->data.x_array.data.s_none.elements[i]);
        elem_dest_val->parent.id = ConstParentIdArray;
        elem_dest_val->parent.data.p_array.array_val = out_array_val;
        elem_dest_val->parent.data.p_array.elem_index = next_index;
    }
    if (next_index < full_len) {
        ZigValue *elem_dest_val = &out_array_val->data.x_array.data.s_none.elements[next_index];
        copy_const_val(ira->codegen, elem_dest_val, sentinel);
        elem_dest_val->parent.id = ConstParentIdArray;
        elem_dest_val->parent.data.p_array.array_val = out_array_val;
        elem_dest_val->parent.data.p_array.elem_index = next_index;
        next_index += 1;
    }
    assert(next_index == full_len);

    return result;
}

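// Repeats a tuple (e.g. `.{ 1, 2 } ** 3` in Zig source): builds a new inferred tuple type with
// op1_field_count * mult_amt fields and copies the source field values round-robin into a freshly
// resolved result location.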
static IrInstGen *ir_analyze_tuple_mult(IrAnalyze *ira, IrInst* source_instr,
                                        IrInstGen *op1, IrInstGen *op2)
{
    Error err;
    ZigType *op1_type = op1->value->type;
    uint64_t op1_field_count = op1_type->data.structure.src_field_count;

    uint64_t mult_amt;
    if (!ir_resolve_usize(ira, op2, &mult_amt))
        return ira->codegen->invalid_inst_gen;

    uint64_t new_field_count;
    if (mul_u64_overflow(op1_field_count, mult_amt, &new_field_count)) {
        ir_add_error(ira, source_instr, buf_sprintf("operation results in overflow"));
        return ira->codegen->invalid_inst_gen;
    }

    Buf *bare_name = buf_alloc();
    Buf *name = get_anon_type_name(ira->codegen, nullptr, container_string(ContainerKindStruct),
        source_instr->scope, source_instr->source_node, bare_name);
    ZigType *new_type = get_partial_container_type(ira->codegen, source_instr->scope,
        ContainerKindStruct, source_instr->source_node, buf_ptr(name), bare_name, ContainerLayoutAuto);
    new_type->data.structure.special = StructSpecialInferredTuple;
    new_type->data.structure.resolve_status = ResolveStatusBeingInferred;
    new_type->data.structure.src_field_count = new_field_count;
    new_type->data.structure.fields = realloc_type_struct_fields(
        new_type->data.structure.fields, 0, new_field_count);

    IrInstGen *new_struct_ptr = ir_resolve_result(ira, source_instr, no_result_loc(),
        new_type, nullptr, false, true);

    for (uint64_t i = 0; i < new_field_count; i += 1) {
        TypeStructField *src_field = op1_type->data.structure.fields[i % op1_field_count];
        TypeStructField *new_field = new_type->data.structure.fields[i];

        new_field->name = buf_sprintf("%" ZIG_PRI_u64, i);
        new_field->type_entry = src_field->type_entry;
        new_field->type_val = src_field->type_val;
        new_field->src_index = i;
        new_field->decl_node = src_field->decl_node;
        new_field->init_val = src_field->init_val;
        new_field->is_comptime = src_field->is_comptime;
    }

    if ((err = type_resolve(ira->codegen, new_type, ResolveStatusZeroBitsKnown)))
        return ira->codegen->invalid_inst_gen;

    ZigList<IrInstGen *> const_ptrs = {};
    for (uint64_t i = 0; i < new_field_count; i += 1) {
        TypeStructField *src_field = op1_type->data.structure.fields[i % op1_field_count];
        TypeStructField *dst_field = new_type->data.structure.fields[i];

        IrInstGen *field_value = ir_analyze_struct_value_field_value(
            ira, source_instr, op1, src_field);
        if (type_is_invalid(field_value->value->type))
            return ira->codegen->invalid_inst_gen;

        IrInstGen *dest_ptr = ir_analyze_struct_field_ptr(
            ira, source_instr, dst_field, new_struct_ptr, new_type, true);
        if (type_is_invalid(dest_ptr->value->type))
            return ira->codegen->invalid_inst_gen;

        if (instr_is_comptime(field_value)) {
            const_ptrs.append(dest_ptr);
        }

        IrInstGen *store_ptr_inst = ir_analyze_store_ptr(
            ira, source_instr, dest_ptr, field_value, true);
        if (type_is_invalid(store_ptr_inst->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    if (const_ptrs.length != new_field_count) {
        new_struct_ptr->value->special = ConstValSpecialRuntime;
        for (size_t i = 0; i < const_ptrs.length; i += 1) {
            IrInstGen *elem_result_loc = const_ptrs.at(i);
            assert(elem_result_loc->value->special == ConstValSpecialStatic);
            if (elem_result_loc->value->type->data.pointer.inferred_struct_field != nullptr) {
                // This field will be generated comptime; no need to do this.
                continue;
            }
            IrInstGen *deref = ir_get_deref(ira, &elem_result_loc->base, elem_result_loc, nullptr);
            if (!type_requires_comptime(ira->codegen, elem_result_loc->value->type->data.pointer.child_type)) {
                elem_result_loc->value->special = ConstValSpecialRuntime;
            }
            IrInstGen *store_ptr_inst = ir_analyze_store_ptr(
                ira, &elem_result_loc->base, elem_result_loc, deref, true);
            if (type_is_invalid(store_ptr_inst->value->type))
                return ira->codegen->invalid_inst_gen;
        }
    }

    const_ptrs.deinit();

    return ir_get_deref(ira, source_instr, new_struct_ptr, nullptr);
}

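// Analyzes the `**` array multiplication operator. The left operand must be a
// comptime-known array, a single-item pointer to one, or a tuple (handled by
// ir_analyze_tuple_mult); the right operand must resolve to a usize. The result
// repeats the array's elements, preserving any sentinel, and is returned by
// reference when the operand was a pointer to an array.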
static IrInstGen *ir_analyze_array_mult(IrAnalyze *ira, IrInstSrcBinOp *instruction) {
    IrInstGen *op1 = instruction->op1->child;
    if (type_is_invalid(op1->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *op2 = instruction->op2->child;
    if (type_is_invalid(op2->value->type))
        return ira->codegen->invalid_inst_gen;

    bool want_ptr_to_array = false;
    ZigType *array_type;
    ZigValue *array_val;
    if (op1->value->type->id == ZigTypeIdArray) {
        array_type = op1->value->type;
        array_val = ir_resolve_const(ira, op1, UndefOk);
        if (array_val == nullptr)
            return ira->codegen->invalid_inst_gen;
    } else if (op1->value->type->id == ZigTypeIdPointer &&
               op1->value->type->data.pointer.ptr_len == PtrLenSingle &&
               op1->value->type->data.pointer.child_type->id == ZigTypeIdArray)
    {
        array_type = op1->value->type->data.pointer.child_type;
        IrInstGen *array_inst = ir_get_deref(ira, &op1->base, op1, nullptr);
        if (type_is_invalid(array_inst->value->type))
            return ira->codegen->invalid_inst_gen;
        array_val = ir_resolve_const(ira, array_inst, UndefOk);
        if (array_val == nullptr)
            return ira->codegen->invalid_inst_gen;
        want_ptr_to_array = true;
    } else if (is_tuple(op1->value->type)) {
        return ir_analyze_tuple_mult(ira, &instruction->base.base, op1, op2);
    } else {
        ir_add_error(ira, &op1->base, buf_sprintf("expected array type, found '%s'", buf_ptr(&op1->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    uint64_t mult_amt;
    if (!ir_resolve_usize(ira, op2, &mult_amt))
        return ira->codegen->invalid_inst_gen;

    uint64_t old_array_len = array_type->data.array.len;
    uint64_t new_array_len;

    if (mul_u64_overflow(old_array_len, mult_amt, &new_array_len)) {
        ir_add_error(ira, &instruction->base.base, buf_sprintf("operation results in overflow"));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *child_type = array_type->data.array.child_type;
    ZigType *result_array_type = get_array_type(ira->codegen, child_type, new_array_len,
            array_type->data.array.sentinel);

    IrInstGen *array_result;
    if (array_val->special == ConstValSpecialUndef || array_val->data.x_array.special == ConstArraySpecialUndef) {
        array_result = ir_const_undef(ira, &instruction->base.base, result_array_type);
    } else {
        array_result = ir_const(ira, &instruction->base.base, result_array_type);
        ZigValue *out_val = array_result->value;

        switch (type_has_one_possible_value(ira->codegen, result_array_type)) {
            case OnePossibleValueInvalid:
                return ira->codegen->invalid_inst_gen;
            case OnePossibleValueYes:
                goto skip_computation;
            case OnePossibleValueNo:
                break;
        }

        // TODO optimize the buf case
        expand_undef_array(ira->codegen, array_val);
        size_t extra_null_term = (array_type->data.array.sentinel != nullptr) ? 1 : 0;
        out_val->data.x_array.data.s_none.elements = ira->codegen->pass1_arena->allocate<ZigValue>(new_array_len + extra_null_term);

        uint64_t i = 0;
        for (uint64_t x = 0; x < mult_amt; x += 1) {
            for (uint64_t y = 0; y < old_array_len; y += 1) {
                ZigValue *elem_dest_val = &out_val->data.x_array.data.s_none.elements[i];
                copy_const_val(ira->codegen, elem_dest_val, &array_val->data.x_array.data.s_none.elements[y]);
                elem_dest_val->parent.id = ConstParentIdArray;
                elem_dest_val->parent.data.p_array.array_val = out_val;
                elem_dest_val->parent.data.p_array.elem_index = i;
                i += 1;
            }
        }
        assert(i == new_array_len);

        if (array_type->data.array.sentinel != nullptr) {
            ZigValue *elem_dest_val = &out_val->data.x_array.data.s_none.elements[i];
            copy_const_val(ira->codegen, elem_dest_val, array_type->data.array.sentinel);
            elem_dest_val->parent.id = ConstParentIdArray;
            elem_dest_val->parent.data.p_array.array_val = out_val;
            elem_dest_val->parent.data.p_array.elem_index = i;
            i += 1;
        }
    }
skip_computation:
    if (want_ptr_to_array) {
        return ir_get_ref(ira, &instruction->base.base, array_result, true, false);
    } else {
        return array_result;
    }
}

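// Analyzes the `||` error set merge operator. Both operands must be error set
// types; inferred error sets are resolved first. If either operand is the
// global error set, the result is the global error set; otherwise it is the
// union of the two sets.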
static IrInstGen *ir_analyze_instruction_merge_err_sets(IrAnalyze *ira,
        IrInstSrcMergeErrSets *instruction)
{
    ZigType *op1_type = ir_resolve_error_set_type(ira, &instruction->base.base, instruction->op1->child);
    if (type_is_invalid(op1_type))
        return ira->codegen->invalid_inst_gen;

    ZigType *op2_type = ir_resolve_error_set_type(ira, &instruction->base.base, instruction->op2->child);
    if (type_is_invalid(op2_type))
        return ira->codegen->invalid_inst_gen;

    if (!resolve_inferred_error_set(ira->codegen, op1_type, instruction->op1->child->base.source_node)) {
        return ira->codegen->invalid_inst_gen;
    }

    if (!resolve_inferred_error_set(ira->codegen, op2_type, instruction->op2->child->base.source_node)) {
        return ira->codegen->invalid_inst_gen;
    }

    if (type_is_global_error_set(op1_type) ||
        type_is_global_error_set(op2_type))
    {
        return ir_const_type(ira, &instruction->base.base, ira->codegen->builtin_types.entry_global_error_set);
    }

    size_t errors_count = ira->codegen->errors_by_index.length;
    ErrorTableEntry **errors = heap::c_allocator.allocate<ErrorTableEntry *>(errors_count);
    for (uint32_t i = 0, count = op1_type->data.error_set.err_count; i < count; i += 1) {
        ErrorTableEntry *error_entry = op1_type->data.error_set.errors[i];
        assert(errors[error_entry->value] == nullptr);
        errors[error_entry->value] = error_entry;
    }
    ZigType *result_type = get_error_set_union(ira->codegen, errors, op1_type, op2_type, instruction->type_name);
    heap::c_allocator.deallocate(errors, errors_count);

    return ir_const_type(ira, &instruction->base.base, result_type);
}


static IrInstGen *ir_analyze_instruction_bin_op(IrAnalyze *ira, IrInstSrcBinOp *bin_op_instruction) {
    IrBinOp op_id = bin_op_instruction->op_id;
    switch (op_id) {
        case IrBinOpInvalid:
            zig_unreachable();
        case IrBinOpBoolOr:
        case IrBinOpBoolAnd:
            return ir_analyze_bin_op_bool(ira, bin_op_instruction);
        case IrBinOpCmpEq:
        case IrBinOpCmpNotEq:
        case IrBinOpCmpLessThan:
        case IrBinOpCmpGreaterThan:
        case IrBinOpCmpLessOrEq:
        case IrBinOpCmpGreaterOrEq:
            return ir_analyze_bin_op_cmp(ira, bin_op_instruction);
        case IrBinOpBitShiftLeftLossy:
        case IrBinOpBitShiftLeftExact:
        case IrBinOpBitShiftRightLossy:
        case IrBinOpBitShiftRightExact:
            return ir_analyze_bit_shift(ira, bin_op_instruction);
        case IrBinOpBinOr:
        case IrBinOpBinXor:
        case IrBinOpBinAnd:
        case IrBinOpAdd:
        case IrBinOpAddWrap:
        case IrBinOpSub:
        case IrBinOpSubWrap:
        case IrBinOpMult:
        case IrBinOpMultWrap:
        case IrBinOpDivUnspecified:
        case IrBinOpDivTrunc:
        case IrBinOpDivFloor:
        case IrBinOpDivExact:
        case IrBinOpRemUnspecified:
        case IrBinOpRemRem:
        case IrBinOpRemMod:
            return ir_analyze_bin_op_math(ira, bin_op_instruction);
        case IrBinOpArrayCat:
            return ir_analyze_array_cat(ira, bin_op_instruction);
        case IrBinOpArrayMult:
            return ir_analyze_array_mult(ira, bin_op_instruction);
    }
    zig_unreachable();
}

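// Analyzes a variable declaration: validates the explicitly declared type (if
// any), takes the initialization pointer produced by pass1, enforces that
// comptime-only types are declared const or comptime, resolves alignment, and
// turns comptime initializers into runtime stores when the variable itself is
// a runtime variable.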
static IrInstGen *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstSrcDeclVar *decl_var_instruction) {
    Error err;
    ZigVar *var = decl_var_instruction->var;

    ZigType *explicit_type = nullptr;
    IrInstGen *var_type = nullptr;
    if (decl_var_instruction->var_type != nullptr) {
        var_type = decl_var_instruction->var_type->child;
        ZigType *proposed_type = ir_resolve_type(ira, var_type);
        explicit_type = validate_var_type(ira->codegen, var_type->base.source_node, proposed_type);
        if (type_is_invalid(explicit_type)) {
            var->var_type = ira->codegen->builtin_types.entry_invalid;
            return ira->codegen->invalid_inst_gen;
        }
    }

    AstNode *source_node = decl_var_instruction->base.base.source_node;

    bool is_comptime_var = ir_get_var_is_comptime(var);

    bool var_class_requires_const = false;

    IrInstGen *var_ptr = decl_var_instruction->ptr->child;
    // If this is null, a compile error occurred and the variable was never initialized.
    // If there are no compile errors, an ir_expr_wrap call is probably missing in pass1 IR generation.
    if (var_ptr == nullptr || type_is_invalid(var_ptr->value->type)) {
        ir_assert(var_ptr != nullptr || ira->codegen->errors.length != 0, &decl_var_instruction->base.base);
        var->var_type = ira->codegen->builtin_types.entry_invalid;
        return ira->codegen->invalid_inst_gen;
    }

    // The ir_build_var_decl_src call is supposed to pass a pointer to the allocation, not an initialization value.
    ir_assert(var_ptr->value->type->id == ZigTypeIdPointer, &decl_var_instruction->base.base);

    ZigType *result_type = var_ptr->value->type->data.pointer.child_type;
    if (type_is_invalid(result_type)) {
        result_type = ira->codegen->builtin_types.entry_invalid;
    } else if (result_type->id == ZigTypeIdUnreachable || result_type->id == ZigTypeIdOpaque) {
        zig_unreachable();
    }

    ZigValue *init_val = nullptr;
    if (instr_is_comptime(var_ptr) && var_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
        ZigValue *ptr_val = ir_resolve_const(ira, var_ptr, UndefBad);
        if (ptr_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        init_val = const_ptr_pointee(ira, ira->codegen, ptr_val, decl_var_instruction->base.base.source_node);
        if (init_val == nullptr)
            return ira->codegen->invalid_inst_gen;

        if (is_comptime_var) {
            if (var->gen_is_const) {
                var->const_value = init_val;
            } else {
                var->const_value = ira->codegen->pass1_arena->create<ZigValue>();
                copy_const_val(ira->codegen, var->const_value, init_val);
            }
        }
    }

    switch (type_requires_comptime(ira->codegen, result_type)) {
    case ReqCompTimeInvalid:
        result_type = ira->codegen->builtin_types.entry_invalid;
        break;
    case ReqCompTimeYes:
        var_class_requires_const = true;
        if (!var->gen_is_const && !is_comptime_var) {
            ir_add_error_node(ira, source_node,
                buf_sprintf("variable of type '%s' must be const or comptime",
                    buf_ptr(&result_type->name)));
            result_type = ira->codegen->builtin_types.entry_invalid;
        }
        break;
    case ReqCompTimeNo:
        if (init_val != nullptr && value_is_comptime(init_val)) {
            if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec,
                    decl_var_instruction->base.base.source_node, init_val, UndefOk)))
            {
                result_type = ira->codegen->builtin_types.entry_invalid;
            } else if (init_val->type->id == ZigTypeIdFn &&
                init_val->special != ConstValSpecialUndef &&
                init_val->data.x_ptr.special == ConstPtrSpecialFunction &&
                init_val->data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways)
            {
                var_class_requires_const = true;
                if (!var->src_is_const && !is_comptime_var) {
                    ErrorMsg *msg = ir_add_error_node(ira, source_node,
                        buf_sprintf("functions marked inline must be stored in const or comptime var"));
                    AstNode *proto_node = init_val->data.x_ptr.data.fn.fn_entry->proto_node;
                    add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here"));
                    result_type = ira->codegen->builtin_types.entry_invalid;
                }
            }
        }
        break;
    }

    while (var->next_var != nullptr) {
        var = var->next_var;
    }

    // This must be done after possibly creating a new variable above
    var->ref_count = 0;

    var->ptr_instruction = var_ptr;
    var->var_type = result_type;
    assert(var->var_type);

    if (type_is_invalid(result_type)) {
        return ir_const_void(ira, &decl_var_instruction->base.base);
    }

    if (decl_var_instruction->align_value == nullptr) {
        if ((err = type_resolve(ira->codegen, result_type, ResolveStatusAlignmentKnown))) {
            var->var_type = ira->codegen->builtin_types.entry_invalid;
            return ir_const_void(ira, &decl_var_instruction->base.base);
        }
        var->align_bytes = get_ptr_align(ira->codegen, var_ptr->value->type);
    } else {
        if (!ir_resolve_align(ira, decl_var_instruction->align_value->child, nullptr, &var->align_bytes)) {
            var->var_type = ira->codegen->builtin_types.entry_invalid;
        }
    }

    if (init_val != nullptr && value_is_comptime(init_val)) {
        // Resolve ConstPtrMutInfer
        if (var->gen_is_const) {
            var_ptr->value->data.x_ptr.mut = ConstPtrMutComptimeConst;
        } else if (is_comptime_var) {
            var_ptr->value->data.x_ptr.mut = ConstPtrMutComptimeVar;
        } else {
            // We need a runtime pointer but we have a comptime value.
            // Since it is a comptime value, no instructions exist for it,
            // so we copy the init value into the allocation here.
            IrInstGen *deref = ir_get_deref(ira, &var_ptr->base, var_ptr, nullptr);
            if (type_is_invalid(deref->value->type)) {
                var->var_type = ira->codegen->builtin_types.entry_invalid;
                return ira->codegen->invalid_inst_gen;
            }
            // If this assertion trips, something is wrong with the IR instructions, because
            // we expected the above deref to return a constant value, but it created a runtime
            // instruction.
            assert(deref->value->special != ConstValSpecialRuntime);
            var_ptr->value->special = ConstValSpecialRuntime;
            ir_analyze_store_ptr(ira, &var_ptr->base, var_ptr, deref, false);
        }
        if (instr_is_comptime(var_ptr) && (is_comptime_var || (var_class_requires_const && var->gen_is_const))) {
            return ir_const_void(ira, &decl_var_instruction->base.base);
        }
    } else if (is_comptime_var) {
        ir_add_error(ira, &decl_var_instruction->base.base,
                buf_sprintf("cannot store runtime value in compile time variable"));
        var->var_type = ira->codegen->builtin_types.entry_invalid;
        return ira->codegen->invalid_inst_gen;
    }

    ZigFn *fn_entry = ira->new_irb.exec->fn_entry;
    if (fn_entry)
        fn_entry->variable_list.append(var);

    return ir_build_var_decl_gen(ira, &decl_var_instruction->base.base, var, var_ptr);
}

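// Analyzes @export: resolves the comptime options struct (name, linkage, and
// optional section), rejects empty or colliding symbol names, then validates
// that the target (a function, extern container, array, integer, or type) can
// actually be exported before registering the export.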
static IrInstGen *ir_analyze_instruction_export(IrAnalyze *ira, IrInstSrcExport *instruction) {
    IrInstGen *target = instruction->target->child;
    if (type_is_invalid(target->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *options = instruction->options->child;
    if (type_is_invalid(options->value->type))
        return ira->codegen->invalid_inst_gen;

    ZigType *options_type = options->value->type;
    assert(options_type->id == ZigTypeIdStruct);

    TypeStructField *name_field = find_struct_type_field(options_type, buf_create_from_str("name"));
    ir_assert(name_field != nullptr, &instruction->base.base);
    IrInstGen *name_inst = ir_analyze_struct_value_field_value(ira, &instruction->base.base, options, name_field);
    if (type_is_invalid(name_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    TypeStructField *linkage_field = find_struct_type_field(options_type, buf_create_from_str("linkage"));
    ir_assert(linkage_field != nullptr, &instruction->base.base);
    IrInstGen *linkage_inst = ir_analyze_struct_value_field_value(ira, &instruction->base.base, options, linkage_field);
    if (type_is_invalid(linkage_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    TypeStructField *section_field = find_struct_type_field(options_type, buf_create_from_str("section"));
    ir_assert(section_field != nullptr, &instruction->base.base);
    IrInstGen *section_inst = ir_analyze_struct_value_field_value(ira, &instruction->base.base, options, section_field);
    if (type_is_invalid(section_inst->value->type))
        return ira->codegen->invalid_inst_gen;

    // The `section` field is optional; we have to unwrap it first.
    IrInstGen *non_null_check = ir_analyze_test_non_null(ira, &instruction->base.base, section_inst);
    bool is_non_null;
    if (!ir_resolve_bool(ira, non_null_check, &is_non_null))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *section_str_inst = nullptr;
    if (is_non_null) {
        section_str_inst = ir_analyze_optional_value_payload_value(ira, &instruction->base.base, section_inst, false);
        if (type_is_invalid(section_str_inst->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    // Resolve all the comptime values
    Buf *symbol_name = ir_resolve_str(ira, name_inst);
    if (!symbol_name)
        return ira->codegen->invalid_inst_gen;

    if (buf_len(symbol_name) < 1) {
        ir_add_error(ira, &name_inst->base,
            buf_sprintf("exported symbol name cannot be empty"));
        return ira->codegen->invalid_inst_gen;
    }

    GlobalLinkageId global_linkage_id;
    if (!ir_resolve_global_linkage(ira, linkage_inst, &global_linkage_id))
        return ira->codegen->invalid_inst_gen;

    Buf *section_name = nullptr;
    if (section_str_inst != nullptr && !(section_name = ir_resolve_str(ira, section_str_inst)))
        return ira->codegen->invalid_inst_gen;

    // TODO: This function needs to be audited.
    // It's not clear how all the different types are supposed to be handled.
    // Need comprehensive tests for exporting one thing in one file and declaring an extern var
    // in another file.
    TldFn *tld_fn = heap::c_allocator.create<TldFn>();
    tld_fn->base.id = TldIdFn;
    tld_fn->base.source_node = instruction->base.base.source_node;

    auto entry = ira->codegen->exported_symbol_names.put_unique(symbol_name, &tld_fn->base);
    if (entry) {
        AstNode *other_export_node = entry->value->source_node;
        ErrorMsg *msg = ir_add_error(ira, &instruction->base.base,
                buf_sprintf("exported symbol collision: '%s'", buf_ptr(symbol_name)));
        add_error_note(ira->codegen, msg, other_export_node, buf_sprintf("other symbol is here"));
        return ira->codegen->invalid_inst_gen;
    }

    Error err;
    bool want_var_export = false;
    switch (target->value->type->id) {
        case ZigTypeIdInvalid:
        case ZigTypeIdUnreachable:
            zig_unreachable();
        case ZigTypeIdFn: {
            assert(target->value->data.x_ptr.special == ConstPtrSpecialFunction);
            ZigFn *fn_entry = target->value->data.x_ptr.data.fn.fn_entry;
            tld_fn->fn_entry = fn_entry;
            CallingConvention cc = fn_entry->type_entry->data.fn.fn_type_id.cc;
            switch (cc) {
                case CallingConventionUnspecified: {
                    ErrorMsg *msg = ir_add_error(ira, &target->base,
                        buf_sprintf("exported function must specify calling convention"));
                    add_error_note(ira->codegen, msg, fn_entry->proto_node, buf_sprintf("declared here"));
                } break;
                case CallingConventionAsync: {
                    ErrorMsg *msg = ir_add_error(ira, &target->base,
                        buf_sprintf("exported function cannot be async"));
                    add_error_note(ira->codegen, msg, fn_entry->proto_node, buf_sprintf("declared here"));
                } break;
                case CallingConventionC:
                case CallingConventionCold:
                case CallingConventionNaked:
                case CallingConventionInterrupt:
                case CallingConventionSignal:
                case CallingConventionStdcall:
                case CallingConventionFastcall:
                case CallingConventionVectorcall:
                case CallingConventionThiscall:
                case CallingConventionAPCS:
                case CallingConventionAAPCS:
                case CallingConventionAAPCSVFP:
                    add_fn_export(ira->codegen, fn_entry, buf_ptr(symbol_name), global_linkage_id, cc);
                    fn_entry->section_name = section_name;
                    break;
            }
        } break;
        case ZigTypeIdStruct:
            if (is_slice(target->value->type)) {
                ir_add_error(ira, &target->base,
                    buf_sprintf("unable to export value of type '%s'", buf_ptr(&target->value->type->name)));
            } else if (target->value->type->data.structure.layout != ContainerLayoutExtern) {
                ErrorMsg *msg = ir_add_error(ira, &target->base,
                    buf_sprintf("exported struct value must be declared extern"));
                add_error_note(ira->codegen, msg, target->value->type->data.structure.decl_node, buf_sprintf("declared here"));
            } else {
                want_var_export = true;
            }
            break;
        case ZigTypeIdUnion:
            if (target->value->type->data.unionation.layout != ContainerLayoutExtern) {
                ErrorMsg *msg = ir_add_error(ira, &target->base,
                    buf_sprintf("exported union value must be declared extern"));
                add_error_note(ira->codegen, msg, target->value->type->data.unionation.decl_node, buf_sprintf("declared here"));
            } else {
                want_var_export = true;
            }
            break;
        case ZigTypeIdEnum:
            if (target->value->type->data.enumeration.layout != ContainerLayoutExtern) {
                ErrorMsg *msg = ir_add_error(ira, &target->base,
                    buf_sprintf("exported enum value must be declared extern"));
                add_error_note(ira->codegen, msg, target->value->type->data.enumeration.decl_node, buf_sprintf("declared here"));
            } else {
                want_var_export = true;
            }
            break;
        case ZigTypeIdArray: {
            bool ok_type;
            if ((err = type_allowed_in_extern(ira->codegen, target->value->type->data.array.child_type, &ok_type)))
                return ira->codegen->invalid_inst_gen;

            if (!ok_type) {
                ir_add_error(ira, &target->base,
                    buf_sprintf("array element type '%s' not extern-compatible",
                        buf_ptr(&target->value->type->data.array.child_type->name)));
            } else {
                want_var_export = true;
            }
            break;
        }
        case ZigTypeIdMetaType: {
            ZigType *type_value = target->value->data.x_type;
            switch (type_value->id) {
                case ZigTypeIdInvalid:
                    zig_unreachable();
                case ZigTypeIdStruct:
                    if (is_slice(type_value)) {
                        ir_add_error(ira, &target->base,
                            buf_sprintf("unable to export type '%s'", buf_ptr(&type_value->name)));
                    } else if (type_value->data.structure.layout != ContainerLayoutExtern) {
                        ErrorMsg *msg = ir_add_error(ira, &target->base,
                            buf_sprintf("exported struct must be declared extern"));
                        add_error_note(ira->codegen, msg, type_value->data.structure.decl_node, buf_sprintf("declared here"));
                    }
                    break;
                case ZigTypeIdUnion:
                    if (type_value->data.unionation.layout != ContainerLayoutExtern) {
                        ErrorMsg *msg = ir_add_error(ira, &target->base,
                            buf_sprintf("exported union must be declared extern"));
                        add_error_note(ira->codegen, msg, type_value->data.unionation.decl_node, buf_sprintf("declared here"));
                    }
                    break;
                case ZigTypeIdEnum:
                    if (type_value->data.enumeration.layout != ContainerLayoutExtern) {
                        ErrorMsg *msg = ir_add_error(ira, &target->base,
                            buf_sprintf("exported enum must be declared extern"));
                        add_error_note(ira->codegen, msg, type_value->data.enumeration.decl_node, buf_sprintf("declared here"));
                    }
                    break;
                case ZigTypeIdFn: {
                    if (type_value->data.fn.fn_type_id.cc == CallingConventionUnspecified) {
                        ir_add_error(ira, &target->base,
                            buf_sprintf("exported function type must specify calling convention"));
                    }
                } break;
                case ZigTypeIdInt:
                case ZigTypeIdFloat:
                case ZigTypeIdPointer:
                case ZigTypeIdArray:
                case ZigTypeIdBool:
                case ZigTypeIdVector:
                    break;
                case ZigTypeIdMetaType:
                case ZigTypeIdVoid:
                case ZigTypeIdUnreachable:
                case ZigTypeIdComptimeFloat:
                case ZigTypeIdComptimeInt:
                case ZigTypeIdEnumLiteral:
                case ZigTypeIdUndefined:
                case ZigTypeIdNull:
                case ZigTypeIdOptional:
                case ZigTypeIdErrorUnion:
                case ZigTypeIdErrorSet:
                case ZigTypeIdBoundFn:
                case ZigTypeIdOpaque:
                case ZigTypeIdFnFrame:
                case ZigTypeIdAnyFrame:
                    ir_add_error(ira, &target->base,
                        buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name)));
                    break;
            }
        } break;
        case ZigTypeIdInt:
            want_var_export = true;
            break;
        case ZigTypeIdVoid:
        case ZigTypeIdBool:
        case ZigTypeIdFloat:
        case ZigTypeIdPointer:
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
        case ZigTypeIdOptional:
        case ZigTypeIdErrorUnion:
        case ZigTypeIdErrorSet:
        case ZigTypeIdVector:
            zig_panic("TODO export const value of type %s", buf_ptr(&target->value->type->name));
        case ZigTypeIdBoundFn:
        case ZigTypeIdOpaque:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdFnFrame:
        case ZigTypeIdAnyFrame:
            ir_add_error(ira, &target->base,
                    buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value->type->name)));
            break;
    }

    // TODO audit the various ways to use @export
    if (want_var_export && target->id == IrInstGenIdLoadPtr) {
        IrInstGenLoadPtr *load_ptr = reinterpret_cast<IrInstGenLoadPtr *>(target);
        if (load_ptr->ptr->id == IrInstGenIdVarPtr) {
            IrInstGenVarPtr *var_ptr = reinterpret_cast<IrInstGenVarPtr *>(load_ptr->ptr);
            ZigVar *var = var_ptr->var;
            add_var_export(ira->codegen, var, buf_ptr(symbol_name), global_linkage_id);
            var->section_name = section_name;
        }
    }

    return ir_const_void(ira, &instruction->base.base);
}

static bool exec_has_err_ret_trace(CodeGen *g, IrExecutableSrc *exec) {
    ZigFn *fn_entry = exec_fn_entry(exec);
    return fn_entry != nullptr && fn_entry->calls_or_awaits_errorable_fn && g->have_err_ret_tracing;
}

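// Analyzes an error return trace reference. In the nullable form, a comptime
// null pointer is produced when the current executable has no error return
// trace; otherwise a runtime instruction that loads the trace pointer is built.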
static IrInstGen *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
        IrInstSrcErrorReturnTrace *instruction)
{
    ZigType *ptr_to_stack_trace_type = get_pointer_to_type(ira->codegen, get_stack_trace_type(ira->codegen), false);
    if (instruction->optional == IrInstErrorReturnTraceNull) {
        ZigType *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type);
        if (!exec_has_err_ret_trace(ira->codegen, ira->old_irb.exec)) {
            IrInstGen *result = ir_const(ira, &instruction->base.base, optional_type);
            ZigValue *out_val = result->value;
            assert(get_src_ptr_type(optional_type) != nullptr);
            out_val->data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
            out_val->data.x_ptr.data.hard_coded_addr.addr = 0;
            return result;
        }
        return ir_build_error_return_trace_gen(ira, instruction->base.base.scope,
                instruction->base.base.source_node, instruction->optional, optional_type);
    } else {
        assert(ira->codegen->have_err_ret_tracing);
        return ir_build_error_return_trace_gen(ira, instruction->base.base.scope,
                instruction->base.base.source_node, instruction->optional, ptr_to_stack_trace_type);
    }
}

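// Analyzes the `E!T` error union type expression. The result is a lazy type
// value; the error set and payload operands are recorded here and only fully
// resolved later, on demand.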
static IrInstGen *ir_analyze_instruction_error_union(IrAnalyze *ira, IrInstSrcErrorUnion *instruction) {
    IrInstGen *result = ir_const(ira, &instruction->base.base, ira->codegen->builtin_types.entry_type);
    result->value->special = ConstValSpecialLazy;

    LazyValueErrUnionType *lazy_err_union_type = heap::c_allocator.create<LazyValueErrUnionType>();
    lazy_err_union_type->ira = ira; ira_ref(ira);
    result->value->data.x_lazy = &lazy_err_union_type->base;
    lazy_err_union_type->base.id = LazyValueIdErrUnionType;

    lazy_err_union_type->err_set_type = instruction->err_set->child;
    if (ir_resolve_type_lazy(ira, lazy_err_union_type->err_set_type) == nullptr)
        return ira->codegen->invalid_inst_gen;

    lazy_err_union_type->payload_type = instruction->payload->child;
    if (ir_resolve_type_lazy(ira, lazy_err_union_type->payload_type) == nullptr)
        return ira->codegen->invalid_inst_gen;

    return result;
}

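// Creates an allocation for a variable or result location. The pointee starts
// out undefined and the pointer is marked ConstPtrMutInfer (or
// ConstPtrMutComptimeVar when force_comptime is set), so later stores decide
// whether the allocation stays comptime-known or becomes a runtime alloca.
// Requesting an explicit alignment for a zero-bit type is a compile error.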
static IrInstGen *ir_analyze_alloca(IrAnalyze *ira, IrInst *source_inst, ZigType *var_type,
        uint32_t align, const char *name_hint, bool force_comptime)
{
    Error err;

    ZigValue *pointee = ira->codegen->pass1_arena->create<ZigValue>();
    pointee->special = ConstValSpecialUndef;
    pointee->llvm_align = align;

    IrInstGenAlloca *result = ir_build_alloca_gen(ira, source_inst, align, name_hint);
    result->base.value->special = ConstValSpecialStatic;
    result->base.value->data.x_ptr.special = ConstPtrSpecialRef;
    result->base.value->data.x_ptr.mut = force_comptime ? ConstPtrMutComptimeVar : ConstPtrMutInfer;
    result->base.value->data.x_ptr.data.ref.pointee = pointee;

    bool var_type_has_bits;
    if ((err = type_has_bits2(ira->codegen, var_type, &var_type_has_bits)))
        return ira->codegen->invalid_inst_gen;
    if (align != 0) {
        if ((err = type_resolve(ira->codegen, var_type, ResolveStatusAlignmentKnown)))
            return ira->codegen->invalid_inst_gen;
        if (!var_type_has_bits) {
            ir_add_error(ira, source_inst,
                buf_sprintf("variable '%s' of zero-bit type '%s' has no in-memory representation, it cannot be aligned",
                    name_hint, buf_ptr(&var_type->name)));
            return ira->codegen->invalid_inst_gen;
        }
    }
    assert(result->base.value->data.x_ptr.special != ConstPtrSpecialInvalid);

    pointee->type = var_type;
    result->base.value->type = get_pointer_to_type_extra(ira->codegen, var_type, false, false,
            PtrLenSingle, align, 0, 0, false);

    if (!force_comptime) {
        ZigFn *fn_entry = ira->new_irb.exec->fn_entry;
        if (fn_entry != nullptr) {
            fn_entry->alloca_gen_list.append(result);
        }
    }
    return &result->base;
}

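// Returns the type already associated with a result location, or nullptr when
// the result location does not constrain the type.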
static ZigType *ir_result_loc_expected_type(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc)
{
    switch (result_loc->id) {
        case ResultLocIdInvalid:
        case ResultLocIdPeerParent:
            zig_unreachable();
        case ResultLocIdNone:
        case ResultLocIdVar:
        case ResultLocIdBitCast:
        case ResultLocIdCast:
            return nullptr;
        case ResultLocIdInstruction:
            return result_loc->source_instruction->child->value->type;
        case ResultLocIdReturn:
            return ira->explicit_return_type;
        case ResultLocIdPeer:
            return reinterpret_cast<ResultLocPeer*>(result_loc)->parent->resolved_type;
    }
    zig_unreachable();
}

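// Returns whether a type may be used as an operand of @bitCast; pointers and
// comptime-only types are rejected.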
static bool type_can_bit_cast(ZigType *t) {
    switch (t->id) {
        case ZigTypeIdInvalid:
            zig_unreachable();
        case ZigTypeIdMetaType:
        case ZigTypeIdOpaque:
        case ZigTypeIdBoundFn:
        case ZigTypeIdUnreachable:
        case ZigTypeIdComptimeFloat:
        case ZigTypeIdComptimeInt:
        case ZigTypeIdEnumLiteral:
        case ZigTypeIdUndefined:
        case ZigTypeIdNull:
        case ZigTypeIdPointer:
            return false;
        default:
            // TODO list these types out explicitly, there are probably some other invalid ones here
            return true;
    }
}

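// Points a freshly built result location at an undefined comptime pointee and
// marks the pointer ConstPtrMutInfer, so that analysis can later decide whether
// the location stays comptime-known or must become a runtime allocation.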
static void set_up_result_loc_for_inferred_comptime(IrAnalyze *ira, IrInstGen *ptr) {
    ZigValue *undef_child = ira->codegen->pass1_arena->create<ZigValue>();
    undef_child->type = ptr->value->type->data.pointer.child_type;
    undef_child->special = ConstValSpecialUndef;
    ptr->value->special = ConstValSpecialStatic;
    ptr->value->data.x_ptr.mut = ConstPtrMutInfer;
    ptr->value->data.x_ptr.special = ConstPtrSpecialRef;
    ptr->value->data.x_ptr.data.ref.pointee = undef_child;
}

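// Determines whether a result location provides an explicit result type:
// return locations, instructions, bit casts, casts to a concrete type, and
// variables with a declared type do; none and peer locations do not.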
static Error ir_result_has_type(IrAnalyze *ira, ResultLoc *result_loc, bool *out) {
    switch (result_loc->id) {
        case ResultLocIdInvalid:
        case ResultLocIdPeerParent:
            zig_unreachable();
        case ResultLocIdNone:
        case ResultLocIdPeer:
            *out = false;
            return ErrorNone;
        case ResultLocIdReturn:
        case ResultLocIdInstruction:
        case ResultLocIdBitCast:
            *out = true;
            return ErrorNone;
        case ResultLocIdCast: {
            ResultLocCast *result_cast = reinterpret_cast<ResultLocCast *>(result_loc);
            ZigType *dest_type = ir_resolve_type(ira, result_cast->base.source_instruction->child);
            if (type_is_invalid(dest_type))
                return ErrorSemanticAnalyzeFail;
            *out = (dest_type != ira->codegen->builtin_types.entry_var);
            return ErrorNone;
        }
        case ResultLocIdVar:
            *out = reinterpret_cast<ResultLocVar *>(result_loc)->var->decl_node->data.variable_declaration.type != nullptr;
            return ErrorNone;
    }
    zig_unreachable();
}

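// Builds an anonymous stack allocation to serve as the result location when
// the pass1 result location cannot provide one.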
static IrInstGen *ir_resolve_no_result_loc(IrAnalyze *ira, IrInst *suspend_source_instr,
    ResultLoc *result_loc, ZigType *value_type)
{
    if (type_is_invalid(value_type))
        return ira->codegen->invalid_inst_gen;
    IrInstGenAlloca *alloca_gen = ir_build_alloca_gen(ira, suspend_source_instr, 0, "");
    alloca_gen->base.value->type = get_pointer_to_type_extra(ira->codegen, value_type, false, false,
            PtrLenSingle, 0, 0, 0, false);
    set_up_result_loc_for_inferred_comptime(ira, &alloca_gen->base);
    ZigFn *fn_entry = ira->new_irb.exec->fn_entry;
    if (fn_entry != nullptr && get_scope_typeof(suspend_source_instr->scope) == nullptr) {
        fn_entry->alloca_gen_list.append(alloca_gen);
    }
    result_loc->written = true;
    result_loc->resolved_loc = &alloca_gen->base;
    return result_loc->resolved_loc;
}

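// Returns true when the pass1 result location is the `_` discard pointer.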
static bool result_loc_is_discard(ResultLoc *result_loc_pass1) {
    if (result_loc_pass1->id == ResultLocIdInstruction &&
        result_loc_pass1->source_instruction->id == IrInstSrcIdConst)
    {
        IrInstSrcConst *const_inst = reinterpret_cast<IrInstSrcConst *>(result_loc_pass1->source_instruction);
        if (value_is_comptime(const_inst->value) &&
            const_inst->value->type->id == ZigTypeIdPointer &&
            const_inst->value->data.x_ptr.special == ConstPtrSpecialDiscard)
        {
            return true;
        }
    }
    return false;
}

// When calling this function, the callsite must check for a noreturn result type and propagate it up.
static IrInstGen *ir_resolve_result_raw(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc, ZigType *value_type, IrInstGen *value, bool force_runtime,
        bool allow_discard)
{
    Error err;
    if (result_loc->resolved_loc != nullptr) {
        // Allow redoing the result location if the value is comptime-known and the previously resolved one is not.
        if (value == nullptr || !instr_is_comptime(value) || instr_is_comptime(result_loc->resolved_loc)) {
            return result_loc->resolved_loc;
        }
    }
    result_loc->gen_instruction = value;
    result_loc->implicit_elem_type = value_type;
    switch (result_loc->id) {
        case ResultLocIdInvalid:
        case ResultLocIdPeerParent:
            zig_unreachable();
        case ResultLocIdNone: {
            if (value != nullptr) {
                return nullptr;
            }
            // We need to return a result location but don't have one; use a stack allocation.
            return ir_resolve_no_result_loc(ira, suspend_source_instr, result_loc, value_type);
        }
        case ResultLocIdVar: {
            ResultLocVar *result_loc_var = reinterpret_cast<ResultLocVar *>(result_loc);
            assert(result_loc->source_instruction->id == IrInstSrcIdAlloca);
            IrInstSrcAlloca *alloca_src = reinterpret_cast<IrInstSrcAlloca *>(result_loc->source_instruction);

            ZigVar *var = result_loc_var->var;
            if (var->var_type != nullptr && !ir_get_var_is_comptime(var)) {
                // This is at least the second time we've seen this variable declaration during analysis.
                // This means that this is actually a different variable due to, e.g. an inline while loop.
                // We make a new variable so that it can hold a different type, and so the debug info can
                // be distinct.
                ZigVar *new_var = create_local_var(ira->codegen, var->decl_node, var->child_scope,
                    buf_create_from_str(var->name), var->src_is_const, var->gen_is_const,
                    var->shadowable, var->is_comptime, true);
                new_var->align_bytes = var->align_bytes;

                var->next_var = new_var;
                var = new_var;
            }
            if (value_type->id == ZigTypeIdUnreachable || value_type->id == ZigTypeIdOpaque) {
                ir_add_error(ira, &result_loc->source_instruction->base,
                    buf_sprintf("variable of type '%s' not allowed", buf_ptr(&value_type->name)));
                return ira->codegen->invalid_inst_gen;
            }
            if (alloca_src->base.child == nullptr || var->ptr_instruction == nullptr) {
                bool force_comptime;
                if (!ir_resolve_comptime(ira, alloca_src->is_comptime->child, &force_comptime))
                    return ira->codegen->invalid_inst_gen;
                uint32_t align = 0;
                if (alloca_src->align != nullptr && !ir_resolve_align(ira, alloca_src->align->child, nullptr, &align)) {
                    return ira->codegen->invalid_inst_gen;
                }
                IrInstGen *alloca_gen = ir_analyze_alloca(ira, &result_loc->source_instruction->base, value_type,
                        align, alloca_src->name_hint, force_comptime);
                if (force_runtime) {
                    alloca_gen->value->data.x_ptr.mut = ConstPtrMutRuntimeVar;
                    alloca_gen->value->special = ConstValSpecialRuntime;
                }
                if (alloca_src->base.child != nullptr && !result_loc->written) {
                    alloca_src->base.child->base.ref_count = 0;
                }
                alloca_src->base.child = alloca_gen;
                var->ptr_instruction = alloca_gen;
            }
            result_loc->written = true;
            result_loc->resolved_loc = alloca_src->base.child;
            return alloca_src->base.child;
        }
        case ResultLocIdInstruction: {
            result_loc->written = true;
            result_loc->resolved_loc = result_loc->source_instruction->child;
            return result_loc->resolved_loc;
        }
        case ResultLocIdReturn: {
            if (value != nullptr) {
                reinterpret_cast<ResultLocReturn *>(result_loc)->implicit_return_type_done = true;
                ira->src_implicit_return_type_list.append(value);
            }
            result_loc->written = true;
            result_loc->resolved_loc = ira->return_ptr;
            return result_loc->resolved_loc;
        }
        case ResultLocIdPeer: {
            ResultLocPeer *result_peer = reinterpret_cast<ResultLocPeer *>(result_loc);
            ResultLocPeerParent *peer_parent = result_peer->parent;

            if (peer_parent->peers.length == 1) {
                IrInstGen *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
                        value_type, value, force_runtime, true);
                result_peer->suspend_pos.basic_block_index = SIZE_MAX;
                result_peer->suspend_pos.instruction_index = SIZE_MAX;
                if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value->type) ||
                    parent_result_loc->value->type->id == ZigTypeIdUnreachable)
                {
                    return parent_result_loc;
                }
                result_loc->written = true;
                result_loc->resolved_loc = parent_result_loc;
                return result_loc->resolved_loc;
            }

            bool is_condition_comptime;
            if (!ir_resolve_comptime(ira, peer_parent->is_comptime->child, &is_condition_comptime))
                return ira->codegen->invalid_inst_gen;
            if (is_condition_comptime) {
                peer_parent->skipped = true;
                return ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
                        value_type, value, force_runtime, true);
            }
            bool peer_parent_has_type;
            if ((err = ir_result_has_type(ira, peer_parent->parent, &peer_parent_has_type)))
                return ira->codegen->invalid_inst_gen;
            if (peer_parent_has_type) {
                peer_parent->skipped = true;
                IrInstGen *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
                        value_type, value, force_runtime || !is_condition_comptime, true);
                if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value->type) ||
                    parent_result_loc->value->type->id == ZigTypeIdUnreachable)
                {
                    return parent_result_loc;
                }
                peer_parent->parent->written = true;
                result_loc->written = true;
                result_loc->resolved_loc = parent_result_loc;
                return result_loc->resolved_loc;
            }

            if (peer_parent->resolved_type == nullptr) {
                if (peer_parent->end_bb->suspend_instruction_ref == nullptr) {
                    peer_parent->end_bb->suspend_instruction_ref = suspend_source_instr;
                }
                IrInstGen *unreach_inst = ira_suspend(ira, suspend_source_instr, result_peer->next_bb,
                        &result_peer->suspend_pos);
                if (result_peer->next_bb == nullptr) {
                    ir_start_next_bb(ira);
                }
                return unreach_inst;
            }

            IrInstGen *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
                    peer_parent->resolved_type, nullptr, force_runtime, true);
            if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value->type) ||
                parent_result_loc->value->type->id == ZigTypeIdUnreachable)
            {
                return parent_result_loc;
            }
            // Because is_condition_comptime is false, we mark this as a runtime pointer.
            parent_result_loc->value->special = ConstValSpecialRuntime;
            result_loc->written = true;
            result_loc->resolved_loc = parent_result_loc;
            return result_loc->resolved_loc;
        }
        case ResultLocIdCast: {
            ResultLocCast *result_cast = reinterpret_cast<ResultLocCast *>(result_loc);
            ZigType *dest_type = ir_resolve_type(ira, result_cast->base.source_instruction->child);
            if (type_is_invalid(dest_type))
                return ira->codegen->invalid_inst_gen;

            if (dest_type == ira->codegen->builtin_types.entry_var) {
                return ir_resolve_no_result_loc(ira, suspend_source_instr, result_loc, value_type);
            }

            IrInstGen *casted_value;
            if (value != nullptr) {
                casted_value = ir_implicit_cast2(ira, suspend_source_instr, value, dest_type);
                if (type_is_invalid(casted_value->value->type))
                    return ira->codegen->invalid_inst_gen;
                dest_type = casted_value->value->type;
            } else {
                casted_value = nullptr;
            }

            IrInstGen *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, result_cast->parent,
                    dest_type, casted_value, force_runtime, true);
            if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value->type) ||
                parent_result_loc->value->type->id == ZigTypeIdUnreachable)
            {
                return parent_result_loc;
            }

            ZigType *parent_ptr_type = parent_result_loc->value->type;
            assert(parent_ptr_type->id == ZigTypeIdPointer);

            if ((err = type_resolve(ira->codegen, parent_ptr_type->data.pointer.child_type,
                            ResolveStatusAlignmentKnown)))
            {
                return ira->codegen->invalid_inst_gen;
            }
            uint64_t parent_ptr_align = get_ptr_align(ira->codegen, parent_ptr_type);
            if ((err = type_resolve(ira->codegen, value_type, ResolveStatusAlignmentKnown))) {
                return ira->codegen->invalid_inst_gen;
            }
            if (!type_has_bits(ira->codegen, value_type)) {
                parent_ptr_align = 0;
            }
            // If we're casting from a sentinel-terminated array to a non-sentinel-terminated array,
            // we actually need the result location pointer to *not* have a sentinel. Otherwise the generated
            // memcpy will write an extra byte to the destination, and THAT'S NO GOOD.
            ZigType *ptr_elem_type;
            if (value_type->id == ZigTypeIdArray && value_type->data.array.sentinel != nullptr &&
                dest_type->id == ZigTypeIdArray && dest_type->data.array.sentinel == nullptr)
            {
                ptr_elem_type = get_array_type(ira->codegen, value_type->data.array.child_type,
                        value_type->data.array.len, nullptr);
            } else {
                ptr_elem_type = value_type;
            }
            ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, ptr_elem_type,
                    parent_ptr_type->data.pointer.is_const, parent_ptr_type->data.pointer.is_volatile, PtrLenSingle,
                    parent_ptr_align, 0, 0, parent_ptr_type->data.pointer.allow_zero);

            ConstCastOnly const_cast_result = types_match_const_cast_only(ira,
                    parent_result_loc->value->type, ptr_type,
                    result_cast->base.source_instruction->base.source_node, false);
            if (const_cast_result.id == ConstCastResultIdInvalid)
                return ira->codegen->invalid_inst_gen;
            if (const_cast_result.id != ConstCastResultIdOk) {
                if (allow_discard) {
                    return parent_result_loc;
                }
                // We will not be able to provide a result location for this value. Create
                // a new result location.
                result_cast->parent->written = false;
                return ir_resolve_no_result_loc(ira, suspend_source_instr, result_loc, value_type);
            }

            result_loc->written = true;
            result_loc->resolved_loc = ir_analyze_ptr_cast(ira, suspend_source_instr, parent_result_loc,
                    &parent_result_loc->base, ptr_type, &result_cast->base.source_instruction->base, false, false);
            return result_loc->resolved_loc;
        }
        case ResultLocIdBitCast: {
            ResultLocBitCast *result_bit_cast = reinterpret_cast<ResultLocBitCast *>(result_loc);
            ZigType *dest_type = ir_resolve_type(ira, result_bit_cast->base.source_instruction->child);
            if (type_is_invalid(dest_type))
                return ira->codegen->invalid_inst_gen;

            ZigType *dest_cg_ptr_type;
            if ((err = get_codegen_ptr_type(ira->codegen, dest_type, &dest_cg_ptr_type)))
                return ira->codegen->invalid_inst_gen;
            if (dest_cg_ptr_type != nullptr) {
                ir_add_error(ira, &result_loc->source_instruction->base,
                        buf_sprintf("unable to @bitCast to pointer type '%s'", buf_ptr(&dest_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            if (!type_can_bit_cast(dest_type)) {
                ir_add_error(ira, &result_loc->source_instruction->base,
                        buf_sprintf("unable to @bitCast to type '%s'", buf_ptr(&dest_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            ZigType *value_cg_ptr_type;
            if ((err = get_codegen_ptr_type(ira->codegen, value_type, &value_cg_ptr_type)))
                return ira->codegen->invalid_inst_gen;
            if (value_cg_ptr_type != nullptr) {
                ir_add_error(ira, suspend_source_instr,
                    buf_sprintf("unable to @bitCast from pointer type '%s'", buf_ptr(&value_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            if (!type_can_bit_cast(value_type)) {
                ir_add_error(ira, suspend_source_instr,
                        buf_sprintf("unable to @bitCast from type '%s'", buf_ptr(&value_type->name)));
                return ira->codegen->invalid_inst_gen;
            }

            IrInstGen *bitcasted_value;
            if (value != nullptr) {
                bitcasted_value = ir_analyze_bit_cast(ira, &result_loc->source_instruction->base, value, dest_type);
                dest_type = bitcasted_value->value->type;
            } else {
                bitcasted_value = nullptr;
            }

            if (bitcasted_value != nullptr && type_is_invalid(bitcasted_value->value->type)) {
                return bitcasted_value;
            }

            bool parent_was_written = result_bit_cast->parent->written;
            IrInstGen *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, result_bit_cast->parent,
                    dest_type, bitcasted_value, force_runtime, true);
            if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value->type) ||
                parent_result_loc->value->type->id == ZigTypeIdUnreachable)
            {
                return parent_result_loc;
            }
            ZigType *parent_ptr_type = parent_result_loc->value->type;
            assert(parent_ptr_type->id == ZigTypeIdPointer);
            ZigType *child_type = parent_ptr_type->data.pointer.child_type;

            if (result_loc_is_discard(result_bit_cast->parent)) {
                assert(allow_discard);
                return parent_result_loc;
            }

            if ((err = type_resolve(ira->codegen, child_type, ResolveStatusSizeKnown))) {
                return ira->codegen->invalid_inst_gen;
            }

            if ((err = type_resolve(ira->codegen, value_type, ResolveStatusSizeKnown))) {
                return ira->codegen->invalid_inst_gen;
            }

            if (child_type != ira->codegen->builtin_types.entry_var) {
                if (type_size(ira->codegen, child_type) != type_size(ira->codegen, value_type)) {
                    // pointer cast won't work; we need a temporary location.
                    result_bit_cast->parent->written = parent_was_written;
                    result_loc->written = true;
                    result_loc->resolved_loc = ir_resolve_result(ira, suspend_source_instr, no_result_loc(),
                            value_type, bitcasted_value, force_runtime, true);
                    return result_loc->resolved_loc;
                }
            }
            uint64_t parent_ptr_align = 0;
            if (type_has_bits(ira->codegen, value_type)) parent_ptr_align = get_ptr_align(ira->codegen, parent_ptr_type);
            ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, value_type,
                    parent_ptr_type->data.pointer.is_const, parent_ptr_type->data.pointer.is_volatile, PtrLenSingle,
                    parent_ptr_align, 0, 0, parent_ptr_type->data.pointer.allow_zero);

            result_loc->written = true;
            result_loc->resolved_loc = ir_analyze_ptr_cast(ira, suspend_source_instr, parent_result_loc,
                    &parent_result_loc->base, ptr_type, &result_bit_cast->base.source_instruction->base, false, false);
            return result_loc->resolved_loc;
        }
    }
    zig_unreachable();
}

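// Wrapper around ir_resolve_result_raw. Handles discarded result locations,
// downgrades inferred pointers to runtime when a runtime value is stored,
// appends fields to inferred struct types (anonymous struct/tuple result
// locations), and unwraps the pointer for implicit optional and error union
// payload stores.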
static IrInstGen *ir_resolve_result(IrAnalyze *ira, IrInst *suspend_source_instr,
        ResultLoc *result_loc_pass1, ZigType *value_type, IrInstGen *value, bool force_runtime,
        bool allow_discard)
{
    if (!allow_discard && result_loc_is_discard(result_loc_pass1)) {
        result_loc_pass1 = no_result_loc();
    }
    bool was_written = result_loc_pass1->written;
    IrInstGen *result_loc = ir_resolve_result_raw(ira, suspend_source_instr, result_loc_pass1, value_type,
            value, force_runtime, allow_discard);
    if (result_loc == nullptr || result_loc->value->type->id == ZigTypeIdUnreachable ||
            type_is_invalid(result_loc->value->type))
    {
        return result_loc;
    }

    if ((force_runtime || (value != nullptr && !instr_is_comptime(value))) &&
        result_loc_pass1->written && result_loc->value->data.x_ptr.mut == ConstPtrMutInfer)
    {
        result_loc->value->special = ConstValSpecialRuntime;
    }

    InferredStructField *isf = result_loc->value->type->data.pointer.inferred_struct_field;
    if (isf != nullptr) {
        TypeStructField *field;
        IrInstGen *casted_ptr;
        if (isf->already_resolved) {
            field = find_struct_type_field(isf->inferred_struct_type, isf->field_name);
            casted_ptr = result_loc;
        } else {
            isf->already_resolved = true;
            // Now it's time to add the field to the struct type.
            uint32_t old_field_count = isf->inferred_struct_type->data.structure.src_field_count;
            uint32_t new_field_count = old_field_count + 1;
            isf->inferred_struct_type->data.structure.src_field_count = new_field_count;
            isf->inferred_struct_type->data.structure.fields = realloc_type_struct_fields(
                    isf->inferred_struct_type->data.structure.fields, old_field_count, new_field_count);

            field = isf->inferred_struct_type->data.structure.fields[old_field_count];
            field->name = isf->field_name;
            field->type_entry = value_type;
            field->type_val = create_const_type(ira->codegen, field->type_entry);
            field->src_index = old_field_count;
            field->decl_node = value ? value->base.source_node : suspend_source_instr->source_node;
            if (value && instr_is_comptime(value)) {
                ZigValue *val = ir_resolve_const(ira, value, UndefOk);
                if (!val)
                    return ira->codegen->invalid_inst_gen;
                field->is_comptime = true;
                field->init_val = ira->codegen->pass1_arena->create<ZigValue>();
                copy_const_val(ira->codegen, field->init_val, val);
                return result_loc;
            }

            ZigType *struct_ptr_type = get_pointer_to_type(ira->codegen, isf->inferred_struct_type, false);
            if (instr_is_comptime(result_loc)) {
                casted_ptr = ir_const(ira, suspend_source_instr, struct_ptr_type);
                copy_const_val(ira->codegen, casted_ptr->value, result_loc->value);
                casted_ptr->value->type = struct_ptr_type;
            } else {
                casted_ptr = result_loc;
            }
            if (instr_is_comptime(casted_ptr)) {
                ZigValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad);
                if (!ptr_val)
                    return ira->codegen->invalid_inst_gen;
                if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
                    ZigValue *struct_val = const_ptr_pointee(ira, ira->codegen, ptr_val,
                            suspend_source_instr->source_node);
                    struct_val->special = ConstValSpecialStatic;
                    struct_val->data.x_struct.fields = realloc_const_vals_ptrs(ira->codegen,
                            struct_val->data.x_struct.fields, old_field_count, new_field_count);

                    ZigValue *field_val = struct_val->data.x_struct.fields[old_field_count];
                    field_val->special = ConstValSpecialUndef;
                    field_val->type = field->type_entry;
                    field_val->parent.id = ConstParentIdStruct;
                    field_val->parent.data.p_struct.struct_val = struct_val;
                    field_val->parent.data.p_struct.field_index = old_field_count;
                }
            }
        }

        result_loc = ir_analyze_struct_field_ptr(ira, suspend_source_instr, field, casted_ptr,
                isf->inferred_struct_type, true);
        result_loc_pass1->resolved_loc = result_loc;
    }

    if (was_written) {
        return result_loc;
    }

    ir_assert(result_loc->value->type->id == ZigTypeIdPointer, suspend_source_instr);
    ZigType *actual_elem_type = result_loc->value->type->data.pointer.child_type;
    if (actual_elem_type->id == ZigTypeIdOptional && value_type->id != ZigTypeIdOptional &&
            value_type->id != ZigTypeIdNull && value_type->id != ZigTypeIdUndefined)
    {
        bool same_comptime_repr = types_have_same_zig_comptime_repr(ira->codegen, actual_elem_type, value_type);
        if (!same_comptime_repr) {
            result_loc_pass1->written = was_written;
            return ir_analyze_unwrap_optional_payload(ira, suspend_source_instr, result_loc, false, true);
        }
    } else if (actual_elem_type->id == ZigTypeIdErrorUnion && value_type->id != ZigTypeIdErrorUnion &&
            value_type->id != ZigTypeIdUndefined)
    {
        if (value_type->id == ZigTypeIdErrorSet) {
            return ir_analyze_unwrap_err_code(ira, suspend_source_instr, result_loc, true);
        } else {
            IrInstGen *unwrapped_err_ptr = ir_analyze_unwrap_error_payload(ira, suspend_source_instr,
                    result_loc, false, true);
            ZigType *actual_payload_type = actual_elem_type->data.error_union.payload_type;
            if (actual_payload_type->id == ZigTypeIdOptional && value_type->id != ZigTypeIdOptional &&
                value_type->id != ZigTypeIdNull && value_type->id != ZigTypeIdUndefined)
            {
                return ir_analyze_unwrap_optional_payload(ira, suspend_source_instr, unwrapped_err_ptr, false, true);
            } else {
                return unwrapped_err_ptr;
            }
        }
    }
    return result_loc;
}

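// Resolves the result location for a pass-1 "resolve result" instruction. When no explicit
// type is given, the element type is taken from the surrounding cast or return location; if
// it is still `var`, an inferred struct type is created so an anonymous literal can define
// the fields (array-style literals such as `.{ 1, 2 }` become an inferred tuple). If no
// result location is produced, a comptime pointer to an undefined value is returned, with a
// special case for the return result location of async functions.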
static IrInstGen *ir_analyze_instruction_resolve_result(IrAnalyze *ira, IrInstSrcResolveResult *instruction) {
    ZigType *implicit_elem_type;
    if (instruction->ty == nullptr) {
        if (instruction->result_loc->id == ResultLocIdCast) {
            implicit_elem_type = ir_resolve_type(ira,
                    instruction->result_loc->source_instruction->child);
            if (type_is_invalid(implicit_elem_type))
                return ira->codegen->invalid_inst_gen;
        } else if (instruction->result_loc->id == ResultLocIdReturn) {
            implicit_elem_type = ira->explicit_return_type;
            if (type_is_invalid(implicit_elem_type))
                return ira->codegen->invalid_inst_gen;
        } else {
            implicit_elem_type = ira->codegen->builtin_types.entry_var;
        }
        if (implicit_elem_type == ira->codegen->builtin_types.entry_var) {
            Buf *bare_name = buf_alloc();
            Buf *name = get_anon_type_name(ira->codegen, nullptr, container_string(ContainerKindStruct),
                    instruction->base.base.scope, instruction->base.base.source_node, bare_name);

            StructSpecial struct_special = StructSpecialInferredStruct;
            if (instruction->base.base.source_node->type == NodeTypeContainerInitExpr &&
                instruction->base.base.source_node->data.container_init_expr.kind == ContainerInitKindArray)
            {
                struct_special = StructSpecialInferredTuple;
            }

            ZigType *inferred_struct_type = get_partial_container_type(ira->codegen,
                    instruction->base.base.scope, ContainerKindStruct, instruction->base.base.source_node,
                    buf_ptr(name), bare_name, ContainerLayoutAuto);
            inferred_struct_type->data.structure.special = struct_special;
            inferred_struct_type->data.structure.resolve_status = ResolveStatusBeingInferred;
            implicit_elem_type = inferred_struct_type;
        }
    } else {
        implicit_elem_type = ir_resolve_type(ira, instruction->ty->child);
        if (type_is_invalid(implicit_elem_type))
            return ira->codegen->invalid_inst_gen;
    }
    IrInstGen *result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
            implicit_elem_type, nullptr, false, true);
    if (result_loc != nullptr)
        return result_loc;

    ZigFn *fn = ira->new_irb.exec->fn_entry;
    if (fn != nullptr && fn->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync &&
            instruction->result_loc->id == ResultLocIdReturn)
    {
        result_loc = ir_resolve_result(ira, &instruction->base.base, no_result_loc(),
                implicit_elem_type, nullptr, false, true);
        if (result_loc != nullptr &&
                (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable))
        {
            return result_loc;
        }
        result_loc->value->special = ConstValSpecialRuntime;
        return result_loc;
    }

    IrInstGen *result = ir_const(ira, &instruction->base.base, implicit_elem_type);
    result->value->special = ConstValSpecialUndef;
    IrInstGen *ptr = ir_get_ref(ira, &instruction->base.base, result, false, false);
    ptr->value->data.x_ptr.mut = ConstPtrMutComptimeVar;
    return ptr;
}

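// Clears the pass-2 state of a result location so it can be resolved again, recursing into
// the peers of a peer parent and detaching the analyzed child of a variable's alloca.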
static void ir_reset_result(ResultLoc *result_loc) {
    result_loc->written = false;
    result_loc->resolved_loc = nullptr;
    result_loc->gen_instruction = nullptr;
    result_loc->implicit_elem_type = nullptr;
    switch (result_loc->id) {
        case ResultLocIdInvalid:
            zig_unreachable();
        case ResultLocIdPeerParent: {
            ResultLocPeerParent *peer_parent = reinterpret_cast<ResultLocPeerParent *>(result_loc);
            peer_parent->skipped = false;
            peer_parent->done_resuming = false;
            peer_parent->resolved_type = nullptr;
            for (size_t i = 0; i < peer_parent->peers.length; i += 1) {
                ir_reset_result(&peer_parent->peers.at(i)->base);
            }
            break;
        }
        case ResultLocIdVar: {
            IrInstSrcAlloca *alloca_src = reinterpret_cast<IrInstSrcAlloca *>(result_loc->source_instruction);
            alloca_src->base.child = nullptr;
            break;
        }
        case ResultLocIdReturn:
            reinterpret_cast<ResultLocReturn *>(result_loc)->implicit_return_type_done = false;
            break;
        case ResultLocIdPeer:
        case ResultLocIdNone:
        case ResultLocIdInstruction:
        case ResultLocIdBitCast:
        case ResultLocIdCast:
            break;
    }
}

static IrInstGen *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInstSrcResetResult *instruction) {
    ir_reset_result(instruction->result_loc);
    return ir_const_void(ira, &instruction->base.base);
}

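// For an @asyncCall-style call, determines where the callee's return value is written.
// When the passed return pointer expression has type void, the result lives inside the
// async frame itself; otherwise the pointer is casted to a pointer to the return type.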
static IrInstGen *get_async_call_result_loc(IrAnalyze *ira, IrInst* source_instr,
        ZigType *fn_ret_type, bool is_async_call_builtin, IrInstGen **args_ptr, size_t args_len,
        IrInstGen *ret_ptr_uncasted)
{
    ir_assert(is_async_call_builtin, source_instr);
    if (type_is_invalid(ret_ptr_uncasted->value->type))
        return ira->codegen->invalid_inst_gen;
    if (ret_ptr_uncasted->value->type->id == ZigTypeIdVoid) {
        // Result location will be inside the async frame.
        return nullptr;
    }
    return ir_implicit_cast(ira, ret_ptr_uncasted, get_pointer_to_type(ira->codegen, fn_ret_type, false));
}

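// Lowers an async call. With a new stack (the @asyncCall frame buffer) the call produces an
// `anyframe->T` value; without one the callee must be comptime-known and the call's result
// location is resolved to the callee's frame type.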
static IrInstGen *ir_analyze_async_call(IrAnalyze *ira, IrInst* source_instr, ZigFn *fn_entry,
        ZigType *fn_type, IrInstGen *fn_ref, IrInstGen **casted_args, size_t arg_count,
        IrInstGen *casted_new_stack, bool is_async_call_builtin, IrInstGen *ret_ptr_uncasted,
        ResultLoc *call_result_loc)
{
    if (fn_entry == nullptr) {
        if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) {
            ir_add_error(ira, &fn_ref->base,
                buf_sprintf("expected async function, found '%s'", buf_ptr(&fn_type->name)));
            return ira->codegen->invalid_inst_gen;
        }
        if (casted_new_stack == nullptr) {
            ir_add_error(ira, &fn_ref->base, buf_sprintf("function is not comptime-known; @asyncCall required"));
            return ira->codegen->invalid_inst_gen;
        }
    }
    if (casted_new_stack != nullptr) {
        ZigType *fn_ret_type = fn_type->data.fn.fn_type_id.return_type;
        IrInstGen *ret_ptr = get_async_call_result_loc(ira, source_instr, fn_ret_type, is_async_call_builtin,
                casted_args, arg_count, ret_ptr_uncasted);
        if (ret_ptr != nullptr && type_is_invalid(ret_ptr->value->type))
            return ira->codegen->invalid_inst_gen;

        ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_ret_type);

        IrInstGenCall *call_gen = ir_build_call_gen(ira, source_instr, fn_entry, fn_ref,
                arg_count, casted_args, CallModifierAsync, casted_new_stack,
                is_async_call_builtin, ret_ptr, anyframe_type);
        return &call_gen->base;
    } else {
        ZigType *frame_type = get_fn_frame_type(ira->codegen, fn_entry);
        IrInstGen *result_loc = ir_resolve_result(ira, source_instr, call_result_loc,
                frame_type, nullptr, true, false);
        if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
            return result_loc;
        }
        result_loc = ir_implicit_cast2(ira, source_instr, result_loc,
                get_pointer_to_type(ira->codegen, frame_type, false));
        if (type_is_invalid(result_loc->value->type))
            return ira->codegen->invalid_inst_gen;
        return &ir_build_call_gen(ira, source_instr, fn_entry, fn_ref, arg_count,
                casted_args, CallModifierAsync, casted_new_stack,
                is_async_call_builtin, result_loc, frame_type)->base;
    }
}
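
// Binds one argument of an inline/comptime call: casts it to the declared parameter type
// (unless the parameter is declared `var`), resolves it to a constant, and introduces it as
// a const variable in the forked scope used to evaluate the function body.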
static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node,
    IrInstGen *arg, Scope **exec_scope, size_t *next_proto_i)
{
    AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(*next_proto_i);
    assert(param_decl_node->type == NodeTypeParamDecl);

    IrInstGen *casted_arg;
    if (param_decl_node->data.param_decl.var_token == nullptr) {
        AstNode *param_type_node = param_decl_node->data.param_decl.type;
        ZigType *param_type = ir_analyze_type_expr(ira, *exec_scope, param_type_node);
        if (type_is_invalid(param_type))
            return false;

        casted_arg = ir_implicit_cast(ira, arg, param_type);
        if (type_is_invalid(casted_arg->value->type))
            return false;
    } else {
        casted_arg = arg;
    }

    ZigValue *arg_val = ir_resolve_const(ira, casted_arg, UndefOk);
    if (!arg_val)
        return false;

    Buf *param_name = param_decl_node->data.param_decl.name;
    ZigVar *var = add_variable(ira->codegen, param_decl_node,
        *exec_scope, param_name, true, arg_val, nullptr, arg_val->type);
    *exec_scope = var->child_scope;
    *next_proto_i += 1;

    return true;
}

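// Binds one argument of a generic function instantiation. comptime and `var`-typed
// parameters become part of the GenericFnTypeId used to memoize instantiations in
// generic_table; runtime parameters are appended to the instantiated function type.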
static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_node,
    IrInstGen *arg, IrInst *arg_src, Scope **child_scope, size_t *next_proto_i,
    GenericFnTypeId *generic_id, FnTypeId *fn_type_id, IrInstGen **casted_args,
    ZigFn *impl_fn)
{
    AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(*next_proto_i);
    assert(param_decl_node->type == NodeTypeParamDecl);
    bool is_var_args = param_decl_node->data.param_decl.is_var_args;
    bool arg_part_of_generic_id = false;
    IrInstGen *casted_arg;
    if (is_var_args) {
        arg_part_of_generic_id = true;
        casted_arg = arg;
    } else {
        if (param_decl_node->data.param_decl.var_token == nullptr) {
            AstNode *param_type_node = param_decl_node->data.param_decl.type;
            ZigType *param_type = ir_analyze_type_expr(ira, *child_scope, param_type_node);
            if (type_is_invalid(param_type))
                return false;

            casted_arg = ir_implicit_cast2(ira, arg_src, arg, param_type);
            if (type_is_invalid(casted_arg->value->type))
                return false;
        } else {
            arg_part_of_generic_id = true;
            casted_arg = arg;
        }
    }

    bool comptime_arg = param_decl_node->data.param_decl.is_comptime;
    if (!comptime_arg) {
        switch (type_requires_comptime(ira->codegen, casted_arg->value->type)) {
        case ReqCompTimeInvalid:
            return false;
        case ReqCompTimeYes:
            comptime_arg = true;
            break;
        case ReqCompTimeNo:
            break;
        }
    }

    ZigValue *arg_val;

    if (comptime_arg) {
        arg_part_of_generic_id = true;
        arg_val = ir_resolve_const(ira, casted_arg, UndefBad);
        if (!arg_val)
            return false;
    } else {
        arg_val = create_const_runtime(ira->codegen, casted_arg->value->type);
    }
    if (arg_part_of_generic_id) {
        copy_const_val(ira->codegen, &generic_id->params[generic_id->param_count], arg_val);
        generic_id->param_count += 1;
    }

    Buf *param_name = param_decl_node->data.param_decl.name;
    if (!param_name) return false;
    if (!is_var_args) {
        ZigVar *var = add_variable(ira->codegen, param_decl_node,
            *child_scope, param_name, true, arg_val, nullptr, arg_val->type);
        *child_scope = var->child_scope;
        var->shadowable = !comptime_arg;

        *next_proto_i += 1;
    } else if (casted_arg->value->type->id == ZigTypeIdComptimeInt ||
            casted_arg->value->type->id == ZigTypeIdComptimeFloat)
    {
        ir_add_error(ira, &casted_arg->base,
            buf_sprintf("compiler bug: integer and float literals in var args function must be casted. https://github.com/ziglang/zig/issues/557"));
        return false;
    }

    if (!comptime_arg) {
        casted_args[fn_type_id->param_count] = casted_arg;
        FnTypeParamInfo *param_info = &fn_type_id->param_info[fn_type_id->param_count];
        param_info->type = casted_arg->value->type;
        param_info->is_noalias = param_decl_node->data.param_decl.is_noalias;
        impl_fn->param_source_nodes[fn_type_id->param_count] = param_decl_node;
        fn_type_id->param_count += 1;
    }

    return true;
}

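// Produces a pointer to a variable, following the next_var chain to the most recent
// shadowing instance. For comptime-known, non-extern, non-threadlocal variables the
// result is a comptime pointer directly at the value; otherwise a runtime var-ptr
// instruction is emitted with a stack/non-stack runtime hint.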
static IrInstGen *ir_get_var_ptr(IrAnalyze *ira, IrInst *source_instr, ZigVar *var) {
    while (var->next_var != nullptr) {
        var = var->next_var;
    }

    if (var->var_type == nullptr || type_is_invalid(var->var_type))
        return ira->codegen->invalid_inst_gen;

    bool is_volatile = false;
    ZigType *var_ptr_type = get_pointer_to_type_extra(ira->codegen, var->var_type,
            var->src_is_const, is_volatile, PtrLenSingle, var->align_bytes, 0, 0, false);

    if (var->ptr_instruction != nullptr) {
        return ir_implicit_cast(ira, var->ptr_instruction, var_ptr_type);
    }

    bool comptime_var_mem = ir_get_var_is_comptime(var);
    bool linkage_makes_it_runtime = var->decl_node->data.variable_declaration.is_extern;

    IrInstGen *result = ir_build_var_ptr_gen(ira, source_instr, var);
    result->value->type = var_ptr_type;

    if (!linkage_makes_it_runtime && !var->is_thread_local && value_is_comptime(var->const_value)) {
        ZigValue *val = var->const_value;
        switch (val->special) {
            case ConstValSpecialRuntime:
                break;
            case ConstValSpecialStatic: // fallthrough
            case ConstValSpecialLazy: // fallthrough
            case ConstValSpecialUndef: {
                ConstPtrMut ptr_mut;
                if (comptime_var_mem) {
                    ptr_mut = ConstPtrMutComptimeVar;
                } else if (var->gen_is_const) {
                    ptr_mut = ConstPtrMutComptimeConst;
                } else {
                    assert(!comptime_var_mem);
                    ptr_mut = ConstPtrMutRuntimeVar;
                }
                result->value->special = ConstValSpecialStatic;
                result->value->data.x_ptr.mut = ptr_mut;
                result->value->data.x_ptr.special = ConstPtrSpecialRef;
                result->value->data.x_ptr.data.ref.pointee = val;
                return result;
            }
        }
    }

    bool in_fn_scope = (scope_fn_entry(var->parent_scope) != nullptr);
    result->value->data.rh_ptr = in_fn_scope ? RuntimeHintPtrStack : RuntimeHintPtrNonStack;

    return result;
}

// This function is called when a comptime value becomes accessible at runtime.
static void mark_comptime_value_escape(IrAnalyze *ira, IrInst* source_instr, ZigValue *val) {
    ir_assert(value_is_comptime(val), source_instr);
    if (val->special == ConstValSpecialUndef)
        return;

    if (val->type->id == ZigTypeIdFn && val->type->data.fn.fn_type_id.cc == CallingConventionUnspecified) {
        ir_assert(val->data.x_ptr.special == ConstPtrSpecialFunction, source_instr);
        if (val->data.x_ptr.data.fn.fn_entry->non_async_node == nullptr) {
            val->data.x_ptr.data.fn.fn_entry->non_async_node = source_instr->source_node;
        }
    }
}

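// Analyzes a store through a pointer: rejects discarded errors and writes through const
// pointers, implicitly casts the value to the pointee type, performs the write at comptime
// when the destination is a comptime-known mutable location, and otherwise emits a runtime
// store (or a vector-element store when the pointer has a runtime-known vector index).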
static IrInstGen *ir_analyze_store_ptr(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *ptr, IrInstGen *uncasted_value, bool allow_write_through_const)
{
    assert(ptr->value->type->id == ZigTypeIdPointer);

    if (ptr->value->data.x_ptr.special == ConstPtrSpecialDiscard) {
        if (uncasted_value->value->type->id == ZigTypeIdErrorUnion ||
            uncasted_value->value->type->id == ZigTypeIdErrorSet)
        {
            ir_add_error(ira, source_instr, buf_sprintf("error is discarded"));
            return ira->codegen->invalid_inst_gen;
        }
        return ir_const_void(ira, source_instr);
    }

    if (ptr->value->type->data.pointer.is_const && !allow_write_through_const) {
        ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant"));
        return ira->codegen->invalid_inst_gen;
    }

    ZigType *child_type = ptr->value->type->data.pointer.child_type;
    IrInstGen *value = ir_implicit_cast(ira, uncasted_value, child_type);
    if (type_is_invalid(value->value->type))
        return ira->codegen->invalid_inst_gen;

    switch (type_has_one_possible_value(ira->codegen, child_type)) {
        case OnePossibleValueInvalid:
            return ira->codegen->invalid_inst_gen;
        case OnePossibleValueYes:
            return ir_const_void(ira, source_instr);
        case OnePossibleValueNo:
            break;
    }

    if (instr_is_comptime(ptr) && ptr->value->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
        if (!allow_write_through_const && ptr->value->data.x_ptr.mut == ConstPtrMutComptimeConst) {
            ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant"));
            return ira->codegen->invalid_inst_gen;
        }
        if ((allow_write_through_const && ptr->value->data.x_ptr.mut == ConstPtrMutComptimeConst) ||
            ptr->value->data.x_ptr.mut == ConstPtrMutComptimeVar ||
            ptr->value->data.x_ptr.mut == ConstPtrMutInfer)
        {
            if (instr_is_comptime(value)) {
                ZigValue *dest_val = const_ptr_pointee(ira, ira->codegen, ptr->value, source_instr->source_node);
                if (dest_val == nullptr)
                    return ira->codegen->invalid_inst_gen;
                if (dest_val->special != ConstValSpecialRuntime) {
                    copy_const_val(ira->codegen, dest_val, value->value);

                    if (ptr->value->data.x_ptr.mut == ConstPtrMutComptimeVar &&
                        !ira->new_irb.current_basic_block->must_be_comptime_source_instr)
                    {
                        ira->new_irb.current_basic_block->must_be_comptime_source_instr = source_instr;
                    }
                    return ir_const_void(ira, source_instr);
                }
            }
            if (ptr->value->data.x_ptr.mut == ConstPtrMutInfer) {
                ptr->value->special = ConstValSpecialRuntime;
            } else {
                ir_add_error(ira, source_instr,
                        buf_sprintf("cannot store runtime value in compile time variable"));
                ZigValue *dest_val = const_ptr_pointee_unchecked(ira->codegen, ptr->value);
                dest_val->type = ira->codegen->builtin_types.entry_invalid;

                return ira->codegen->invalid_inst_gen;
            }
        }
    }

    if (ptr->value->type->data.pointer.inferred_struct_field != nullptr &&
        child_type == ira->codegen->builtin_types.entry_var)
    {
        child_type = ptr->value->type->data.pointer.inferred_struct_field->inferred_struct_type;
    }

    switch (type_requires_comptime(ira->codegen, child_type)) {
        case ReqCompTimeInvalid:
            return ira->codegen->invalid_inst_gen;
        case ReqCompTimeYes:
            switch (type_has_one_possible_value(ira->codegen, ptr->value->type)) {
                case OnePossibleValueInvalid:
                    return ira->codegen->invalid_inst_gen;
                case OnePossibleValueNo:
                    ir_add_error(ira, source_instr,
                            buf_sprintf("cannot store runtime value in type '%s'", buf_ptr(&child_type->name)));
                    return ira->codegen->invalid_inst_gen;
                case OnePossibleValueYes:
                    return ir_const_void(ira, source_instr);
            }
            zig_unreachable();
        case ReqCompTimeNo:
            break;
    }

    if (instr_is_comptime(value)) {
        mark_comptime_value_escape(ira, source_instr, value->value);
    }

    // If this is a store to a pointer with a runtime-known vector index,
    // we have to figure out the IrInstGen which represents the index and
    // emit an IrInstGenVectorStoreElem, or emit a compile error explaining
    // why this store cannot work: the pointer addresses the vector as a
    // whole, so without a comptime-known element index we cannot perform
    // the insertion.
    if (ptr->value->type->data.pointer.vector_index == VECTOR_INDEX_RUNTIME) {
        if (ptr->id == IrInstGenIdElemPtr) {
            IrInstGenElemPtr *elem_ptr = (IrInstGenElemPtr *)ptr;
            return ir_build_vector_store_elem(ira, source_instr, elem_ptr->array_ptr,
                    elem_ptr->elem_index, value);
        }
        ir_add_error(ira, &ptr->base,
            buf_sprintf("unable to determine vector element index of type '%s'",
                buf_ptr(&ptr->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }

    return ir_build_store_ptr_gen(ira, source_instr, ptr, value);
}

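// Validates and casts the `new_stack` operand of a call. For @asyncCall with a
// comptime-known callee it becomes a pointer to the callee's frame type; otherwise it is
// casted to a u8 slice, and targets without a known stack pointer register reject
// stack switching outside of @asyncCall.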
static IrInstGen *analyze_casted_new_stack(IrAnalyze *ira, IrInst* source_instr,
        IrInstGen *new_stack, IrInst *new_stack_src, bool is_async_call_builtin, ZigFn *fn_entry)
{
    if (new_stack == nullptr)
        return nullptr;

    if (!is_async_call_builtin &&
        arch_stack_pointer_register_name(ira->codegen->zig_target->arch) == nullptr)
    {
        ir_add_error(ira, source_instr,
            buf_sprintf("target arch '%s' does not support calling with a new stack",
                target_arch_name(ira->codegen->zig_target->arch)));
    }

    if (is_async_call_builtin &&
        fn_entry != nullptr && new_stack->value->type->id == ZigTypeIdPointer &&
        new_stack->value->type->data.pointer.child_type->id == ZigTypeIdFnFrame)
    {
        ZigType *needed_frame_type = get_pointer_to_type(ira->codegen,
                get_fn_frame_type(ira->codegen, fn_entry), false);
        return ir_implicit_cast(ira, new_stack, needed_frame_type);
    } else {
        ZigType *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
                false, false, PtrLenUnknown, target_fn_align(ira->codegen->zig_target), 0, 0, false);
        ZigType *u8_slice = get_slice_type(ira->codegen, u8_ptr);
        ira->codegen->need_frame_size_prefix_data = true;
        return ir_implicit_cast2(ira, new_stack_src, new_stack, u8_slice);
    }
}

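// Core call analysis. After checking the calling convention and argument count, this
// dispatches on the kind of call: comptime calls interpret the function body (memoized per
// scope when cacheable), generic calls instantiate a specialized ZigFn keyed by
// GenericFnTypeId, async calls go through ir_analyze_async_call, and plain runtime calls
// cast each argument to its parameter type and emit an IrInstGenCall, resolving a result
// location first when the return type is passed by reference.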
static IrInstGen *ir_analyze_fn_call(IrAnalyze *ira, IrInst* source_instr,
    ZigFn *fn_entry, ZigType *fn_type, IrInstGen *fn_ref,
    IrInstGen *first_arg_ptr, IrInst *first_arg_ptr_src, CallModifier modifier,
    IrInstGen *new_stack, IrInst *new_stack_src, bool is_async_call_builtin,
    IrInstGen **args_ptr, size_t args_len, IrInstGen *ret_ptr, ResultLoc *call_result_loc)
{
    Error err;
    FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
    size_t first_arg_1_or_0 = first_arg_ptr ? 1 : 0;

    // For C-calling-convention (extern) functions, the var args parameter is not counted;
    // for Zig functions, it is.
    size_t var_args_1_or_0;
    if (fn_type_id->cc == CallingConventionC) {
        var_args_1_or_0 = 0;
    } else {
        var_args_1_or_0 = fn_type_id->is_var_args ? 1 : 0;
    }
    size_t src_param_count = fn_type_id->param_count - var_args_1_or_0;
    size_t call_param_count = args_len + first_arg_1_or_0;
    AstNode *source_node = source_instr->source_node;

    AstNode *fn_proto_node = fn_entry ? fn_entry->proto_node : nullptr;;

    if (fn_type_id->cc == CallingConventionNaked) {
        ErrorMsg *msg = ir_add_error(ira, &fn_ref->base, buf_sprintf("unable to call function with naked calling convention"));
        if (fn_proto_node) {
            add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here"));
        }
        return ira->codegen->invalid_inst_gen;
    }

    if (fn_type_id->is_var_args) {
        if (call_param_count < src_param_count) {
            ErrorMsg *msg = ir_add_error_node(ira, source_node,
                buf_sprintf("expected at least %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize "", src_param_count, call_param_count));
            if (fn_proto_node) {
                add_error_note(ira->codegen, msg, fn_proto_node,
                    buf_sprintf("declared here"));
            }
            return ira->codegen->invalid_inst_gen;
        }
    } else if (src_param_count != call_param_count) {
        ErrorMsg *msg = ir_add_error_node(ira, source_node,
            buf_sprintf("expected %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize "", src_param_count, call_param_count));
        if (fn_proto_node) {
            add_error_note(ira->codegen, msg, fn_proto_node,
                buf_sprintf("declared here"));
        }
        return ira->codegen->invalid_inst_gen;
    }

    if (modifier == CallModifierCompileTime) {
        // If we are evaluating an extern function in a TypeOf call, we can return an undefined value
        // of its return type.
        if (fn_entry != nullptr && get_scope_typeof(source_instr->scope) != nullptr &&
            fn_proto_node->data.fn_proto.is_extern) {

            assert(fn_entry->body_node == nullptr);
            AstNode *return_type_node = fn_proto_node->data.fn_proto.return_type;
            ZigType *return_type = ir_analyze_type_expr(ira, source_instr->scope, return_type_node);
            if (type_is_invalid(return_type))
                return ira->codegen->invalid_inst_gen;

            return ir_const_undef(ira, source_instr, return_type);
        }

        // No special handling is needed for compile time evaluation of generic functions.
        if (!fn_entry || fn_entry->body_node == nullptr) {
            ir_add_error(ira, &fn_ref->base, buf_sprintf("unable to evaluate constant expression"));
            return ira->codegen->invalid_inst_gen;
        }

        if (!ir_emit_backward_branch(ira, source_instr))
            return ira->codegen->invalid_inst_gen;

        // Fork a scope of the function with known values for the parameters.
        Scope *exec_scope = &fn_entry->fndef_scope->base;

        size_t next_proto_i = 0;
        if (first_arg_ptr) {
            assert(first_arg_ptr->value->type->id == ZigTypeIdPointer);

            bool first_arg_known_bare = false;
            if (fn_type_id->next_param_index >= 1) {
                ZigType *param_type = fn_type_id->param_info[next_proto_i].type;
                if (type_is_invalid(param_type))
                    return ira->codegen->invalid_inst_gen;
                first_arg_known_bare = param_type->id != ZigTypeIdPointer;
            }

            IrInstGen *first_arg;
            if (!first_arg_known_bare && handle_is_ptr(ira->codegen, first_arg_ptr->value->type->data.pointer.child_type)) {
                first_arg = first_arg_ptr;
            } else {
                first_arg = ir_get_deref(ira, &first_arg_ptr->base, first_arg_ptr, nullptr);
                if (type_is_invalid(first_arg->value->type))
                    return ira->codegen->invalid_inst_gen;
            }

            if (!ir_analyze_fn_call_inline_arg(ira, fn_proto_node, first_arg, &exec_scope, &next_proto_i))
                return ira->codegen->invalid_inst_gen;
        }

        for (size_t call_i = 0; call_i < args_len; call_i += 1) {
            IrInstGen *old_arg = args_ptr[call_i];

            if (!ir_analyze_fn_call_inline_arg(ira, fn_proto_node, old_arg, &exec_scope, &next_proto_i))
                return ira->codegen->invalid_inst_gen;
        }

        AstNode *return_type_node = fn_proto_node->data.fn_proto.return_type;
        ZigType *specified_return_type = ir_analyze_type_expr(ira, exec_scope, return_type_node);
        if (type_is_invalid(specified_return_type))
            return ira->codegen->invalid_inst_gen;
        ZigType *return_type;
        ZigType *inferred_err_set_type = nullptr;
        if (fn_proto_node->data.fn_proto.auto_err_set) {
            inferred_err_set_type = get_auto_err_set_type(ira->codegen, fn_entry);
            if ((err = type_resolve(ira->codegen, specified_return_type, ResolveStatusSizeKnown)))
                return ira->codegen->invalid_inst_gen;
            return_type = get_error_union_type(ira->codegen, inferred_err_set_type, specified_return_type);
        } else {
            return_type = specified_return_type;
        }

        bool cacheable = fn_eval_cacheable(exec_scope, return_type);
        ZigValue *result = nullptr;
        if (cacheable) {
            auto entry = ira->codegen->memoized_fn_eval_table.maybe_get(exec_scope);
            if (entry)
                result = entry->value;
        }

        if (result == nullptr) {
            // Analyze the fn body block like any other constant expression.
            AstNode *body_node = fn_entry->body_node;
            ZigValue *result_ptr;
            create_result_ptr(ira->codegen, return_type, &result, &result_ptr);

            if ((err = ir_eval_const_value(ira->codegen, exec_scope, body_node, result_ptr,
                ira->new_irb.exec->backward_branch_count, ira->new_irb.exec->backward_branch_quota,
                fn_entry, nullptr, source_instr->source_node, nullptr, ira->new_irb.exec, return_type_node,
                UndefOk)))
            {
                return ira->codegen->invalid_inst_gen;
            }

            if (inferred_err_set_type != nullptr) {
                inferred_err_set_type->data.error_set.incomplete = false;
                if (result->type->id == ZigTypeIdErrorUnion) {
                    ErrorTableEntry *err = result->data.x_err_union.error_set->data.x_err_set;
                    if (err != nullptr) {
                        inferred_err_set_type->data.error_set.err_count = 1;
                        inferred_err_set_type->data.error_set.errors = heap::c_allocator.create<ErrorTableEntry *>();
                        inferred_err_set_type->data.error_set.errors[0] = err;
                    }
                    ZigType *fn_inferred_err_set_type = result->type->data.error_union.err_set_type;
                    inferred_err_set_type->data.error_set.err_count = fn_inferred_err_set_type->data.error_set.err_count;
                    inferred_err_set_type->data.error_set.errors = fn_inferred_err_set_type->data.error_set.errors;
                } else if (result->type->id == ZigTypeIdErrorSet) {
                    inferred_err_set_type->data.error_set.err_count = result->type->data.error_set.err_count;
                    inferred_err_set_type->data.error_set.errors = result->type->data.error_set.errors;
                }
            }

            if (cacheable) {
                ira->codegen->memoized_fn_eval_table.put(exec_scope, result);
            }

            if (type_is_invalid(result->type)) {
                return ira->codegen->invalid_inst_gen;
            }
        }

        IrInstGen *new_instruction = ir_const_move(ira, source_instr, result);
        return ir_finish_anal(ira, new_instruction);
    }

    if (fn_type->data.fn.is_generic) {
        if (!fn_entry) {
            ir_add_error(ira, &fn_ref->base,
                buf_sprintf("calling a generic function requires compile-time known function value"));
            return ira->codegen->invalid_inst_gen;
        }

        size_t new_fn_arg_count = first_arg_1_or_0 + args_len;

        IrInstGen **casted_args = heap::c_allocator.allocate<IrInstGen *>(new_fn_arg_count);

        // Fork a scope of the function with known values for the parameters.
        Scope *parent_scope = fn_entry->fndef_scope->base.parent;
        ZigFn *impl_fn = create_fn(ira->codegen, fn_proto_node);
        impl_fn->param_source_nodes = heap::c_allocator.allocate<AstNode *>(new_fn_arg_count);
        buf_init_from_buf(&impl_fn->symbol_name, &fn_entry->symbol_name);
        impl_fn->fndef_scope = create_fndef_scope(ira->codegen, impl_fn->body_node, parent_scope, impl_fn);
        impl_fn->child_scope = &impl_fn->fndef_scope->base;
        FnTypeId inst_fn_type_id = {0};
        init_fn_type_id(&inst_fn_type_id, fn_proto_node, fn_type_id->cc, new_fn_arg_count);
        inst_fn_type_id.param_count = 0;
        inst_fn_type_id.is_var_args = false;

        // TODO maybe GenericFnTypeId can be replaced with using the child_scope directly
        // as the key in generic_table
        GenericFnTypeId *generic_id = heap::c_allocator.create<GenericFnTypeId>();
        generic_id->fn_entry = fn_entry;
        generic_id->param_count = 0;
        generic_id->params = ira->codegen->pass1_arena->allocate<ZigValue>(new_fn_arg_count);
        size_t next_proto_i = 0;

        if (first_arg_ptr) {
            assert(first_arg_ptr->value->type->id == ZigTypeIdPointer);

            bool first_arg_known_bare = false;
            if (fn_type_id->next_param_index >= 1) {
                ZigType *param_type = fn_type_id->param_info[next_proto_i].type;
                if (type_is_invalid(param_type))
                    return ira->codegen->invalid_inst_gen;
                first_arg_known_bare = param_type->id != ZigTypeIdPointer;
            }

            IrInstGen *first_arg;
            if (!first_arg_known_bare && handle_is_ptr(ira->codegen, first_arg_ptr->value->type->data.pointer.child_type)) {
                first_arg = first_arg_ptr;
            } else {
                first_arg = ir_get_deref(ira, &first_arg_ptr->base, first_arg_ptr, nullptr);
                if (type_is_invalid(first_arg->value->type))
                    return ira->codegen->invalid_inst_gen;
            }

            if (!ir_analyze_fn_call_generic_arg(ira, fn_proto_node, first_arg, first_arg_ptr_src,
                    &impl_fn->child_scope, &next_proto_i, generic_id, &inst_fn_type_id, casted_args, impl_fn))
            {
                return ira->codegen->invalid_inst_gen;
            }
        }

        ZigFn *parent_fn_entry = ira->new_irb.exec->fn_entry;
        assert(parent_fn_entry);
        for (size_t call_i = 0; call_i < args_len; call_i += 1) {
            IrInstGen *arg = args_ptr[call_i];

            AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(next_proto_i);
            assert(param_decl_node->type == NodeTypeParamDecl);

            if (!ir_analyze_fn_call_generic_arg(ira, fn_proto_node, arg, &arg->base, &impl_fn->child_scope,
                &next_proto_i, generic_id, &inst_fn_type_id, casted_args, impl_fn))
            {
                return ira->codegen->invalid_inst_gen;
            }
        }

        if (fn_proto_node->data.fn_proto.align_expr != nullptr) {
            ZigValue *align_result;
            ZigValue *result_ptr;
            create_result_ptr(ira->codegen, get_align_amt_type(ira->codegen), &align_result, &result_ptr);
            if ((err = ir_eval_const_value(ira->codegen, impl_fn->child_scope,
                fn_proto_node->data.fn_proto.align_expr, result_ptr,
                ira->new_irb.exec->backward_branch_count, ira->new_irb.exec->backward_branch_quota,
                nullptr, nullptr, fn_proto_node->data.fn_proto.align_expr, nullptr, ira->new_irb.exec,
                nullptr, UndefBad)))
            {
                return ira->codegen->invalid_inst_gen;
            }
            IrInstGenConst *const_instruction = ir_create_inst_noval<IrInstGenConst>(&ira->new_irb,
                    impl_fn->child_scope, fn_proto_node->data.fn_proto.align_expr);
            const_instruction->base.value = align_result;

            uint32_t align_bytes = 0;
            ir_resolve_align(ira, &const_instruction->base, nullptr, &align_bytes);
            impl_fn->align_bytes = align_bytes;
            inst_fn_type_id.alignment = align_bytes;
        }

        if (fn_proto_node->data.fn_proto.return_var_token == nullptr) {
            AstNode *return_type_node = fn_proto_node->data.fn_proto.return_type;
            ZigType *specified_return_type = ir_analyze_type_expr(ira, impl_fn->child_scope, return_type_node);
            if (type_is_invalid(specified_return_type))
                return ira->codegen->invalid_inst_gen;

            if (!is_valid_return_type(specified_return_type)) {
                ErrorMsg *msg = ir_add_error(ira, source_instr,
                    buf_sprintf("call to generic function with %s return type '%s' not allowed", type_id_name(specified_return_type->id), buf_ptr(&specified_return_type->name)));
                add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("function declared here"));

                Tld *tld = find_decl(ira->codegen, &fn_entry->fndef_scope->base, &specified_return_type->name);
                if (tld != nullptr) {
                    add_error_note(ira->codegen, msg, tld->source_node, buf_sprintf("type declared here"));
                }
                return ira->codegen->invalid_inst_gen;
            }

            if (fn_proto_node->data.fn_proto.auto_err_set) {
                ZigType *inferred_err_set_type = get_auto_err_set_type(ira->codegen, impl_fn);
                if ((err = type_resolve(ira->codegen, specified_return_type, ResolveStatusSizeKnown)))
                    return ira->codegen->invalid_inst_gen;
                inst_fn_type_id.return_type = get_error_union_type(ira->codegen, inferred_err_set_type, specified_return_type);
            } else {
                inst_fn_type_id.return_type = specified_return_type;
            }

            switch (type_requires_comptime(ira->codegen, specified_return_type)) {
            case ReqCompTimeYes:
                // Throw out our work and call the function as if it were comptime.
                return ir_analyze_fn_call(ira, source_instr, fn_entry, fn_type, fn_ref, first_arg_ptr,
                        first_arg_ptr_src, CallModifierCompileTime, new_stack, new_stack_src, is_async_call_builtin,
                        args_ptr, args_len, ret_ptr, call_result_loc);
            case ReqCompTimeInvalid:
                return ira->codegen->invalid_inst_gen;
            case ReqCompTimeNo:
                break;
            }
        }

        auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn);
        if (existing_entry) {
            // throw away all our work and use the existing function
            impl_fn = existing_entry->value;
        } else {
            // finish instantiating the function
            impl_fn->type_entry = get_fn_type(ira->codegen, &inst_fn_type_id);
            if (type_is_invalid(impl_fn->type_entry))
                return ira->codegen->invalid_inst_gen;

            impl_fn->ir_executable->source_node = source_instr->source_node;
            impl_fn->ir_executable->parent_exec = ira->new_irb.exec;
            impl_fn->analyzed_executable.source_node = source_instr->source_node;
            impl_fn->analyzed_executable.parent_exec = ira->new_irb.exec;
            impl_fn->analyzed_executable.backward_branch_quota = ira->new_irb.exec->backward_branch_quota;
            impl_fn->analyzed_executable.is_generic_instantiation = true;

            ira->codegen->fn_defs.append(impl_fn);
        }

        FnTypeId *impl_fn_type_id = &impl_fn->type_entry->data.fn.fn_type_id;

        if (fn_type_can_fail(impl_fn_type_id)) {
            parent_fn_entry->calls_or_awaits_errorable_fn = true;
        }

        IrInstGen *casted_new_stack = analyze_casted_new_stack(ira, source_instr, new_stack,
                new_stack_src, is_async_call_builtin, impl_fn);
        if (casted_new_stack != nullptr && type_is_invalid(casted_new_stack->value->type))
            return ira->codegen->invalid_inst_gen;

        size_t impl_param_count = impl_fn_type_id->param_count;
        if (modifier == CallModifierAsync) {
            IrInstGen *result = ir_analyze_async_call(ira, source_instr, impl_fn, impl_fn->type_entry,
                nullptr, casted_args, impl_param_count, casted_new_stack, is_async_call_builtin, ret_ptr,
                call_result_loc);
            return ir_finish_anal(ira, result);
        }

        IrInstGen *result_loc;
        if (handle_is_ptr(ira->codegen, impl_fn_type_id->return_type)) {
            result_loc = ir_resolve_result(ira, source_instr, call_result_loc,
                    impl_fn_type_id->return_type, nullptr, true, false);
            if (result_loc != nullptr) {
                if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
                    return result_loc;
                }
                if (result_loc->value->type->data.pointer.is_const) {
                    ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant"));
                    return ira->codegen->invalid_inst_gen;
                }

                IrInstGen *dummy_value = ir_const(ira, source_instr, impl_fn_type_id->return_type);
                dummy_value->value->special = ConstValSpecialRuntime;
                IrInstGen *dummy_result = ir_implicit_cast2(ira, source_instr,
                        dummy_value, result_loc->value->type->data.pointer.child_type);
                if (type_is_invalid(dummy_result->value->type))
                    return ira->codegen->invalid_inst_gen;
                ZigType *res_child_type = result_loc->value->type->data.pointer.child_type;
                if (res_child_type == ira->codegen->builtin_types.entry_var) {
                    res_child_type = impl_fn_type_id->return_type;
                }
                if (!handle_is_ptr(ira->codegen, res_child_type)) {
                    ir_reset_result(call_result_loc);
                    result_loc = nullptr;
                }
            }
        } else if (is_async_call_builtin) {
            result_loc = get_async_call_result_loc(ira, source_instr, impl_fn_type_id->return_type,
                    is_async_call_builtin, args_ptr, args_len, ret_ptr);
            if (result_loc != nullptr && type_is_invalid(result_loc->value->type))
                return ira->codegen->invalid_inst_gen;
        } else {
            result_loc = nullptr;
        }

        if (impl_fn_type_id->cc == CallingConventionAsync &&
            parent_fn_entry->inferred_async_node == nullptr &&
            modifier != CallModifierNoSuspend)
        {
            parent_fn_entry->inferred_async_node = fn_ref->base.source_node;
            parent_fn_entry->inferred_async_fn = impl_fn;
        }

        IrInstGenCall *new_call_instruction = ir_build_call_gen(ira, source_instr,
                impl_fn, nullptr, impl_param_count, casted_args, modifier, casted_new_stack,
                is_async_call_builtin, result_loc, impl_fn_type_id->return_type);

        if (get_scope_typeof(source_instr->scope) == nullptr) {
            parent_fn_entry->call_list.append(new_call_instruction);
        }

        return ir_finish_anal(ira, &new_call_instruction->base);
    }

    ZigFn *parent_fn_entry = ira->new_irb.exec->fn_entry;
    assert(fn_type_id->return_type != nullptr);
    assert(parent_fn_entry != nullptr);
    if (fn_type_can_fail(fn_type_id)) {
        parent_fn_entry->calls_or_awaits_errorable_fn = true;
    }


    IrInstGen **casted_args = heap::c_allocator.allocate<IrInstGen *>(call_param_count);
    size_t next_arg_index = 0;
    if (first_arg_ptr) {
        assert(first_arg_ptr->value->type->id == ZigTypeIdPointer);

        ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
        if (type_is_invalid(param_type))
            return ira->codegen->invalid_inst_gen;

        IrInstGen *first_arg;
        if (param_type->id == ZigTypeIdPointer &&
            handle_is_ptr(ira->codegen, first_arg_ptr->value->type->data.pointer.child_type))
        {
            first_arg = first_arg_ptr;
        } else {
            first_arg = ir_get_deref(ira, &first_arg_ptr->base, first_arg_ptr, nullptr);
            if (type_is_invalid(first_arg->value->type))
                return ira->codegen->invalid_inst_gen;
        }

        IrInstGen *casted_arg = ir_implicit_cast2(ira, first_arg_ptr_src, first_arg, param_type);
        if (type_is_invalid(casted_arg->value->type))
            return ira->codegen->invalid_inst_gen;

        casted_args[next_arg_index] = casted_arg;
        next_arg_index += 1;
    }
    for (size_t call_i = 0; call_i < args_len; call_i += 1) {
        IrInstGen *old_arg = args_ptr[call_i];
        if (type_is_invalid(old_arg->value->type))
            return ira->codegen->invalid_inst_gen;

        IrInstGen *casted_arg;
        if (next_arg_index < src_param_count) {
            ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
            if (type_is_invalid(param_type))
                return ira->codegen->invalid_inst_gen;
            casted_arg = ir_implicit_cast(ira, old_arg, param_type);
            if (type_is_invalid(casted_arg->value->type))
                return ira->codegen->invalid_inst_gen;
        } else {
            casted_arg = old_arg;
        }

        casted_args[next_arg_index] = casted_arg;
        next_arg_index += 1;
    }

    assert(next_arg_index == call_param_count);

    ZigType *return_type = fn_type_id->return_type;
    if (type_is_invalid(return_type))
        return ira->codegen->invalid_inst_gen;

    if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && modifier == CallModifierNeverInline) {
        ir_add_error(ira, source_instr,
            buf_sprintf("no-inline call of inline function"));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *casted_new_stack = analyze_casted_new_stack(ira, source_instr, new_stack, new_stack_src,
            is_async_call_builtin, fn_entry);
    if (casted_new_stack != nullptr && type_is_invalid(casted_new_stack->value->type))
        return ira->codegen->invalid_inst_gen;

    if (modifier == CallModifierAsync) {
        IrInstGen *result = ir_analyze_async_call(ira, source_instr, fn_entry, fn_type, fn_ref,
                casted_args, call_param_count, casted_new_stack, is_async_call_builtin, ret_ptr, call_result_loc);
        return ir_finish_anal(ira, result);
    }

    if (fn_type_id->cc == CallingConventionAsync &&
        parent_fn_entry->inferred_async_node == nullptr &&
        modifier != CallModifierNoSuspend)
    {
        parent_fn_entry->inferred_async_node = fn_ref->base.source_node;
        parent_fn_entry->inferred_async_fn = fn_entry;
    }

    IrInstGen *result_loc;
    if (handle_is_ptr(ira->codegen, return_type)) {
        result_loc = ir_resolve_result(ira, source_instr, call_result_loc,
                return_type, nullptr, true, false);
        if (result_loc != nullptr) {
            if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
                return result_loc;
            }
            if (result_loc->value->type->data.pointer.is_const) {
                ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant"));
                return ira->codegen->invalid_inst_gen;
            }

            IrInstGen *dummy_value = ir_const(ira, source_instr, return_type);
            dummy_value->value->special = ConstValSpecialRuntime;
            IrInstGen *dummy_result = ir_implicit_cast2(ira, source_instr,
                    dummy_value, result_loc->value->type->data.pointer.child_type);
            if (type_is_invalid(dummy_result->value->type))
                return ira->codegen->invalid_inst_gen;
            ZigType *res_child_type = result_loc->value->type->data.pointer.child_type;
            if (res_child_type == ira->codegen->builtin_types.entry_var) {
                res_child_type = return_type;
            }
            if (!handle_is_ptr(ira->codegen, res_child_type)) {
                ir_reset_result(call_result_loc);
                result_loc = nullptr;
            }
        }
    } else if (is_async_call_builtin) {
        result_loc = get_async_call_result_loc(ira, source_instr, return_type, is_async_call_builtin,
                args_ptr, args_len, ret_ptr);
        if (result_loc != nullptr && type_is_invalid(result_loc->value->type))
            return ira->codegen->invalid_inst_gen;
    } else {
        result_loc = nullptr;
    }

    IrInstGenCall *new_call_instruction = ir_build_call_gen(ira, source_instr, fn_entry, fn_ref,
            call_param_count, casted_args, modifier, casted_new_stack,
            is_async_call_builtin, result_loc, return_type);
    if (get_scope_typeof(source_instr->scope) == nullptr) {
        parent_fn_entry->call_list.append(new_call_instruction);
    }
    return ir_finish_anal(ira, &new_call_instruction->base);
}

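// Thin wrapper that gathers the analyzed arguments, optional new stack and return pointer
// from a pass-1 call instruction and forwards them to ir_analyze_fn_call.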
static IrInstGen *ir_analyze_fn_call_src(IrAnalyze *ira, IrInstSrcCall *call_instruction,
    ZigFn *fn_entry, ZigType *fn_type, IrInstGen *fn_ref,
    IrInstGen *first_arg_ptr, IrInst *first_arg_ptr_src, CallModifier modifier)
{
    IrInstGen *new_stack = nullptr;
    IrInst *new_stack_src = nullptr;
    if (call_instruction->new_stack) {
        new_stack = call_instruction->new_stack->child;
        if (type_is_invalid(new_stack->value->type))
            return ira->codegen->invalid_inst_gen;
        new_stack_src = &call_instruction->new_stack->base;
    }
    IrInstGen **args_ptr = heap::c_allocator.allocate<IrInstGen *>(call_instruction->arg_count);
    for (size_t i = 0; i < call_instruction->arg_count; i += 1) {
        args_ptr[i] = call_instruction->args[i]->child;
        if (type_is_invalid(args_ptr[i]->value->type))
            return ira->codegen->invalid_inst_gen;
    }
    IrInstGen *ret_ptr = nullptr;
    if (call_instruction->ret_ptr != nullptr) {
        ret_ptr = call_instruction->ret_ptr->child;
        if (type_is_invalid(ret_ptr->value->type))
            return ira->codegen->invalid_inst_gen;
    }
    IrInstGen *result = ir_analyze_fn_call(ira, &call_instruction->base.base, fn_entry, fn_type, fn_ref,
            first_arg_ptr, first_arg_ptr_src, modifier, new_stack, new_stack_src,
            call_instruction->is_async_call_builtin, args_ptr, call_instruction->arg_count, ret_ptr,
            call_instruction->result_loc);
    heap::c_allocator.deallocate(args_ptr, call_instruction->arg_count);
    return result;
}

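// Implements @call: reads the `modifier` and `stack` fields out of the comptime-known
// options struct, maps the modifier to a compile-time call when the call site itself is
// being inlined, and requires a comptime-known callee for the modifiers that need one.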
static IrInstGen *ir_analyze_call_extra(IrAnalyze *ira, IrInst* source_instr,
        IrInstSrc *pass1_options, IrInstSrc *pass1_fn_ref, IrInstGen **args_ptr, size_t args_len,
        ResultLoc *result_loc)
{
    IrInstGen *options = pass1_options->child;
    if (type_is_invalid(options->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *fn_ref = pass1_fn_ref->child;
    if (type_is_invalid(fn_ref->value->type))
        return ira->codegen->invalid_inst_gen;

    TypeStructField *modifier_field = find_struct_type_field(options->value->type, buf_create_from_str("modifier"));
    ir_assert(modifier_field != nullptr, source_instr);
    IrInstGen *modifier_inst = ir_analyze_struct_value_field_value(ira, source_instr, options, modifier_field);
    ZigValue *modifier_val = ir_resolve_const(ira, modifier_inst, UndefBad);
    if (modifier_val == nullptr)
        return ira->codegen->invalid_inst_gen;
    CallModifier modifier = (CallModifier)bigint_as_u32(&modifier_val->data.x_enum_tag);

    if (ir_should_inline(ira->old_irb.exec, source_instr->scope)) {
        switch (modifier) {
            case CallModifierBuiltin:
                zig_unreachable();
            case CallModifierAsync:
                ir_add_error(ira, source_instr, buf_sprintf("TODO: comptime @call with async modifier"));
                return ira->codegen->invalid_inst_gen;
            case CallModifierCompileTime:
            case CallModifierNone:
            case CallModifierAlwaysInline:
            case CallModifierAlwaysTail:
            case CallModifierNoSuspend:
                modifier = CallModifierCompileTime;
                break;
            case CallModifierNeverInline:
                ir_add_error(ira, source_instr,
                    buf_sprintf("unable to perform 'never_inline' call at compile-time"));
                return ira->codegen->invalid_inst_gen;
            case CallModifierNeverTail:
                ir_add_error(ira, source_instr,
                    buf_sprintf("unable to perform 'never_tail' call at compile-time"));
                return ira->codegen->invalid_inst_gen;
        }
    }

    IrInstGen *first_arg_ptr = nullptr;
    IrInst *first_arg_ptr_src = nullptr;
    ZigFn *fn = nullptr;
    if (instr_is_comptime(fn_ref)) {
        if (fn_ref->value->type->id == ZigTypeIdBoundFn) {
            assert(fn_ref->value->special == ConstValSpecialStatic);
            fn = fn_ref->value->data.x_bound_fn.fn;
            first_arg_ptr = fn_ref->value->data.x_bound_fn.first_arg;
            first_arg_ptr_src = fn_ref->value->data.x_bound_fn.first_arg_src;
            if (type_is_invalid(first_arg_ptr->value->type))
                return ira->codegen->invalid_inst_gen;
        } else {
            fn = ir_resolve_fn(ira, fn_ref);
        }
    }

    // Some modifiers require the callee to be comptime-known
    switch (modifier) {
        case CallModifierCompileTime:
        case CallModifierAlwaysInline:
        case CallModifierAsync:
            if (fn == nullptr) {
                ir_add_error(ira, &modifier_inst->base,
                    buf_sprintf("the specified modifier requires a comptime-known function"));
                return ira->codegen->invalid_inst_gen;
            }
            ZIG_FALLTHROUGH;
        default:
            break;
    }

    ZigType *fn_type = (fn != nullptr) ? fn->type_entry : fn_ref->value->type;

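    // The optional `stack` field, when present, is unwrapped and passed through as the
    // call's new stack.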
    TypeStructField *stack_field = find_struct_type_field(options->value->type, buf_create_from_str("stack"));
    ir_assert(stack_field != nullptr, source_instr);
    IrInstGen *opt_stack = ir_analyze_struct_value_field_value(ira, source_instr, options, stack_field);
    if (type_is_invalid(opt_stack->value->type))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *stack_is_non_null_inst = ir_analyze_test_non_null(ira, source_instr, opt_stack);
    bool stack_is_non_null;
    if (!ir_resolve_bool(ira, stack_is_non_null_inst, &stack_is_non_null))
        return ira->codegen->invalid_inst_gen;

    IrInstGen *stack = nullptr;
    IrInst *stack_src = nullptr;
    if (stack_is_non_null) {
        stack = ir_analyze_optional_value_payload_value(ira, source_instr, opt_stack, false);
        if (type_is_invalid(stack->value->type))
            return ira->codegen->invalid_inst_gen;
        stack_src = &stack->base;
    }

    return ir_analyze_fn_call(ira, source_instr, fn, fn_type, fn_ref, first_arg_ptr, first_arg_ptr_src,
        modifier, stack, stack_src, false, args_ptr, args_len, nullptr, result_loc);
}

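// Analysis for @asyncCall-style calls: besides the callee and arguments, the builtin
// supplies a frame/stack buffer and an optional return-value pointer. Illustrative Zig
// usage (exact builtin signature depends on the Zig version):
//     var frame = @asyncCall(&frame_buf, &result, func, .{ a, b });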
static IrInstGen *ir_analyze_async_call_extra(IrAnalyze *ira, IrInst* source_instr, CallModifier modifier,
        IrInstSrc *pass1_fn_ref, IrInstSrc *ret_ptr, IrInstSrc *new_stack, IrInstGen **args_ptr,
        size_t args_len, ResultLoc *result_loc)
{
    IrInstGen *fn_ref = pass1_fn_ref->child;
    if (type_is_invalid(fn_ref->value->type))
        return ira->codegen->invalid_inst_gen;

    if (ir_should_inline(ira->old_irb.exec, source_instr->scope)) {
        ir_add_error(ira, source_instr, buf_sprintf("TODO: comptime @asyncCall"));
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *first_arg_ptr = nullptr;
    IrInst *first_arg_ptr_src = nullptr;
    ZigFn *fn = nullptr;
    if (instr_is_comptime(fn_ref)) {
        if (fn_ref->value->type->id == ZigTypeIdBoundFn) {
            assert(fn_ref->value->special == ConstValSpecialStatic);
            fn = fn_ref->value->data.x_bound_fn.fn;
            first_arg_ptr = fn_ref->value->data.x_bound_fn.first_arg;
            first_arg_ptr_src = fn_ref->value->data.x_bound_fn.first_arg_src;
            if (type_is_invalid(first_arg_ptr->value->type))
                return ira->codegen->invalid_inst_gen;
        } else {
            fn = ir_resolve_fn(ira, fn_ref);
        }
    }

    IrInstGen *ret_ptr_uncasted = nullptr;
    if (ret_ptr != nullptr) {
        ret_ptr_uncasted = ret_ptr->child;
        if (type_is_invalid(ret_ptr_uncasted->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    ZigType *fn_type = (fn != nullptr) ? fn->type_entry : fn_ref->value->type;
    IrInstGen *casted_new_stack = analyze_casted_new_stack(ira, source_instr, new_stack->child,
            &new_stack->base, true, fn);
    if (casted_new_stack != nullptr && type_is_invalid(casted_new_stack->value->type))
        return ira->codegen->invalid_inst_gen;

    return ir_analyze_fn_call(ira, source_instr, fn, fn_type, fn_ref, first_arg_ptr, first_arg_ptr_src,
        modifier, casted_new_stack, &new_stack->base, true, args_ptr, args_len, ret_ptr_uncasted, result_loc);
}

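// Expands a tuple value into a flat argument array. On success *args_ptr is allocated
// with heap::c_allocator and ownership passes to the caller (the call sites below
// deallocate it). Plain (non-tuple) structs are not supported yet.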
static bool ir_extract_tuple_call_args(IrAnalyze *ira, IrInst *source_instr, IrInstGen *args,
        IrInstGen ***args_ptr, size_t *args_len)
{
    ZigType *args_type = args->value->type;
    if (type_is_invalid(args_type))
        return false;

    if (args_type->id != ZigTypeIdStruct) {
        ir_add_error(ira, &args->base,
            buf_sprintf("expected tuple or struct, found '%s'", buf_ptr(&args_type->name)));
        return false;
    }

    if (is_tuple(args_type)) {
        *args_len = args_type->data.structure.src_field_count;
        *args_ptr = heap::c_allocator.allocate<IrInstGen *>(*args_len);
        for (size_t i = 0; i < *args_len; i += 1) {
            TypeStructField *arg_field = args_type->data.structure.fields[i];
            (*args_ptr)[i] = ir_analyze_struct_value_field_value(ira, source_instr, args, arg_field);
            if (type_is_invalid((*args_ptr)[i]->value->type))
                return false;
        }
    } else {
        ir_add_error(ira, &args->base, buf_sprintf("TODO: struct args"));
        return false;
    }
    return true;
}

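// @call whose arguments arrive as a single tuple instruction: unpack it, then defer to
// ir_analyze_call_extra.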
static IrInstGen *ir_analyze_instruction_call_extra(IrAnalyze *ira, IrInstSrcCallExtra *instruction) {
    IrInstGen *args = instruction->args->child;
    IrInstGen **args_ptr = nullptr;
    size_t args_len = 0;
    if (!ir_extract_tuple_call_args(ira, &instruction->base.base, args, &args_ptr, &args_len)) {
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *result = ir_analyze_call_extra(ira, &instruction->base.base, instruction->options,
            instruction->fn_ref, args_ptr, args_len, instruction->result_loc);
    heap::c_allocator.deallocate(args_ptr, args_len);
    return result;
}

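// Async counterpart of the instruction above: same tuple unpacking, then
// ir_analyze_async_call_extra.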
static IrInstGen *ir_analyze_instruction_async_call_extra(IrAnalyze *ira, IrInstSrcAsyncCallExtra *instruction) {
    IrInstGen *args = instruction->args->child;
    IrInstGen **args_ptr = nullptr;
    size_t args_len = 0;
    if (!ir_extract_tuple_call_args(ira, &instruction->base.base, args, &args_ptr, &args_len)) {
        return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *result = ir_analyze_async_call_extra(ira, &instruction->base.base, instruction->modifier,
            instruction->fn_ref, instruction->ret_ptr, instruction->new_stack, args_ptr, args_len, instruction->result_loc);
    heap::c_allocator.deallocate(args_ptr, args_len);
    return result;
}

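// @call whose arguments were emitted as individual pass-1 instructions rather than a
// tuple; they are collected into a temporary array before dispatching.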
static IrInstGen *ir_analyze_instruction_call_args(IrAnalyze *ira, IrInstSrcCallArgs *instruction) {
    IrInstGen **args_ptr = heap::c_allocator.allocate<IrInstGen *>(instruction->args_len);
    for (size_t i = 0; i < instruction->args_len; i += 1) {
        args_ptr[i] = instruction->args_ptr[i]->child;
        if (type_is_invalid(args_ptr[i]->value->type))
            return ira->codegen->invalid_inst_gen;
    }

    IrInstGen *result = ir_analyze_call_extra(ira, &instruction->base.base, instruction->options,
            instruction->fn_ref, args_ptr, instruction->args_len, instruction->result_loc);
    heap::c_allocator.deallocate(args_ptr, instruction->args_len);
    return result;
}

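// Ordinary call syntax, e.g. `foo(a, b)` or `obj.method(a)`. Decides whether the call
// must be evaluated at comptime (explicit comptime modifier or an inline scope),
// unwraps bound functions (method calls) into a function plus first-argument pointer,
// and rejects callees that are not functions.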
static IrInstGen *ir_analyze_instruction_call(IrAnalyze *ira, IrInstSrcCall *call_instruction) {
    IrInstGen *fn_ref = call_instruction->fn_ref->child;
    if (type_is_invalid(fn_ref->value->type))
        return ira->codegen->invalid_inst_gen;

    bool is_comptime = (call_instruction->modifier == CallModifierCompileTime) ||
        ir_should_inline(ira->old_irb.exec, call_instruction->base.base.scope);
    CallModifier modifier = is_comptime ? CallModifierCompileTime : call_instruction->modifier;

    if (is_comptime || instr_is_comptime(fn_ref)) {
        if (fn_ref->value->type->id == ZigTypeIdMetaType) {
            ZigType *ty = ir_resolve_type(ira, fn_ref);
            if (ty == nullptr)
                return ira->codegen->invalid_inst_gen;
            ErrorMsg *msg = ir_add_error(ira, &fn_ref->base,
                buf_sprintf("type '%s' not a function", buf_ptr(&ty->name)));
            add_error_note(ira->codegen, msg, call_instruction->base.base.source_node,
                buf_sprintf("use @as builtin for type coercion"));
            return ira->codegen->invalid_inst_gen;
        } else if (fn_ref->value->type->id == ZigTypeIdFn) {
            ZigFn *fn_table_entry = ir_resolve_fn(ira, fn_ref);
            ZigType *fn_type = fn_table_entry ? fn_table_entry->type_entry : fn_ref->value->type;
            return ir_analyze_fn_call_src(ira, call_instruction, fn_table_entry, fn_type,
                fn_ref, nullptr, nullptr, modifier);
        } else if (fn_ref->value->type->id == ZigTypeIdBoundFn) {
            assert(fn_ref->value->special == ConstValSpecialStatic);
            ZigFn *fn_table_entry = fn_ref->value->data.x_bound_fn.fn;
            IrInstGen *first_arg_ptr = fn_ref->value->data.x_bound_fn.first_arg;
            IrInst *first_arg_ptr_src = fn_ref->value->data.x_bound_fn.first_arg_src;
            return ir_analyze_fn_call_src(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry,
                fn_ref, first_arg_ptr, first_arg_ptr_src, modifier);
        } else {
            ir_add_error(ira, &fn_ref->base,
                buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value->type->name)));
            return ira->codegen->invalid_inst_gen;
        }
    }

    if (fn_ref->value->type->id == ZigTypeIdFn) {
        return ir_analyze_fn_call_src(ira, call_instruction, nullptr, fn_ref->value->type,
            fn_ref, nullptr, nullptr, modifier);
    } else {
        ir_add_error(ira, &fn_ref->base,
            buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value->type->name)));
        return ira->codegen->invalid_inst_gen;
    }
}

// out_val->type must be set to the type to read the pointer as.
// If it differs from the pointee's actual type, the value is reinterpreted
// byte-by-byte at comptime.
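// For example (illustrative): reading a u32 through a pointer whose pointee is [4]u8
// takes the byte-buffer path below (buf_write_value_bytes / buf_read_value_bytes)
// instead of a direct copy_const_val.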
static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node,
        ZigValue *out_val, ZigValue *ptr_val)
{
    Error err;
    assert(out_val->type != nullptr);

    ZigValue *pointee = const_ptr_pointee_unchecked(codegen, ptr_val);
    src_assert(pointee->type != nullptr, source_node);

    if ((err = type_resolve(codegen, pointee->type, ResolveStatusSizeKnown)))
        return ErrorSemanticAnalyzeFail;
    if ((err = type_resolve(codegen, out_val->type, ResolveStatusSizeKnown)))
        return ErrorSemanticAnalyzeFail;

    size_t src_size = type_size(codegen, pointee->type);
    size_t dst_size = type_size(codegen, out_val->type);

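    // Destination no larger than the source: copy directly when the comptime
    // representation matches, otherwise round-trip through a byte buffer.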
    if (dst_size <= src_size) {
        if (src_size == dst_size && types_have_same_zig_comptime_repr(codegen, out_val->type, pointee->type)) {
            copy_const_val(codegen, out_val, pointee);
            return ErrorNone;
        }
        Buf buf = BUF_INIT;
        buf_resize(&buf, src_size);
        buf_write_value_bytes(codegen, (uint8_t*)buf_ptr(&buf), pointee);
        err = buf_read_value_bytes(ira, codegen, source_node, (uint8_t*)buf_ptr(&buf), out_val);
        buf_deinit(&buf); // release the scratch buffer even when the read fails
        if (err != ErrorNone)
            return err;
        return ErrorNone;
    }

    switch (ptr_val->data.x_ptr.special) {
        case ConstPtrSpecialInvalid:
            zig_unreachable();
        case ConstPtrSpecialNull:
            if (dst_size == 0)
                return ErrorNone;
            opt_ir_add_error_node(ira, co