a bunch of files from https://github.com/ziglang/zig/tree/master/src concatenated together
//! Analyzed Intermediate Representation.
//! This data is produced by Sema and consumed by codegen.
//! Unlike ZIR where there is one instance for an entire source file, each function
//! gets its own `Air` instance.
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const Air = @This();
const Value = @import("Value.zig");
const Type = @import("type.zig").Type;
const InternPool = @import("InternPool.zig");
const Module = @import("Module.zig");
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
/// The first few indexes are reserved. See `ExtraIndex` for the values.
extra: []const u32,
pub const ExtraIndex = enum(u32) {
    /// Payload index of the main `Block` in the `extra` array.
    main_block,
    _,
};
pub const Inst = struct {
    tag: Tag,
    data: Data,
    pub const Tag = enum(u8) {
        /// The first N instructions in the main block must be one arg instruction per
        /// function parameter. This makes function parameters participate in
        /// liveness analysis without any special handling.
        /// Uses the `arg` field.
        arg,
        /// Float or integer addition. For integers, wrapping is undefined behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        add,
        /// Integer addition. Wrapping is a safety panic.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// The panic handler function must be populated before lowering AIR
        /// that contains this instruction.
        /// This instruction will only be emitted if the backend has the
        /// feature `safety_checked_instructions`.
        /// Uses the `bin_op` field.
        add_safe,
        /// Float addition. The instruction is allowed to have equal or more
        /// mathematical accuracy than strict IEEE-754 float addition.
        /// If either operand is NaN, the result value is undefined.
        /// Uses the `bin_op` field.
        add_optimized,
        /// Twos complement wrapping integer addition.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        add_wrap,
        /// Saturating integer addition.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        add_sat,
        /// Float or integer subtraction. For integers, wrapping is undefined behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        sub,
        /// Integer subtraction. Wrapping is a safety panic.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// The panic handler function must be populated before lowering AIR
        /// that contains this instruction.
        /// This instruction will only be emitted if the backend has the
        /// feature `safety_checked_instructions`.
        /// Uses the `bin_op` field.
        sub_safe,
        /// Float subtraction. The instruction is allowed to have equal or more
        /// mathematical accuracy than strict IEEE-754 float subtraction.
        /// If either operand is NaN, the result value is undefined.
        /// Uses the `bin_op` field.
        sub_optimized,
        /// Twos complement wrapping integer subtraction.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        sub_wrap,
        /// Saturating integer subtraction.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        sub_sat,
        /// Float or integer multiplication. For integers, wrapping is undefined behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        mul,
        /// Integer multiplication. Wrapping is a safety panic.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// The panic handler function must be populated before lowering AIR
        /// that contains this instruction.
        /// This instruction will only be emitted if the backend has the
        /// feature `safety_checked_instructions`.
        /// Uses the `bin_op` field.
        mul_safe,
        /// Float multiplication. The instruction is allowed to have equal or more
        /// mathematical accuracy than strict IEEE-754 float multiplication.
        /// If either operand is NaN, the result value is undefined.
        /// Uses the `bin_op` field.
        mul_optimized,
        /// Twos complement wrapping integer multiplication.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        mul_wrap,
        /// Saturating integer multiplication.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        mul_sat,
        /// Float division.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        div_float,
        /// Same as `div_float` with optimized float mode.
        div_float_optimized,
        /// Truncating integer or float division. For integers, wrapping is undefined behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        div_trunc,
        /// Same as `div_trunc` with optimized float mode.
        div_trunc_optimized,
        /// Flooring integer or float division. For integers, wrapping is undefined behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        div_floor,
        /// Same as `div_floor` with optimized float mode.
        div_floor_optimized,
        /// Integer or float division.
        /// If a remainder would be produced, undefined behavior occurs.
        /// For integers, overflow is undefined behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        div_exact,
        /// Same as `div_exact` with optimized float mode.
        div_exact_optimized,
        /// Integer or float remainder division.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        rem,
        /// Same as `rem` with optimized float mode.
        rem_optimized,
        /// Integer or float modulus division.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        mod,
        /// Same as `mod` with optimized float mode.
        mod_optimized,
        /// Add an offset to a pointer, returning a new pointer.
        /// The offset is in element type units, not bytes.
        /// Wrapping is undefined behavior.
        /// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
        /// The pointer may be a slice.
        /// Uses the `ty_pl` field. Payload is `Bin`.
        ptr_add,
        /// Subtract an offset from a pointer, returning a new pointer.
        /// The offset is in element type units, not bytes.
        /// Wrapping is undefined behavior.
        /// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
        /// The pointer may be a slice.
        /// Uses the `ty_pl` field. Payload is `Bin`.
        ptr_sub,
        /// Given two operands which can be floats, integers, or vectors, returns the
        /// greater of the operands. For vectors it operates element-wise.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        max,
        /// Given two operands which can be floats, integers, or vectors, returns the
        /// lesser of the operands. For vectors it operates element-wise.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        min,
        /// Integer addition with overflow. Both operands are guaranteed to be the same type,
        /// and the result is a tuple with .{res, ov}. The wrapped value is written to res
        /// and if an overflow happens, ov is 1. Otherwise ov is 0.
        /// Uses the `ty_pl` field. Payload is `Bin`.
        add_with_overflow,
        /// Integer subtraction with overflow. Both operands are guaranteed to be the same type,
        /// and the result is a tuple with .{res, ov}. The wrapped value is written to res
        /// and if an overflow happens, ov is 1. Otherwise ov is 0.
        /// Uses the `ty_pl` field. Payload is `Bin`.
        sub_with_overflow,
        /// Integer multiplication with overflow. Both operands are guaranteed to be the same type,
        /// and the result is a tuple with .{res, ov}. The wrapped value is written to res
        /// and if an overflow happens, ov is 1. Otherwise ov is 0.
        /// Uses the `ty_pl` field. Payload is `Bin`.
        mul_with_overflow,
        /// Integer left-shift with overflow. Both operands are guaranteed to be the same type,
        /// and the result is a tuple with .{res, ov}. The wrapped value is written to res
        /// and if an overflow happens, ov is 1. Otherwise ov is 0.
        /// Uses the `ty_pl` field. Payload is `Bin`.
        shl_with_overflow,
        /// Allocates stack local memory.
        /// Uses the `ty` field.
        alloc,
        /// This special instruction only exists temporarily during semantic
        /// analysis and is guaranteed to be unreachable in machine code
        /// backends. It tracks a set of types that have been stored to an
        /// inferred allocation.
        /// Uses the `inferred_alloc` field.
        inferred_alloc,
        /// This special instruction only exists temporarily during semantic
        /// analysis and is guaranteed to be unreachable in machine code
        /// backends. Used to coordinate alloc_inferred, store_to_inferred_ptr,
        /// and resolve_inferred_alloc instructions for comptime code.
        /// Uses the `inferred_alloc_comptime` field.
        inferred_alloc_comptime,
        /// If the function will pass the result by-ref, this instruction returns the
        /// result pointer. Otherwise it is equivalent to `alloc`.
        /// Uses the `ty` field.
        ret_ptr,
        /// Inline assembly. Uses the `ty_pl` field. Payload is `Asm`.
        assembly,
        /// Bitwise AND. `&`.
        /// Result type is the same as both operands.
        /// Uses the `bin_op` field.
        bit_and,
        /// Bitwise OR. `|`.
        /// Result type is the same as both operands.
        /// Uses the `bin_op` field.
        bit_or,
        /// Shift right. `>>`
        /// Uses the `bin_op` field.
        shr,
        /// Shift right. The shift produces a poison value if it shifts out any non-zero bits.
        /// Uses the `bin_op` field.
        shr_exact,
        /// Shift left. `<<`
        /// Uses the `bin_op` field.
        shl,
        /// Shift left; For unsigned integers, the shift produces a poison value if it shifts
        /// out any non-zero bits. For signed integers, the shift produces a poison value if
        /// it shifts out any bits that disagree with the resultant sign bit.
        /// Uses the `bin_op` field.
        shl_exact,
        /// Saturating integer shift left. `<<|`
        /// Uses the `bin_op` field.
        shl_sat,
        /// Bitwise XOR. `^`
        /// Uses the `bin_op` field.
        xor,
        /// Boolean or binary NOT.
        /// Uses the `ty_op` field.
        not,
        /// Reinterpret the memory representation of a value as a different type.
        /// Uses the `ty_op` field.
        bitcast,
        /// Uses the `ty_pl` field with payload `Block`. A block runs its body which always ends
        /// with a `noreturn` instruction, so the only way to proceed to the code after the `block`
        /// is to encounter a `br` that targets this `block`. If the `block` type is `noreturn`,
        /// then there do not exist any `br` instructions targeting this `block`.
        block,
        /// A labeled block of code that loops forever. At the end of the body it is implied
        /// to repeat; no explicit "repeat" instruction terminates loop bodies.
        /// Result type is always `noreturn`; no instructions in a block follow this one.
        /// The body never ends with a `noreturn` instruction, so the "repeat" operation
        /// is always statically reachable.
        /// Uses the `ty_pl` field. Payload is `Block`.
        loop,
        /// Return from a block with a result.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `br` field.
        br,
        /// Lowers to a trap/jam instruction causing program abortion.
        /// This may lower to an instruction known to be invalid.
        /// Sometimes, for the lack of a better instruction, `trap` and `breakpoint` may compile down to the same code.
        /// Result type is always noreturn; no instructions in a block follow this one.
        trap,
        /// Lowers to a trap instruction causing debuggers to break here, or the next best thing.
        /// The debugger or something else may allow the program to resume after this point.
        /// Sometimes, for the lack of a better instruction, `trap` and `breakpoint` may compile down to the same code.
        /// Result type is always void.
        breakpoint,
        /// Yields the return address of the current function.
        /// Uses the `no_op` field.
        ret_addr,
        /// Implements @frameAddress builtin.
        /// Uses the `no_op` field.
        frame_addr,
        /// Function call.
        /// Result type is the return type of the function being called.
        /// Uses the `pl_op` field with the `Call` payload. operand is the callee.
        /// Triggers `resolveTypeLayout` on the return type of the callee.
        call,
        /// Same as `call` except with the `always_tail` attribute.
        call_always_tail,
        /// Same as `call` except with the `never_tail` attribute.
        call_never_tail,
        /// Same as `call` except with the `never_inline` attribute.
        call_never_inline,
        /// Count leading zeroes of an integer according to its representation in twos complement.
        /// Result type will always be an unsigned integer big enough to fit the answer.
        /// Uses the `ty_op` field.
        clz,
        /// Count trailing zeroes of an integer according to its representation in twos complement.
        /// Result type will always be an unsigned integer big enough to fit the answer.
        /// Uses the `ty_op` field.
        ctz,
        /// Count number of 1 bits in an integer according to its representation in twos complement.
        /// Result type will always be an unsigned integer big enough to fit the answer.
        /// Uses the `ty_op` field.
        popcount,
        /// Reverse the bytes in an integer according to its representation in twos complement.
        /// Uses the `ty_op` field.
        byte_swap,
        /// Reverse the bits in an integer according to its representation in twos complement.
        /// Uses the `ty_op` field.
        bit_reverse,
        /// Square root of a floating point number.
        /// Uses the `un_op` field.
        sqrt,
        /// Sine function on a floating point number.
        /// Uses the `un_op` field.
        sin,
        /// Cosine function on a floating point number.
        /// Uses the `un_op` field.
        cos,
        /// Tangent function on a floating point number.
        /// Uses the `un_op` field.
        tan,
        /// Base e exponential of a floating point number.
        /// Uses the `un_op` field.
        exp,
        /// Base 2 exponential of a floating point number.
        /// Uses the `un_op` field.
        exp2,
        /// Natural (base e) logarithm of a floating point number.
        /// Uses the `un_op` field.
        log,
        /// Base 2 logarithm of a floating point number.
        /// Uses the `un_op` field.
        log2,
        /// Base 10 logarithm of a floating point number.
        /// Uses the `un_op` field.
        log10,
        /// Absolute value of an integer, floating point number or vector.
        /// Result type is always unsigned if the operand is an integer.
        /// Uses the `ty_op` field.
        abs,
        /// Floor: rounds a floating point number down to the nearest integer.
        /// Uses the `un_op` field.
        floor,
        /// Ceiling: rounds a floating point number up to the nearest integer.
        /// Uses the `un_op` field.
        ceil,
        /// Rounds a floating point number to the nearest integer.
        /// Uses the `un_op` field.
        round,
        /// Rounds a floating point number to the nearest integer towards zero.
        /// Uses the `un_op` field.
        trunc_float,
        /// Float negation. This affects the sign of zero, inf, and NaN, which is impossible
        /// to do with sub. Integers are not allowed and must be represented with sub with
        /// LHS of zero.
        /// Uses the `un_op` field.
        neg,
        /// Same as `neg` with optimized float mode.
        neg_optimized,
        /// `<`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_lt,
        /// Same as `cmp_lt` with optimized float mode.
        cmp_lt_optimized,
        /// `<=`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_lte,
        /// Same as `cmp_lte` with optimized float mode.
        cmp_lte_optimized,
        /// `==`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_eq,
        /// Same as `cmp_eq` with optimized float mode.
        cmp_eq_optimized,
        /// `>=`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_gte,
        /// Same as `cmp_gte` with optimized float mode.
        cmp_gte_optimized,
        /// `>`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_gt,
        /// Same as `cmp_gt` with optimized float mode.
        cmp_gt_optimized,
        /// `!=`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_neq,
        /// Same as `cmp_neq` with optimized float mode.
        cmp_neq_optimized,
        /// Element-wise comparison of two vectors.
        /// Result type is always a vector of bools.
        /// Uses the `ty_pl` field, payload is `VectorCmp`.
        cmp_vector,
        /// Same as `cmp_vector` with optimized float mode.
        cmp_vector_optimized,
        /// Conditional branch.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`.
        cond_br,
        /// Switch branch.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`.
        switch_br,
        /// Given an operand which is an error union, splits control flow. In
        /// case of error, control flow goes into the block that is part of this
        /// instruction, which is guaranteed to end with a return instruction
        /// and never breaks out of the block.
        /// In the case of non-error, control flow proceeds to the next instruction
        /// after the `try`, with the result of this instruction being the unwrapped
        /// payload value, as if `unwrap_errunion_payload` was executed on the operand.
        /// Uses the `pl_op` field. Payload is `Try`.
        @"try",
        /// Same as `try` except the operand is a pointer to an error union, and the
        /// result is a pointer to the payload. Result is as if `unwrap_errunion_payload_ptr`
        /// was executed on the operand.
        /// Uses the `ty_pl` field. Payload is `TryPtr`.
        try_ptr,
        /// Notes the beginning of a source code statement and marks the line and column.
        /// Result type is always void.
        /// Uses the `dbg_stmt` field.
        dbg_stmt,
        /// A block that represents an inlined function call.
        /// Uses the `ty_pl` field. Payload is `DbgInlineBlock`.
        dbg_inline_block,
        /// Marks the beginning of a local variable. The operand is a pointer pointing
        /// to the storage for the variable. The local may be a const or a var.
        /// Result type is always void.
        /// Uses `pl_op`. The payload index is the variable name. It points to the extra
        /// array, reinterpreting the bytes there as a null-terminated string.
        dbg_var_ptr,
        /// Same as `dbg_var_ptr` except the local is a const, not a var, and the
        /// operand is the local's value.
        dbg_var_val,
        /// ?T => bool
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_null,
        /// ?T => bool (inverted logic)
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_non_null,
        /// *?T => bool
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_null_ptr,
        /// *?T => bool (inverted logic)
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_non_null_ptr,
        /// E!T => bool
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_err,
        /// E!T => bool (inverted logic)
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_non_err,
        /// *E!T => bool
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_err_ptr,
        /// *E!T => bool (inverted logic)
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_non_err_ptr,
        /// Result type is always bool.
        /// Uses the `bin_op` field.
        bool_and,
        /// Result type is always bool.
        /// Uses the `bin_op` field.
        bool_or,
        /// Read a value from a pointer.
        /// Uses the `ty_op` field.
        load,
        /// Converts a pointer to its address. Result type is always `usize`.
        /// Pointer type size may be any, including slice.
        /// Uses the `un_op` field.
        int_from_ptr,
        /// Given a boolean, returns 0 or 1.
        /// Result type is always `u1`.
        /// Uses the `un_op` field.
        int_from_bool,
        /// Return a value from a function.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `un_op` field.
        /// Triggers `resolveTypeLayout` on the return type.
        ret,
        /// Same as `ret`, except if the operand is undefined, the
        /// returned value is 0xaa bytes, and any other safety metadata
        /// such as Valgrind integrations should be notified of
        /// this value being undefined.
        ret_safe,
        /// This instruction communicates that the function's result value is pointed to by
        /// the operand. If the function will pass the result by-ref, the operand is a
        /// `ret_ptr` instruction. Otherwise, this instruction is equivalent to a `load`
        /// on the operand, followed by a `ret` on the loaded value.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `un_op` field.
        /// Triggers `resolveTypeLayout` on the return type.
        ret_load,
        /// Write a value to a pointer. LHS is pointer, RHS is value.
        /// Result type is always void.
        /// Uses the `bin_op` field.
        /// The value to store may be undefined, in which case the destination
        /// memory region has undefined bytes after this instruction is
        /// evaluated. In such case ignoring this instruction is legal
        /// lowering.
        store,
        /// Same as `store`, except if the value to store is undefined, the
        /// memory region should be filled with 0xaa bytes, and any other
        /// safety metadata such as Valgrind integrations should be notified of
        /// this memory region being undefined.
        store_safe,
        /// Indicates the program counter will never get to this instruction.
        /// Result type is always noreturn; no instructions in a block follow this one.
        unreach,
        /// Convert from a float type to a smaller one.
        /// Uses the `ty_op` field.
        fptrunc,
        /// Convert from a float type to a wider one.
        /// Uses the `ty_op` field.
        fpext,
        /// Returns an integer with a different type than the operand. The new type may have
        /// fewer, the same, or more bits than the operand type. The new type may also
        /// differ in signedness from the operand type. However, the instruction
        /// guarantees that the same integer value fits in both types.
        /// The new type may also be an enum type, in which case the integer cast operates on
        /// the integer tag type of the enum.
        /// See `trunc` for integer truncation.
        /// Uses the `ty_op` field.
        intcast,
        /// Truncate higher bits from an integer, resulting in an integer with the same
        /// sign but an equal or smaller number of bits.
        /// Uses the `ty_op` field.
        trunc,
        /// ?T => T. If the value is null, undefined behavior.
        /// Uses the `ty_op` field.
        optional_payload,
        /// *?T => *T. If the value is null, undefined behavior.
        /// Uses the `ty_op` field.
        optional_payload_ptr,
        /// *?T => *T. Sets the value to non-null with an undefined payload value.
        /// Uses the `ty_op` field.
        optional_payload_ptr_set,
        /// Given a payload value, wraps it in an optional type.
        /// Uses the `ty_op` field.
        wrap_optional,
        /// E!T -> T. If the value is an error, undefined behavior.
        /// Uses the `ty_op` field.
        unwrap_errunion_payload,
        /// E!T -> E. If the value is not an error, undefined behavior.
        /// Uses the `ty_op` field.
        unwrap_errunion_err,
        /// *(E!T) -> *T. If the value is an error, undefined behavior.
        /// Uses the `ty_op` field.
        unwrap_errunion_payload_ptr,
        /// *(E!T) -> E. If the value is not an error, undefined behavior.
        /// Uses the `ty_op` field.
        unwrap_errunion_err_ptr,
        /// *(E!T) => *T. Sets the value to non-error with an undefined payload value.
        /// Uses the `ty_op` field.
        errunion_payload_ptr_set,
        /// wrap from T to E!T
        /// Uses the `ty_op` field.
        wrap_errunion_payload,
        /// wrap from E to E!T
        /// Uses the `ty_op` field.
        wrap_errunion_err,
        /// Given a pointer to a struct or union and a field index, returns a pointer to the field.
        /// Uses the `ty_pl` field, payload is `StructField`.
        /// TODO rename to `agg_field_ptr`.
        struct_field_ptr,
        /// Given a pointer to a struct or union, returns a pointer to the field.
        /// The field index is the number at the end of the name.
        /// Uses `ty_op` field.
        /// TODO rename to `agg_field_ptr_index_X`
        struct_field_ptr_index_0,
        struct_field_ptr_index_1,
        struct_field_ptr_index_2,
        struct_field_ptr_index_3,
        /// Given a byval struct or union and a field index, returns the field byval.
        /// Uses the `ty_pl` field, payload is `StructField`.
        /// TODO rename to `agg_field_val`
        struct_field_val,
        /// Given a pointer to a tagged union, set its tag to the provided value.
        /// Result type is always void.
        /// Uses the `bin_op` field. LHS is union pointer, RHS is new tag value.
        set_union_tag,
        /// Given a tagged union value, get its tag value.
        /// Uses the `ty_op` field.
        get_union_tag,
        /// Constructs a slice from a pointer and a length.
        /// Uses the `ty_pl` field, payload is `Bin`. lhs is ptr, rhs is len.
        slice,
        /// Given a slice value, return the length.
        /// Result type is always usize.
        /// Uses the `ty_op` field.
        slice_len,
        /// Given a slice value, return the pointer.
        /// Uses the `ty_op` field.
        slice_ptr,
        /// Given a pointer to a slice, return a pointer to the length of the slice.
        /// Uses the `ty_op` field.
        ptr_slice_len_ptr,
        /// Given a pointer to a slice, return a pointer to the pointer of the slice.
        /// Uses the `ty_op` field.
        ptr_slice_ptr_ptr,
        /// Given an (array value or vector value) and element index,
        /// return the element value at that index.
        /// Result type is the element type of the array operand.
        /// Uses the `bin_op` field.
        array_elem_val,
        /// Given a slice value, and element index, return the element value at that index.
        /// Result type is the element type of the slice operand.
        /// Uses the `bin_op` field.
        slice_elem_val,
        /// Given a slice value and element index, return a pointer to the element value at that index.
        /// Result type is a pointer to the element type of the slice operand.
        /// Uses the `ty_pl` field with payload `Bin`.
        slice_elem_ptr,
        /// Given a pointer value, and element index, return the element value at that index.
        /// Result type is the element type of the pointer operand.
        /// Uses the `bin_op` field.
        ptr_elem_val,
        /// Given a pointer value, and element index, return the element pointer at that index.
        /// Result type is pointer to the element type of the pointer operand.
        /// Uses the `ty_pl` field with payload `Bin`.
        ptr_elem_ptr,
        /// Given a pointer to an array, return a slice.
        /// Uses the `ty_op` field.
        array_to_slice,
        /// Given a float operand, return the integer with the closest mathematical meaning.
        /// Uses the `ty_op` field.
        int_from_float,
        /// Same as `int_from_float` with optimized float mode.
        int_from_float_optimized,
        /// Given an integer operand, return the float with the closest mathematical meaning.
        /// Uses the `ty_op` field.
        float_from_int,
        /// Transforms a vector into a scalar value by performing a sequential
        /// horizontal reduction of its elements using the specified operator.
        /// The vector element type (and hence result type) will be:
        /// * and, or, xor => integer or boolean
        /// * min, max, add, mul => integer or float
        /// Uses the `reduce` field.
        reduce,
        /// Same as `reduce` with optimized float mode.
        reduce_optimized,
        /// Given an integer, bool, float, or pointer operand, return a vector with all elements
        /// equal to the scalar value.
        /// Uses the `ty_op` field.
        splat,
        /// Constructs a vector by selecting elements from `a` and `b` based on `mask`.
        /// Uses the `ty_pl` field with payload `Shuffle`.
        shuffle,
        /// Constructs a vector element-wise from `a` or `b` based on `pred`.
        /// Uses the `pl_op` field with `pred` as operand, and payload `Bin`.
        select,
        /// Given dest pointer and value, set all elements at dest to value.
        /// Dest pointer is either a slice or a pointer to array.
        /// The element type may be any type, and the slice may have any alignment.
        /// Result type is always void.
        /// Uses the `bin_op` field. LHS is the dest slice. RHS is the element value.
        /// The element value may be undefined, in which case the destination
        /// memory region has undefined bytes after this instruction is
        /// evaluated. In such case ignoring this instruction is legal
        /// lowering.
        /// If the length is compile-time known (due to the destination being a
        /// pointer-to-array), then it is guaranteed to be greater than zero.
        memset,
        /// Same as `memset`, except if the element value is undefined, the memory region
        /// should be filled with 0xaa bytes, and any other safety metadata such as Valgrind
        /// integrations should be notified of this memory region being undefined.
        memset_safe,
        /// Given dest pointer and source pointer, copy elements from source to dest.
        /// Dest pointer is either a slice or a pointer to array.
        /// The dest element type may be any type.
        /// Source pointer must have same element type as dest element type.
        /// Dest slice may have any alignment; source pointer may have any alignment.
        /// The two memory regions must not overlap.
        /// Result type is always void.
        /// Uses the `bin_op` field. LHS is the dest slice. RHS is the source pointer.
        /// If the length is compile-time known (due to the destination or
        /// source being a pointer-to-array), then it is guaranteed to be
        /// greater than zero.
        memcpy,
        /// Uses the `ty_pl` field with payload `Cmpxchg`.
        cmpxchg_weak,
        /// Uses the `ty_pl` field with payload `Cmpxchg`.
        cmpxchg_strong,
        /// Lowers to a memory fence instruction.
        /// Result type is always void.
        /// Uses the `fence` field.
        fence,
        /// Atomically load from a pointer.
        /// Result type is the element type of the pointer.
        /// Uses the `atomic_load` field.
        atomic_load,
        /// Atomically store through a pointer.
        /// Result type is always `void`.
        /// Uses the `bin_op` field. LHS is pointer, RHS is element.
        atomic_store_unordered,
        /// Same as `atomic_store_unordered` but with `AtomicOrder.Monotonic`.
        atomic_store_monotonic,
        /// Same as `atomic_store_unordered` but with `AtomicOrder.Release`.
        atomic_store_release,
        /// Same as `atomic_store_unordered` but with `AtomicOrder.SeqCst`.
        atomic_store_seq_cst,
        /// Atomically read-modify-write via a pointer.
        /// Result type is the element type of the pointer.
        /// Uses the `pl_op` field with payload `AtomicRmw`. Operand is `ptr`.
        atomic_rmw,
        /// Returns true if enum tag value has a name.
        /// Uses the `un_op` field.
        is_named_enum_value,
        /// Given an enum tag value, returns the tag name. The enum type may be non-exhaustive.
        /// Result type is always `[:0]const u8`.
        /// Uses the `un_op` field.
        tag_name,
        /// Given an error value, return the error name. Result type is always `[:0]const u8`.
        /// Uses the `un_op` field.
        error_name,
        /// Returns true if error set has error with value.
        /// Uses the `ty_op` field.
        error_set_has_value,
        /// Constructs a vector, tuple, struct, or array value out of runtime-known elements.
        /// Some of the elements may be comptime-known.
        /// Uses the `ty_pl` field, payload is index of an array of elements, each of which
        /// is a `Ref`. Length of the array is given by the vector type.
        /// If the type is an array with a sentinel, the AIR elements do not include it
        /// explicitly.
        aggregate_init,
        /// Constructs a union from a field index and a runtime-known init value.
        /// Uses the `ty_pl` field with payload `UnionInit`.
        union_init,
        /// Communicates an intent to load memory.
        /// Result is always unused.
        /// Uses the `prefetch` field.
        prefetch,
        /// Computes `(a * b) + c`, but only rounds once.
        /// Uses the `pl_op` field with payload `Bin`.
        /// The operand is the addend. The mulends are lhs and rhs.
        mul_add,
        /// Implements @fieldParentPtr builtin.
        /// Uses the `ty_pl` field.
        field_parent_ptr,
        /// Implements @wasmMemorySize builtin.
        /// Result type is always `u32`.
        /// Uses the `pl_op` field, payload represents the index of the target memory.
        /// The operand is unused and always set to `Ref.none`.
        wasm_memory_size,
        /// Implements @wasmMemoryGrow builtin.
        /// Result type is always `i32`.
        /// Uses the `pl_op` field, payload represents the index of the target memory.
        wasm_memory_grow,
        /// Returns `true` if and only if the operand, an integer with
        /// the same size as the error integer type, is less than the
        /// total number of errors in the Module.
        /// Result type is always `bool`.
        /// Uses the `un_op` field.
        /// Note that the number of errors in the Module cannot be considered stable until
        /// flush().
        cmp_lt_errors_len,
        /// Returns pointer to current error return trace.
        err_return_trace,
        /// Sets the operand as the current error return trace.
        set_err_return_trace,
        /// Convert the address space of a pointer.
        /// Uses the `ty_op` field.
        addrspace_cast,
        /// Saves the error return trace index, if any. Otherwise, returns 0.
        /// Uses the `ty_pl` field.
        save_err_return_trace_index,
        /// Store an element to a vector pointer at an index.
        /// Uses the `vector_store_elem` field.
        vector_store_elem,
        /// Implements @cVaArg builtin.
        /// Uses the `ty_op` field.
        c_va_arg,
        /// Implements @cVaCopy builtin.
        /// Uses the `ty_op` field.
        c_va_copy,
        /// Implements @cVaEnd builtin.
        /// Uses the `un_op` field.
        c_va_end,
        /// Implements @cVaStart builtin.
        /// Uses the `ty` field.
        c_va_start,
        /// Implements @workItemId builtin.
        /// Result type is always `u32`.
        /// Uses the `pl_op` field, payload is the dimension to get the work item id for.
        /// Operand is unused and set to `Ref.none`.
        work_item_id,
        /// Implements @workGroupSize builtin.
        /// Result type is always `u32`.
        /// Uses the `pl_op` field, payload is the dimension to get the work group size for.
        /// Operand is unused and set to `Ref.none`.
        work_group_size,
        /// Implements @workGroupId builtin.
        /// Result type is always `u32`.
        /// Uses the `pl_op` field, payload is the dimension to get the work group id for.
        /// Operand is unused and set to `Ref.none`.
        work_group_id,
        pub fn fromCmpOp(op: std.math.CompareOperator, optimized: bool) Tag {
            switch (op) {
                .lt => return if (optimized) .cmp_lt_optimized else .cmp_lt,
                .lte => return if (optimized) .cmp_lte_optimized else .cmp_lte,
                .eq => return if (optimized) .cmp_eq_optimized else .cmp_eq,
                .gte => return if (optimized) .cmp_gte_optimized else .cmp_gte,
                .gt => return if (optimized) .cmp_gt_optimized else .cmp_gt,
                .neq => return if (optimized) .cmp_neq_optimized else .cmp_neq,
            }
        }
        pub fn toCmpOp(tag: Tag) ?std.math.CompareOperator {
            return switch (tag) {
                .cmp_lt, .cmp_lt_optimized => .lt,
                .cmp_lte, .cmp_lte_optimized => .lte,
                .cmp_eq, .cmp_eq_optimized => .eq,
                .cmp_gte, .cmp_gte_optimized => .gte,
                .cmp_gt, .cmp_gt_optimized => .gt,
                .cmp_neq, .cmp_neq_optimized => .neq,
                else => null,
            };
        }
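        // Illustrative sketch (not in the original source): `fromCmpOp` and
        // `toCmpOp` are inverses for every comparison operator, in both the
        // strict and optimized float modes.
        test "comparison tag round trip" {
            for ([_]std.math.CompareOperator{ .lt, .lte, .eq, .gte, .gt, .neq }) |op| {
                try std.testing.expectEqual(@as(?std.math.CompareOperator, op), fromCmpOp(op, false).toCmpOp());
                try std.testing.expectEqual(@as(?std.math.CompareOperator, op), fromCmpOp(op, true).toCmpOp());
            }
        }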
    };
    /// The position of an AIR instruction within the `Air` instructions array.
    pub const Index = enum(u32) {
        _,
        pub fn toRef(i: Index) Inst.Ref {
            assert(@intFromEnum(i) >> 31 == 0);
            return @enumFromInt((1 << 31) | @intFromEnum(i));
        }
        pub fn toTargetIndex(i: Index) u31 {
            assert(@intFromEnum(i) >> 31 == 1);
            return @truncate(@intFromEnum(i));
        }
    };
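    // Illustrative sketch (not in the original source): converting an index to
    // a `Ref` sets the most-significant tag bit, and `Ref.toIndex` recovers
    // the original index.
    test "Index/Ref round trip" {
        const i: Index = @enumFromInt(42);
        const ref = i.toRef();
        try std.testing.expect(@intFromEnum(ref) >> 31 == 1);
        try std.testing.expectEqual(@as(?Index, i), ref.toIndex());
    }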
    /// Either a reference to a value stored in the InternPool, or a reference to an AIR instruction.
    /// The most-significant bit of the value is a tag bit. This bit is 1 if the value represents an
    /// instruction index and 0 if it represents an InternPool index.
    ///
    /// The hardcoded refs `none` and `var_args_param_type` are exceptions to this rule: they have
    /// their tag bit set but refer to the InternPool.
    pub const Ref = enum(u32) {
        u0_type = @intFromEnum(InternPool.Index.u0_type),
        i0_type = @intFromEnum(InternPool.Index.i0_type),
        u1_type = @intFromEnum(InternPool.Index.u1_type),
        u8_type = @intFromEnum(InternPool.Index.u8_type),
        i8_type = @intFromEnum(InternPool.Index.i8_type),
        u16_type = @intFromEnum(InternPool.Index.u16_type),
        i16_type = @intFromEnum(InternPool.Index.i16_type),
        u29_type = @intFromEnum(InternPool.Index.u29_type),
        u32_type = @intFromEnum(InternPool.Index.u32_type),
        i32_type = @intFromEnum(InternPool.Index.i32_type),
        u64_type = @intFromEnum(InternPool.Index.u64_type),
        i64_type = @intFromEnum(InternPool.Index.i64_type),
        u80_type = @intFromEnum(InternPool.Index.u80_type),
        u128_type = @intFromEnum(InternPool.Index.u128_type),
        i128_type = @intFromEnum(InternPool.Index.i128_type),
        usize_type = @intFromEnum(InternPool.Index.usize_type),
        isize_type = @intFromEnum(InternPool.Index.isize_type),
        c_char_type = @intFromEnum(InternPool.Index.c_char_type),
        c_short_type = @intFromEnum(InternPool.Index.c_short_type),
        c_ushort_type = @intFromEnum(InternPool.Index.c_ushort_type),
        c_int_type = @intFromEnum(InternPool.Index.c_int_type),
        c_uint_type = @intFromEnum(InternPool.Index.c_uint_type),
        c_long_type = @intFromEnum(InternPool.Index.c_long_type),
        c_ulong_type = @intFromEnum(InternPool.Index.c_ulong_type),
        c_longlong_type = @intFromEnum(InternPool.Index.c_longlong_type),
        c_ulonglong_type = @intFromEnum(InternPool.Index.c_ulonglong_type),
        c_longdouble_type = @intFromEnum(InternPool.Index.c_longdouble_type),
        f16_type = @intFromEnum(InternPool.Index.f16_type),
        f32_type = @intFromEnum(InternPool.Index.f32_type),
        f64_type = @intFromEnum(InternPool.Index.f64_type),
        f80_type = @intFromEnum(InternPool.Index.f80_type),
        f128_type = @intFromEnum(InternPool.Index.f128_type),
        anyopaque_type = @intFromEnum(InternPool.Index.anyopaque_type),
        bool_type = @intFromEnum(InternPool.Index.bool_type),
        void_type = @intFromEnum(InternPool.Index.void_type),
        type_type = @intFromEnum(InternPool.Index.type_type),
        anyerror_type = @intFromEnum(InternPool.Index.anyerror_type),
        comptime_int_type = @intFromEnum(InternPool.Index.comptime_int_type),
        comptime_float_type = @intFromEnum(InternPool.Index.comptime_float_type),
        noreturn_type = @intFromEnum(InternPool.Index.noreturn_type),
        anyframe_type = @intFromEnum(InternPool.Index.anyframe_type),
        null_type = @intFromEnum(InternPool.Index.null_type),
        undefined_type = @intFromEnum(InternPool.Index.undefined_type),
        enum_literal_type = @intFromEnum(InternPool.Index.enum_literal_type),
        atomic_order_type = @intFromEnum(InternPool.Index.atomic_order_type),
        atomic_rmw_op_type = @intFromEnum(InternPool.Index.atomic_rmw_op_type),
        calling_convention_type = @intFromEnum(InternPool.Index.calling_convention_type),
        address_space_type = @intFromEnum(InternPool.Index.address_space_type),
        float_mode_type = @intFromEnum(InternPool.Index.float_mode_type),
        reduce_op_type = @intFromEnum(InternPool.Index.reduce_op_type),
        call_modifier_type = @intFromEnum(InternPool.Index.call_modifier_type),
        prefetch_options_type = @intFromEnum(InternPool.Index.prefetch_options_type),
        export_options_type = @intFromEnum(InternPool.Index.export_options_type),
        extern_options_type = @intFromEnum(InternPool.Index.extern_options_type),
        type_info_type = @intFromEnum(InternPool.Index.type_info_type),
        manyptr_u8_type = @intFromEnum(InternPool.Index.manyptr_u8_type),
        manyptr_const_u8_type = @intFromEnum(InternPool.Index.manyptr_const_u8_type),
        manyptr_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.manyptr_const_u8_sentinel_0_type),
        single_const_pointer_to_comptime_int_type = @intFromEnum(InternPool.Index.single_const_pointer_to_comptime_int_type),
        slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_u8_type),
        slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type),
        optional_noreturn_type = @intFromEnum(InternPool.Index.optional_noreturn_type),
        anyerror_void_error_union_type = @intFromEnum(InternPool.Index.anyerror_void_error_union_type),
        adhoc_inferred_error_set_type = @intFromEnum(InternPool.Index.adhoc_inferred_error_set_type),
        generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type),
        empty_struct_type = @intFromEnum(InternPool.Index.empty_struct_type),
        undef = @intFromEnum(InternPool.Index.undef),
        zero = @intFromEnum(InternPool.Index.zero),
        zero_usize = @intFromEnum(InternPool.Index.zero_usize),
        zero_u8 = @intFromEnum(InternPool.Index.zero_u8),
        one = @intFromEnum(InternPool.Index.one),
        one_usize = @intFromEnum(InternPool.Index.one_usize),
        one_u8 = @intFromEnum(InternPool.Index.one_u8),
        four_u8 = @intFromEnum(InternPool.Index.four_u8),
        negative_one = @intFromEnum(InternPool.Index.negative_one),
        calling_convention_c = @intFromEnum(InternPool.Index.calling_convention_c),
        calling_convention_inline = @intFromEnum(InternPool.Index.calling_convention_inline),
        void_value = @intFromEnum(InternPool.Index.void_value),
        unreachable_value = @intFromEnum(InternPool.Index.unreachable_value),
        null_value = @intFromEnum(InternPool.Index.null_value),
        bool_true = @intFromEnum(InternPool.Index.bool_true),
        bool_false = @intFromEnum(InternPool.Index.bool_false),
        empty_struct = @intFromEnum(InternPool.Index.empty_struct),
        generic_poison = @intFromEnum(InternPool.Index.generic_poison),
        /// This Ref does not correspond to any AIR instruction or constant
        /// value. It is used to handle argument types of var args functions.
        var_args_param_type = @intFromEnum(InternPool.Index.var_args_param_type),
        /// This Ref does not correspond to any AIR instruction or constant
        /// value and may instead be used as a sentinel to indicate null.
        none = @intFromEnum(InternPool.Index.none),
        _,
        pub fn toInterned(ref: Ref) ?InternPool.Index {
            assert(ref != .none);
            return ref.toInternedAllowNone();
        }
        pub fn toInternedAllowNone(ref: Ref) ?InternPool.Index {
            return switch (ref) {
                .var_args_param_type => .var_args_param_type,
                .none => .none,
                else => if (@intFromEnum(ref) >> 31 == 0)
                    @enumFromInt(@as(u31, @truncate(@intFromEnum(ref))))
                else
                    null,
            };
        }
        pub fn toIndex(ref: Ref) ?Index {
            assert(ref != .none);
            return ref.toIndexAllowNone();
        }
        pub fn toIndexAllowNone(ref: Ref) ?Index {
            return switch (ref) {
                .var_args_param_type, .none => null,
                else => if (@intFromEnum(ref) >> 31 != 0)
                    @enumFromInt(@as(u31, @truncate(@intFromEnum(ref))))
                else
                    null,
            };
        }
        pub fn toType(ref: Ref) Type {
            return Type.fromInterned(ref.toInterned().?);
        }
    };
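    // Illustrative sketch (not in the original source): a `Ref` whose tag bit
    // is clear resolves to an InternPool index and never to an instruction.
    test "Ref partitioning" {
        try std.testing.expectEqual(@as(?InternPool.Index, .bool_true), Ref.bool_true.toInterned());
        try std.testing.expectEqual(@as(?Index, null), Ref.bool_true.toIndex());
    }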
    /// All instructions have an 8-byte payload, which is contained within
    /// this union. `Tag` determines which union field is active, as well as
    /// how to interpret the data within.
    pub const Data = union {
        no_op: void,
        un_op: Ref,
        bin_op: struct {
            lhs: Ref,
            rhs: Ref,
        },
        ty: Type,
        arg: struct {
            ty: Ref,
            src_index: u32,
        },
        ty_op: struct {
            ty: Ref,
            operand: Ref,
        },
        ty_pl: struct {
            ty: Ref,
            // Index into a different array.
            payload: u32,
        },
        br: struct {
            block_inst: Index,
            operand: Ref,
        },
        pl_op: struct {
            operand: Ref,
            payload: u32,
        },
        dbg_stmt: struct {
            line: u32,
            column: u32,
        },
        fence: std.builtin.AtomicOrder,
        atomic_load: struct {
            ptr: Ref,
            order: std.builtin.AtomicOrder,
        },
        prefetch: struct {
            ptr: Ref,
            rw: std.builtin.PrefetchOptions.Rw,
            locality: u2,
            cache: std.builtin.PrefetchOptions.Cache,
        },
        reduce: struct {
            operand: Ref,
            operation: std.builtin.ReduceOp,
        },
        vector_store_elem: struct {
            vector_ptr: Ref,
            // Index into a different array.
            payload: u32,
        },
        inferred_alloc_comptime: InferredAllocComptime,
        inferred_alloc: InferredAlloc,
        pub const InferredAllocComptime = struct {
            decl_index: InternPool.DeclIndex,
            alignment: InternPool.Alignment,
            is_const: bool,
        };
        pub const InferredAlloc = struct {
            alignment: InternPool.Alignment,
            is_const: bool,
        };
        // Make sure we don't accidentally add a field to make this union
        // bigger than expected. Note that in safety builds, Zig is allowed
        // to insert a secret field for safety checks.
        comptime {
            if (!std.debug.runtime_safety) {
                assert(@sizeOf(Data) == 8);
            }
        }
    };
};
/// Trailing is a list of instruction indexes for every `body_len`.
pub const Block = struct {
    body_len: u32,
};
/// Trailing is a list of instruction indexes for every `body_len`.
pub const DbgInlineBlock = struct {
    func: InternPool.Index,
    body_len: u32,
};
/// Trailing is a list of `Inst.Ref` for every `args_len`.
pub const Call = struct {
    args_len: u32,
};
/// This data is stored inside extra, with two sets of trailing `Inst.Index`:
/// * 0. the then body, according to `then_body_len`.
/// * 1. the else body, according to `else_body_len`.
pub const CondBr = struct {
    then_body_len: u32,
    else_body_len: u32,
};
/// Trailing:
/// * 0. `Case` for each `cases_len`
/// * 1. the else body, according to `else_body_len`.
pub const SwitchBr = struct {
    cases_len: u32,
    else_body_len: u32,
    /// Trailing:
    /// * item: Inst.Ref // for each `items_len`.
    /// * instruction index for each `body_len`.
    pub const Case = struct {
        items_len: u32,
        body_len: u32,
    };
};
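// Illustrative sketch (not in the original source): walking the `Case` records
// and trailing bodies that follow a `SwitchBr` payload, per the layout
// documented above. `switchCaseBodiesLen` is a hypothetical helper, not a real
// API of this file.
fn switchCaseBodiesLen(air: Air, payload_index: u32) usize {
    const switch_br = air.extraData(SwitchBr, payload_index);
    var extra_index = switch_br.end;
    var total: usize = switch_br.data.else_body_len;
    var case_i: u32 = 0;
    while (case_i < switch_br.data.cases_len) : (case_i += 1) {
        const case = air.extraData(SwitchBr.Case, extra_index);
        total += case.data.body_len;
        // Each `Case` header is followed by its items and then its body.
        extra_index = case.end + case.data.items_len + case.data.body_len;
    }
    return total;
}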
/// This data is stored inside extra. Trailing:
/// 0. body: Inst.Index // for each body_len
pub const Try = struct {
    body_len: u32,
};
/// This data is stored inside extra. Trailing:
/// 0. body: Inst.Index // for each body_len
pub const TryPtr = struct {
    ptr: Inst.Ref,
    body_len: u32,
};
pub const StructField = struct {
    /// Whether this is a pointer or byval is determined by the AIR tag.
    struct_operand: Inst.Ref,
    field_index: u32,
};
pub const Bin = struct {
    lhs: Inst.Ref,
    rhs: Inst.Ref,
};
pub const FieldParentPtr = struct {
    field_ptr: Inst.Ref,
    field_index: u32,
};
pub const Shuffle = struct {
    a: Inst.Ref,
    b: Inst.Ref,
    mask: InternPool.Index,
    mask_len: u32,
};
pub const VectorCmp = struct {
    lhs: Inst.Ref,
    rhs: Inst.Ref,
    op: u32,
    pub fn compareOperator(self: VectorCmp) std.math.CompareOperator {
        return @as(std.math.CompareOperator, @enumFromInt(@as(u3, @truncate(self.op))));
    }
    pub fn encodeOp(compare_operator: std.math.CompareOperator) u32 {
        return @intFromEnum(compare_operator);
    }
};
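// Illustrative sketch (not in the original source): `encodeOp` stores the
// comparison operator in the low bits of `op`, and `compareOperator` decodes
// it again. The `.none` refs are placeholders for brevity.
test "VectorCmp op round trip" {
    const vc = VectorCmp{ .lhs = .none, .rhs = .none, .op = VectorCmp.encodeOp(.gte) };
    try std.testing.expectEqual(std.math.CompareOperator.gte, vc.compareOperator());
}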
/// Trailing:
/// 0. `Inst.Ref` for every outputs_len
/// 1. `Inst.Ref` for every inputs_len
/// 2. for every outputs_len
///    - constraint: memory at this position is reinterpreted as a null
///      terminated string.
///    - name: memory at this position is reinterpreted as a null
///      terminated string. pad to the next u32 after the null byte.
/// 3. for every inputs_len
///    - constraint: memory at this position is reinterpreted as a null
///      terminated string.
///    - name: memory at this position is reinterpreted as a null
///      terminated string. pad to the next u32 after the null byte.
/// 4. for every clobbers_len
///    - clobber_name: memory at this position is reinterpreted as a null
///      terminated string. pad to the next u32 after the null byte.
/// 5. A number of u32 elements follow according to the equation `(source_len + 3) / 4`.
///    Memory starting at this position is reinterpreted as the source bytes.
pub const Asm = struct {
    /// Length of the assembly source in bytes.
    source_len: u32,
    outputs_len: u32,
    inputs_len: u32,
    /// The MSB is `is_volatile`.
    /// The rest of the bits are `clobbers_len`.
    flags: u32,
};
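// Illustrative sketch (not in the original source): unpacking the volatile bit
// and clobber count packed into `Asm.flags`.
test "Asm flag packing" {
    const a = Asm{ .source_len = 0, .outputs_len = 0, .inputs_len = 0, .flags = (1 << 31) | 2 };
    const is_volatile = a.flags >> 31 != 0;
    const clobbers_len: u31 = @truncate(a.flags);
    try std.testing.expect(is_volatile);
    try std.testing.expectEqual(@as(u31, 2), clobbers_len);
}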
pub const Cmpxchg = struct {
    ptr: Inst.Ref,
    expected_value: Inst.Ref,
    new_value: Inst.Ref,
    /// 0b00000000000000000000000000000XXX - success_order
    /// 0b00000000000000000000000000XXX000 - failure_order
    flags: u32,
    pub fn successOrder(self: Cmpxchg) std.builtin.AtomicOrder {
        return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags))));
    }
    pub fn failureOrder(self: Cmpxchg) std.builtin.AtomicOrder {
        return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags >> 3))));
    }
};
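// Illustrative sketch (not in the original source): the success and failure
// orderings occupy the low six bits of `flags`, three bits each. The `.none`
// refs are placeholders for brevity.
test "Cmpxchg flag packing" {
    const flags = @as(u32, @intFromEnum(std.builtin.AtomicOrder.SeqCst)) |
        (@as(u32, @intFromEnum(std.builtin.AtomicOrder.Monotonic)) << 3);
    const cx = Cmpxchg{ .ptr = .none, .expected_value = .none, .new_value = .none, .flags = flags };
    try std.testing.expectEqual(std.builtin.AtomicOrder.SeqCst, cx.successOrder());
    try std.testing.expectEqual(std.builtin.AtomicOrder.Monotonic, cx.failureOrder());
}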
pub const AtomicRmw = struct { | |
operand: Inst.Ref, | |
/// 0b00000000000000000000000000000XXX - ordering | |
/// 0b0000000000000000000000000XXXX000 - op | |
flags: u32, | |
pub fn ordering(self: AtomicRmw) std.builtin.AtomicOrder { | |
return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags)))); | |
} | |
pub fn op(self: AtomicRmw) std.builtin.AtomicRmwOp { | |
return @as(std.builtin.AtomicRmwOp, @enumFromInt(@as(u4, @truncate(self.flags >> 3)))); | |
} | |
}; | |
pub const UnionInit = struct { | |
field_index: u32, | |
init: Inst.Ref, | |
}; | |
pub fn getMainBody(air: Air) []const Air.Inst.Index { | |
const body_index = air.extra[@intFromEnum(ExtraIndex.main_block)]; | |
const extra = air.extraData(Block, body_index); | |
return @ptrCast(air.extra[extra.end..][0..extra.data.body_len]); | |
} | |
pub fn typeOf(air: *const Air, inst: Air.Inst.Ref, ip: *const InternPool) Type { | |
if (inst.toInterned()) |ip_index| { | |
return Type.fromInterned(ip.typeOf(ip_index)); | |
} else { | |
return air.typeOfIndex(inst.toIndex().?, ip); | |
} | |
} | |
pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) Type { | |
const datas = air.instructions.items(.data); | |
switch (air.instructions.items(.tag)[@intFromEnum(inst)]) { | |
.add, | |
.add_safe, | |
.add_wrap, | |
.add_sat, | |
.sub, | |
.sub_safe, | |
.sub_wrap, | |
.sub_sat, | |
.mul, | |
.mul_safe, | |
.mul_wrap, | |
.mul_sat, | |
.div_float, | |
.div_trunc, | |
.div_floor, | |
.div_exact, | |
.rem, | |
.mod, | |
.bit_and, | |
.bit_or, | |
.xor, | |
.shr, | |
.shr_exact, | |
.shl, | |
.shl_exact, | |
.shl_sat, | |
.min, | |
.max, | |
.bool_and, | |
.bool_or, | |
.add_optimized, | |
.sub_optimized, | |
.mul_optimized, | |
.div_float_optimized, | |
.div_trunc_optimized, | |
.div_floor_optimized, | |
.div_exact_optimized, | |
.rem_optimized, | |
.mod_optimized, | |
=> return air.typeOf(datas[@intFromEnum(inst)].bin_op.lhs, ip), | |
.sqrt, | |
.sin, | |
.cos, | |
.tan, | |
.exp, | |
.exp2, | |
.log, | |
.log2, | |
.log10, | |
.floor, | |
.ceil, | |
.round, | |
.trunc_float, | |
.neg, | |
.neg_optimized, | |
=> return air.typeOf(datas[@intFromEnum(inst)].un_op, ip), | |
.cmp_lt, | |
.cmp_lte, | |
.cmp_eq, | |
.cmp_gte, | |
.cmp_gt, | |
.cmp_neq, | |
.cmp_lt_optimized, | |
.cmp_lte_optimized, | |
.cmp_eq_optimized, | |
.cmp_gte_optimized, | |
.cmp_gt_optimized, | |
.cmp_neq_optimized, | |
.cmp_lt_errors_len, | |
.is_null, | |
.is_non_null, | |
.is_null_ptr, | |
.is_non_null_ptr, | |
.is_err, | |
.is_non_err, | |
.is_err_ptr, | |
.is_non_err_ptr, | |
.is_named_enum_value, | |
.error_set_has_value, | |
=> return Type.bool, | |
.alloc, | |
.ret_ptr, | |
.err_return_trace, | |
.c_va_start, | |
=> return datas[@intFromEnum(inst)].ty, | |
.arg => return datas[@intFromEnum(inst)].arg.ty.toType(), | |
.assembly, | |
.block, | |
.dbg_inline_block, | |
.struct_field_ptr, | |
.struct_field_val, | |
.slice_elem_ptr, | |
.ptr_elem_ptr, | |
.cmpxchg_weak, | |
.cmpxchg_strong, | |
.slice, | |
.shuffle, | |
.aggregate_init, | |
.union_init, | |
.field_parent_ptr, | |
.cmp_vector, | |
.cmp_vector_optimized, | |
.add_with_overflow, | |
.sub_with_overflow, | |
.mul_with_overflow, | |
.shl_with_overflow, | |
.ptr_add, | |
.ptr_sub, | |
.try_ptr, | |
=> return datas[@intFromEnum(inst)].ty_pl.ty.toType(), | |
.not, | |
.bitcast, | |
.load, | |
.fpext, | |
.fptrunc, | |
.intcast, | |
.trunc, | |
.optional_payload, | |
.optional_payload_ptr, | |
.optional_payload_ptr_set, | |
.errunion_payload_ptr_set, | |
.wrap_optional, | |
.unwrap_errunion_payload, | |
.unwrap_errunion_err, | |
.unwrap_errunion_payload_ptr, | |
.unwrap_errunion_err_ptr, | |
.wrap_errunion_payload, | |
.wrap_errunion_err, | |
.slice_ptr, | |
.ptr_slice_len_ptr, | |
.ptr_slice_ptr_ptr, | |
.struct_field_ptr_index_0, | |
.struct_field_ptr_index_1, | |
.struct_field_ptr_index_2, | |
.struct_field_ptr_index_3, | |
.array_to_slice, | |
.int_from_float, | |
.int_from_float_optimized, | |
.float_from_int, | |
.splat, | |
.get_union_tag, | |
.clz, | |
.ctz, | |
.popcount, | |
.byte_swap, | |
.bit_reverse, | |
.addrspace_cast, | |
.c_va_arg, | |
.c_va_copy, | |
.abs, | |
=> return datas[@intFromEnum(inst)].ty_op.ty.toType(), | |
.loop, | |
.br, | |
.cond_br, | |
.switch_br, | |
.ret, | |
.ret_safe, | |
.ret_load, | |
.unreach, | |
.trap, | |
=> return Type.noreturn, | |
.breakpoint, | |
.dbg_stmt, | |
.dbg_var_ptr, | |
.dbg_var_val, | |
.store, | |
.store_safe, | |
.fence, | |
.atomic_store_unordered, | |
.atomic_store_monotonic, | |
.atomic_store_release, | |
.atomic_store_seq_cst, | |
.memset, | |
.memset_safe, | |
.memcpy, | |
.set_union_tag, | |
.prefetch, | |
.set_err_return_trace, | |
.vector_store_elem, | |
.c_va_end, | |
=> return Type.void, | |
.int_from_ptr, | |
.slice_len, | |
.ret_addr, | |
.frame_addr, | |
.save_err_return_trace_index, | |
=> return Type.usize, | |
.wasm_memory_grow => return Type.i32, | |
.wasm_memory_size => return Type.u32, | |
.int_from_bool => return Type.u1, | |
.tag_name, .error_name => return Type.slice_const_u8_sentinel_0, | |
.call, .call_always_tail, .call_never_tail, .call_never_inline => { | |
const callee_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip); | |
return Type.fromInterned(ip.funcTypeReturnType(callee_ty.toIntern())); | |
}, | |
.slice_elem_val, .ptr_elem_val, .array_elem_val => { | |
const ptr_ty = air.typeOf(datas[@intFromEnum(inst)].bin_op.lhs, ip); | |
return ptr_ty.childTypeIp(ip); | |
}, | |
.atomic_load => { | |
const ptr_ty = air.typeOf(datas[@intFromEnum(inst)].atomic_load.ptr, ip); | |
return ptr_ty.childTypeIp(ip); | |
}, | |
.atomic_rmw => { | |
const ptr_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip); | |
return ptr_ty.childTypeIp(ip); | |
}, | |
.reduce, .reduce_optimized => { | |
const operand_ty = air.typeOf(datas[@intFromEnum(inst)].reduce.operand, ip); | |
return Type.fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child); | |
}, | |
.mul_add => return air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip), | |
.select => { | |
const extra = air.extraData(Air.Bin, datas[@intFromEnum(inst)].pl_op.payload).data; | |
return air.typeOf(extra.lhs, ip); | |
}, | |
.@"try" => { | |
const err_union_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip); | |
return Type.fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type); | |
}, | |
.work_item_id, | |
.work_group_size, | |
.work_group_id, | |
=> return Type.u32, | |
.inferred_alloc => unreachable, | |
.inferred_alloc_comptime => unreachable, | |
} | |
} | |
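// For example (reading off the mapping above): comparison tags like | |
// `.cmp_eq` always yield `Type.bool`, pure side-effect tags like `.store` | |
// yield `Type.void`, and binary arithmetic such as `.add` takes the type | |
// of its left-hand operand. | |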
/// Returns the requested data, as well as the new index which is at the start of the | |
/// trailers for the object. | |
pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } { | |
const fields = std.meta.fields(T); | |
var i: usize = index; | |
var result: T = undefined; | |
inline for (fields) |field| { | |
@field(result, field.name) = switch (field.type) { | |
u32 => air.extra[i], | |
Inst.Ref => @as(Inst.Ref, @enumFromInt(air.extra[i])), | |
i32 => @as(i32, @bitCast(air.extra[i])), | |
InternPool.Index => @as(InternPool.Index, @enumFromInt(air.extra[i])), | |
else => @compileError("bad field type: " ++ @typeName(field.type)), | |
}; | |
i += 1; | |
} | |
return .{ | |
.data = result, | |
.end = i, | |
}; | |
} | |
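// A hedged, self-contained sketch (not from the original source) of the | |
// flat-array decoding technique that `extraData` implements: struct fields | |
// are read off a `[]const u32` in declaration order, and the final cursor | |
// marks where the trailing data begins. | |
test "extraData-style decoding sketch" { | |
    const MiniBin = struct { lhs: u32, rhs: u32 }; | |
    const words = [_]u32{ 7, 9, 42 }; // 42 plays the role of a trailer | |
    var i: usize = 0; | |
    var result: MiniBin = undefined; | |
    inline for (std.meta.fields(MiniBin)) |field| { | |
        @field(result, field.name) = words[i]; | |
        i += 1; | |
    } | |
    try std.testing.expectEqual(@as(u32, 7), result.lhs); | |
    try std.testing.expectEqual(@as(u32, 9), result.rhs); | |
    try std.testing.expectEqual(@as(usize, 2), i); // the trailers start here | |
} | |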
pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { | |
air.instructions.deinit(gpa); | |
gpa.free(air.extra); | |
air.* = undefined; | |
} | |
pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref { | |
return switch (ip_index) { | |
.var_args_param_type => .var_args_param_type, | |
.none => .none, | |
else => { | |
assert(@intFromEnum(ip_index) >> 31 == 0); | |
return @enumFromInt(@as(u31, @intCast(@intFromEnum(ip_index)))); | |
}, | |
}; | |
} | |
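// Hedged note: this relies on `Inst.Ref` reserving the low 31-bit range for | |
// interned values (hence the assert above), so for instance | |
// `internedToRef(.u8_type)` is the ref whose integer value equals | |
// `@intFromEnum(InternPool.Index.u8_type)`. | |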
/// Returns `null` if runtime-known. | |
pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { | |
if (inst.toInterned()) |ip_index| { | |
return Value.fromInterned(ip_index); | |
} | |
const index = inst.toIndex().?; | |
return air.typeOfIndex(index, &mod.intern_pool).onePossibleValue(mod); | |
} | |
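// E.g. a ref to an interned constant resolves to its `Value` immediately, | |
// while a runtime instruction only yields a value when its type has exactly | |
// one possible value (such as `void`); anything else comes back `null`. | |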
pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 { | |
const bytes = std.mem.sliceAsBytes(air.extra[index..]); | |
var end: usize = 0; | |
while (bytes[end] != 0) { | |
end += 1; | |
} | |
return bytes[0..end :0]; | |
} | |
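// A minimal sketch (illustrative only) of the storage scheme above: strings | |
// live 0-terminated inside the u32 `extra` array and are read back bytewise. | |
test "null-terminated string in a u32 array sketch" { | |
    const words = [_]u32{std.mem.bytesToValue(u32, "hi\x00\x00")}; | |
    const bytes = std.mem.sliceAsBytes(words[0..]); | |
    var end: usize = 0; | |
    while (bytes[end] != 0) end += 1; | |
    try std.testing.expectEqualStrings("hi", bytes[0..end]); | |
} | |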
/// Returns whether the given instruction must always be lowered, for instance | |
/// because it can cause side effects. If an instruction does not need to be | |
/// lowered, and Liveness determines its result is unused, backends should | |
/// avoid lowering it. | |
pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { | |
const data = air.instructions.items(.data)[@intFromEnum(inst)]; | |
return switch (air.instructions.items(.tag)[@intFromEnum(inst)]) { | |
.arg, | |
.block, | |
.loop, | |
.br, | |
.trap, | |
.breakpoint, | |
.call, | |
.call_always_tail, | |
.call_never_tail, | |
.call_never_inline, | |
.cond_br, | |
.switch_br, | |
.@"try", | |
.try_ptr, | |
.dbg_stmt, | |
.dbg_inline_block, | |
.dbg_var_ptr, | |
.dbg_var_val, | |
.ret, | |
.ret_safe, | |
.ret_load, | |
.store, | |
.store_safe, | |
.unreach, | |
.optional_payload_ptr_set, | |
.errunion_payload_ptr_set, | |
.set_union_tag, | |
.memset, | |
.memset_safe, | |
.memcpy, | |
.cmpxchg_weak, | |
.cmpxchg_strong, | |
.fence, | |
.atomic_store_unordered, | |
.atomic_store_monotonic, | |
.atomic_store_release, | |
.atomic_store_seq_cst, | |
.atomic_rmw, | |
.prefetch, | |
.wasm_memory_grow, | |
.set_err_return_trace, | |
.vector_store_elem, | |
.c_va_arg, | |
.c_va_copy, | |
.c_va_end, | |
.c_va_start, | |
.add_safe, | |
.sub_safe, | |
.mul_safe, | |
=> true, | |
.add, | |
.add_optimized, | |
.add_wrap, | |
.add_sat, | |
.sub, | |
.sub_optimized, | |
.sub_wrap, | |
.sub_sat, | |
.mul, | |
.mul_optimized, | |
.mul_wrap, | |
.mul_sat, | |
.div_float, | |
.div_float_optimized, | |
.div_trunc, | |
.div_trunc_optimized, | |
.div_floor, | |
.div_floor_optimized, | |
.div_exact, | |
.div_exact_optimized, | |
.rem, | |
.rem_optimized, | |
.mod, | |
.mod_optimized, | |
.ptr_add, | |
.ptr_sub, | |
.max, | |
.min, | |
.add_with_overflow, | |
.sub_with_overflow, | |
.mul_with_overflow, | |
.shl_with_overflow, | |
.alloc, | |
.inferred_alloc, | |
.inferred_alloc_comptime, | |
.ret_ptr, | |
.bit_and, | |
.bit_or, | |
.shr, | |
.shr_exact, | |
.shl, | |
.shl_exact, | |
.shl_sat, | |
.xor, | |
.not, | |
.bitcast, | |
.ret_addr, | |
.frame_addr, | |
.clz, | |
.ctz, | |
.popcount, | |
.byte_swap, | |
.bit_reverse, | |
.sqrt, | |
.sin, | |
.cos, | |
.tan, | |
.exp, | |
.exp2, | |
.log, | |
.log2, | |
.log10, | |
.abs, | |
.floor, | |
.ceil, | |
.round, | |
.trunc_float, | |
.neg, | |
.neg_optimized, | |
.cmp_lt, | |
.cmp_lt_optimized, | |
.cmp_lte, | |
.cmp_lte_optimized, | |
.cmp_eq, | |
.cmp_eq_optimized, | |
.cmp_gte, | |
.cmp_gte_optimized, | |
.cmp_gt, | |
.cmp_gt_optimized, | |
.cmp_neq, | |
.cmp_neq_optimized, | |
.cmp_vector, | |
.cmp_vector_optimized, | |
.is_null, | |
.is_non_null, | |
.is_null_ptr, | |
.is_non_null_ptr, | |
.is_err, | |
.is_non_err, | |
.is_err_ptr, | |
.is_non_err_ptr, | |
.bool_and, | |
.bool_or, | |
.int_from_ptr, | |
.int_from_bool, | |
.fptrunc, | |
.fpext, | |
.intcast, | |
.trunc, | |
.optional_payload, | |
.optional_payload_ptr, | |
.wrap_optional, | |
.unwrap_errunion_payload, | |
.unwrap_errunion_err, | |
.unwrap_errunion_payload_ptr, | |
.unwrap_errunion_err_ptr, | |
.wrap_errunion_payload, | |
.wrap_errunion_err, | |
.struct_field_ptr, | |
.struct_field_ptr_index_0, | |
.struct_field_ptr_index_1, | |
.struct_field_ptr_index_2, | |
.struct_field_ptr_index_3, | |
.struct_field_val, | |
.get_union_tag, | |
.slice, | |
.slice_len, | |
.slice_ptr, | |
.ptr_slice_len_ptr, | |
.ptr_slice_ptr_ptr, | |
.array_elem_val, | |
.slice_elem_ptr, | |
.ptr_elem_ptr, | |
.array_to_slice, | |
.int_from_float, | |
.int_from_float_optimized, | |
.float_from_int, | |
.reduce, | |
.reduce_optimized, | |
.splat, | |
.shuffle, | |
.select, | |
.is_named_enum_value, | |
.tag_name, | |
.error_name, | |
.error_set_has_value, | |
.aggregate_init, | |
.union_init, | |
.mul_add, | |
.field_parent_ptr, | |
.wasm_memory_size, | |
.cmp_lt_errors_len, | |
.err_return_trace, | |
.addrspace_cast, | |
.save_err_return_trace_index, | |
.work_item_id, | |
.work_group_size, | |
.work_group_id, | |
=> false, | |
.assembly => { | |
const extra = air.extraData(Air.Asm, data.ty_pl.payload); | |
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; | |
return is_volatile or if (extra.data.outputs_len == 1) | |
@as(Air.Inst.Ref, @enumFromInt(air.extra[extra.end])) != .none | |
else | |
extra.data.outputs_len > 1; | |
}, | |
.load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip), | |
.slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip), | |
.atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), | |
}; | |
} | |
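// Hedged sketch of the backend pattern this enables (names illustrative, | |
// not a real backend API): skip instructions that Liveness marks unused | |
// and that don't have to be lowered. | |
// | |
//     for (body) |inst| { | |
//         if (liveness.isUnused(inst) and !air.mustLower(inst, ip)) continue; | |
//         try emit(inst); | |
//     } | |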
const builtin = @import("builtin"); | |
const std = @import("std"); | |
const build_options = @import("build_options"); | |
const Ast = std.zig.Ast; | |
const Autodoc = @This(); | |
const Compilation = @import("Compilation.zig"); | |
const Zcu = @import("Module.zig"); | |
const File = Zcu.File; | |
const Module = @import("Package.zig").Module; | |
const Tokenizer = std.zig.Tokenizer; | |
const InternPool = @import("InternPool.zig"); | |
const Zir = std.zig.Zir; | |
const Ref = Zir.Inst.Ref; | |
const log = std.log.scoped(.autodoc); | |
const renderer = @import("autodoc/render_source.zig"); | |
zcu: *Zcu, | |
arena: std.mem.Allocator, | |
// The goal of autodoc is to fill up these arrays | |
// that will then be serialized as JSON and consumed | |
// by the JS frontend. | |
modules: std.AutoArrayHashMapUnmanaged(*Module, DocData.DocModule) = .{}, | |
files: std.AutoArrayHashMapUnmanaged(*File, usize) = .{}, | |
calls: std.ArrayListUnmanaged(DocData.Call) = .{}, | |
types: std.ArrayListUnmanaged(DocData.Type) = .{}, | |
decls: std.ArrayListUnmanaged(DocData.Decl) = .{}, | |
exprs: std.ArrayListUnmanaged(DocData.Expr) = .{}, | |
ast_nodes: std.ArrayListUnmanaged(DocData.AstNode) = .{}, | |
comptime_exprs: std.ArrayListUnmanaged(DocData.ComptimeExpr) = .{}, | |
guide_sections: std.ArrayListUnmanaged(Section) = .{}, | |
// These fields hold temporary state of the analysis process | |
// and are mainly used by the decl path resolving algorithm. | |
pending_ref_paths: std.AutoHashMapUnmanaged( | |
*DocData.Expr, // pointer to the decl path tail end (i.e. `&decl_path[decl_path.len - 1]`) | |
std.ArrayListUnmanaged(RefPathResumeInfo), | |
) = .{}, | |
ref_paths_pending_on_decls: std.AutoHashMapUnmanaged( | |
*Scope.DeclStatus, | |
std.ArrayListUnmanaged(RefPathResumeInfo), | |
) = .{}, | |
ref_paths_pending_on_types: std.AutoHashMapUnmanaged( | |
usize, | |
std.ArrayListUnmanaged(RefPathResumeInfo), | |
) = .{}, | |
/// A set of ZIR instruction refs which have a meaning other than the | |
/// instruction they refer to. For instance, during analysis of the arguments to | |
/// a `call`, the index of the `call` itself is repurposed to refer to the | |
/// parameter type. | |
/// TODO: there should be some kind of proper handling for these instructions; | |
/// currently we just ignore them! | |
repurposed_insts: std.AutoHashMapUnmanaged(Zir.Inst.Index, void) = .{}, | |
const RefPathResumeInfo = struct { | |
file: *File, | |
ref_path: []DocData.Expr, | |
}; | |
/// Used to accumulate src_node offsets. | |
/// In ZIR, all ast node indices are relative to the parent decl. | |
/// More concretely, `union_decl`, `struct_decl`, `enum_decl` and `opaque_decl` | |
/// and the value of each of their decls participate in the relative offset | |
/// counting, and nothing else. | |
/// We keep track of the line and byte values for these instructions in order | |
/// to avoid tokenizing every file (on new lines) from the start every time. | |
const SrcLocInfo = struct { | |
bytes: u32 = 0, | |
line: usize = 0, | |
src_node: u32 = 0, | |
}; | |
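// A standalone sketch (illustrative only) of the incremental counting idea: | |
// remember the last (byte, line) pair so that the next lookup only scans | |
// the delta instead of re-tokenizing from the start of the file. | |
test "incremental line counting sketch" { | |
    const src = "a\nb\nc\nd"; | |
    var loc: SrcLocInfo = .{}; | |
    const target: u32 = 4; // byte offset of "c" | |
    for (src[loc.bytes..target]) |ch| { | |
        if (ch == '\n') loc.line += 1; | |
    } | |
    loc.bytes = target; | |
    try std.testing.expectEqual(@as(usize, 2), loc.line); | |
} | |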
const Section = struct { | |
name: []const u8 = "", // empty string is the default section | |
guides: std.ArrayListUnmanaged(Guide) = .{}, | |
const Guide = struct { | |
name: []const u8, | |
body: []const u8, | |
}; | |
}; | |
pub fn generate(zcu: *Zcu, output_dir: std.fs.Dir) !void { | |
var arena_allocator = std.heap.ArenaAllocator.init(zcu.gpa); | |
defer arena_allocator.deinit(); | |
var autodoc: Autodoc = .{ | |
.zcu = zcu, | |
.arena = arena_allocator.allocator(), | |
}; | |
try autodoc.generateZirData(output_dir); | |
const lib_dir = zcu.comp.zig_lib_directory.handle; | |
try lib_dir.copyFile("docs/main.js", output_dir, "main.js", .{}); | |
try lib_dir.copyFile("docs/ziglexer.js", output_dir, "ziglexer.js", .{}); | |
try lib_dir.copyFile("docs/commonmark.js", output_dir, "commonmark.js", .{}); | |
try lib_dir.copyFile("docs/index.html", output_dir, "index.html", .{}); | |
} | |
fn generateZirData(self: *Autodoc, output_dir: std.fs.Dir) !void { | |
const root_src_path = self.zcu.main_mod.root_src_path; | |
const joined_src_path = try self.zcu.main_mod.root.joinString(self.arena, root_src_path); | |
defer self.arena.free(joined_src_path); | |
const abs_root_src_path = try std.fs.path.resolve(self.arena, &.{ ".", joined_src_path }); | |
defer self.arena.free(abs_root_src_path); | |
const file = self.zcu.import_table.get(abs_root_src_path).?; // file is expected to be present in the import table | |
// Append all the types in Zir.Inst.Ref. | |
{ | |
comptime std.debug.assert(@intFromEnum(InternPool.Index.first_type) == 0); | |
var i: u32 = 0; | |
while (i <= @intFromEnum(InternPool.Index.last_type)) : (i += 1) { | |
const ip_index = @as(InternPool.Index, @enumFromInt(i)); | |
var tmpbuf = std.ArrayList(u8).init(self.arena); | |
if (ip_index == .generic_poison_type) { | |
// Not a real type, doesn't have a normal name | |
try tmpbuf.writer().writeAll("(generic poison)"); | |
} else { | |
try @import("type.zig").Type.fromInterned(ip_index).fmt(self.zcu).format("", .{}, tmpbuf.writer()); | |
} | |
try self.types.append( | |
self.arena, | |
switch (ip_index) { | |
.u0_type, | |
.i0_type, | |
.u1_type, | |
.u8_type, | |
.i8_type, | |
.u16_type, | |
.i16_type, | |
.u29_type, | |
.u32_type, | |
.i32_type, | |
.u64_type, | |
.i64_type, | |
.u80_type, | |
.u128_type, | |
.i128_type, | |
.usize_type, | |
.isize_type, | |
.c_char_type, | |
.c_short_type, | |
.c_ushort_type, | |
.c_int_type, | |
.c_uint_type, | |
.c_long_type, | |
.c_ulong_type, | |
.c_longlong_type, | |
.c_ulonglong_type, | |
=> .{ | |
.Int = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.f16_type, | |
.f32_type, | |
.f64_type, | |
.f80_type, | |
.f128_type, | |
.c_longdouble_type, | |
=> .{ | |
.Float = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.comptime_int_type => .{ | |
.ComptimeInt = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.comptime_float_type => .{ | |
.ComptimeFloat = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.anyopaque_type => .{ | |
.ComptimeExpr = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.bool_type => .{ | |
.Bool = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.noreturn_type => .{ | |
.NoReturn = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.void_type => .{ | |
.Void = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.type_info_type => .{ | |
.ComptimeExpr = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.type_type => .{ | |
.Type = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.anyerror_type => .{ | |
.ErrorSet = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
// These should be distinct types, but since they are defined in | |
// std.builtin, we don't get their AST nodes etc. unless we analyze std. | |
.calling_convention_type, | |
.atomic_order_type, | |
.atomic_rmw_op_type, | |
.address_space_type, | |
.float_mode_type, | |
.reduce_op_type, | |
.call_modifier_type, | |
.prefetch_options_type, | |
.export_options_type, | |
.extern_options_type, | |
=> .{ | |
.Type = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.manyptr_u8_type => .{ | |
.Pointer = .{ | |
.size = .Many, | |
.child = .{ .type = @intFromEnum(InternPool.Index.u8_type) }, | |
.is_mutable = true, | |
}, | |
}, | |
.manyptr_const_u8_type => .{ | |
.Pointer = .{ | |
.size = .Many, | |
.child = .{ .type = @intFromEnum(InternPool.Index.u8_type) }, | |
}, | |
}, | |
.manyptr_const_u8_sentinel_0_type => .{ | |
.Pointer = .{ | |
.size = .Many, | |
.child = .{ .type = @intFromEnum(InternPool.Index.u8_type) }, | |
.sentinel = .{ .int = .{ .value = 0 } }, | |
}, | |
}, | |
.single_const_pointer_to_comptime_int_type => .{ | |
.Pointer = .{ | |
.size = .One, | |
.child = .{ .type = @intFromEnum(InternPool.Index.comptime_int_type) }, | |
}, | |
}, | |
.slice_const_u8_type => .{ | |
.Pointer = .{ | |
.size = .Slice, | |
.child = .{ .type = @intFromEnum(InternPool.Index.u8_type) }, | |
}, | |
}, | |
.slice_const_u8_sentinel_0_type => .{ | |
.Pointer = .{ | |
.size = .Slice, | |
.child = .{ .type = @intFromEnum(InternPool.Index.u8_type) }, | |
.sentinel = .{ .int = .{ .value = 0 } }, | |
}, | |
}, | |
// Not fully correct | |
// since it actually has no src or line_number | |
.empty_struct_type => .{ | |
.Struct = .{ | |
.name = "", | |
.src = 0, | |
.is_tuple = false, | |
.line_number = 0, | |
.parent_container = null, | |
.layout = null, | |
}, | |
}, | |
.anyerror_void_error_union_type => .{ | |
.ErrorUnion = .{ | |
.lhs = .{ .type = @intFromEnum(InternPool.Index.anyerror_type) }, | |
.rhs = .{ .type = @intFromEnum(InternPool.Index.void_type) }, | |
}, | |
}, | |
.anyframe_type => .{ | |
.AnyFrame = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.enum_literal_type => .{ | |
.EnumLiteral = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.undefined_type => .{ | |
.Undefined = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.null_type => .{ | |
.Null = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
.optional_noreturn_type => .{ | |
.Optional = .{ | |
.name = try tmpbuf.toOwnedSlice(), | |
.child = .{ .type = @intFromEnum(InternPool.Index.noreturn_type) }, | |
}, | |
}, | |
// Poison and special tag | |
.generic_poison_type, | |
.var_args_param_type, | |
.adhoc_inferred_error_set_type, | |
=> .{ | |
.Type = .{ .name = try tmpbuf.toOwnedSlice() }, | |
}, | |
// We want to catch new types added to InternPool.Index | |
else => unreachable, | |
}, | |
); | |
} | |
} | |
const rootName = blk: { | |
const rootName = std.fs.path.basename(self.zcu.main_mod.root_src_path); | |
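// strip the trailing ".zig" (4 bytes) from the root file name | |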
break :blk rootName[0 .. rootName.len - 4]; | |
}; | |
const main_type_index = self.types.items.len; | |
{ | |
try self.modules.put(self.arena, self.zcu.main_mod, .{ | |
.name = rootName, | |
.main = main_type_index, | |
.table = .{}, | |
}); | |
try self.modules.entries.items(.value)[0].table.put( | |
self.arena, | |
self.zcu.main_mod, | |
.{ | |
.name = rootName, | |
.value = 0, | |
}, | |
); | |
} | |
var root_scope = Scope{ | |
.parent = null, | |
.enclosing_type = null, | |
}; | |
const tldoc_comment = try self.getTLDocComment(file); | |
const cleaned_tldoc_comment = try self.findGuidePaths(file, tldoc_comment); | |
defer self.arena.free(cleaned_tldoc_comment); | |
try self.ast_nodes.append(self.arena, .{ | |
.name = "(root)", | |
.docs = cleaned_tldoc_comment, | |
}); | |
try self.files.put(self.arena, file, main_type_index); | |
_ = try self.walkInstruction( | |
file, | |
&root_scope, | |
.{}, | |
.main_struct_inst, | |
false, | |
null, | |
); | |
if (self.ref_paths_pending_on_decls.count() > 0) { | |
@panic("some decl paths were never fully analyzed (pending on decls)"); | |
} | |
if (self.ref_paths_pending_on_types.count() > 0) { | |
@panic("some decl paths were never fully analyzed (pending on types)"); | |
} | |
if (self.pending_ref_paths.count() > 0) { | |
@panic("some decl paths were never fully analyzed"); | |
} | |
var data = DocData{ | |
.modules = self.modules, | |
.files = self.files, | |
.calls = self.calls.items, | |
.types = self.types.items, | |
.decls = self.decls.items, | |
.exprs = self.exprs.items, | |
.astNodes = self.ast_nodes.items, | |
.comptimeExprs = self.comptime_exprs.items, | |
.guideSections = self.guide_sections, | |
}; | |
inline for (comptime std.meta.tags(std.meta.FieldEnum(DocData))) |f| { | |
const field_name = @tagName(f); | |
const file_name = "data-" ++ field_name ++ ".js"; | |
const data_js_f = try output_dir.createFile(file_name, .{}); | |
defer data_js_f.close(); | |
var buffer = std.io.bufferedWriter(data_js_f.writer()); | |
const out = buffer.writer(); | |
try out.print("var {s} =", .{field_name}); | |
var jsw = std.json.writeStream(out, .{ | |
.whitespace = .minified, | |
.emit_null_optional_fields = true, | |
}); | |
switch (f) { | |
.files => try writeFileTableToJson(data.files, data.modules, &jsw), | |
.guideSections => try writeGuidesToJson(data.guideSections, &jsw), | |
.modules => try jsw.write(data.modules.values()), | |
else => try jsw.write(@field(data, field_name)), | |
} | |
// try std.json.stringifyArbitraryDepth( | |
// self.arena, | |
// @field(data, field.name), | |
// .{ | |
// .whitespace = .minified, | |
// .emit_null_optional_fields = true, | |
// }, | |
// out, | |
// ); | |
try out.print(";", .{}); | |
// last thing (that can fail) that we do is flush | |
try buffer.flush(); | |
} | |
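// For instance, the `types` field above lands in "data-types.js" with | |
// contents like `var types = [...];`, which the static docs frontend can | |
// then load as a plain script. | |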
{ | |
output_dir.makeDir("src") catch |e| switch (e) { | |
error.PathAlreadyExists => {}, | |
else => |err| return err, | |
}; | |
const html_dir = try output_dir.openDir("src", .{}); | |
var files_iterator = self.files.iterator(); | |
while (files_iterator.next()) |entry| { | |
const sub_file_path = entry.key_ptr.*.sub_file_path; | |
const file_module = entry.key_ptr.*.mod; | |
const module_name = (self.modules.get(file_module) orelse continue).name; | |
const file_path = std.fs.path.dirname(sub_file_path) orelse ""; | |
const file_name = if (file_path.len > 0) sub_file_path[file_path.len + 1 ..] else sub_file_path; | |
const html_file_name = try std.mem.concat(self.arena, u8, &.{ file_name, ".html" }); | |
defer self.arena.free(html_file_name); | |
const dir_name = try std.fs.path.join(self.arena, &.{ module_name, file_path }); | |
defer self.arena.free(dir_name); | |
var dir = try html_dir.makeOpenPath(dir_name, .{}); | |
defer dir.close(); | |
const html_file = dir.createFile(html_file_name, .{}) catch |err| switch (err) { | |
error.PathAlreadyExists => try dir.openFile(html_file_name, .{}), | |
else => return err, | |
}; | |
defer html_file.close(); | |
var buffer = std.io.bufferedWriter(html_file.writer()); | |
const out = buffer.writer(); | |
try renderer.genHtml(self.zcu.gpa, entry.key_ptr.*, out); | |
try buffer.flush(); | |
} | |
} | |
} | |
/// Represents a chain of scopes, used to resolve decl references to the | |
/// corresponding entry in `self.decls`. It also keeps track of whether | |
/// a given decl has been analyzed or not. | |
const Scope = struct { | |
parent: ?*Scope, | |
map: std.AutoHashMapUnmanaged( | |
Zir.NullTerminatedString, // index into the current file's string table (decl name) | |
*DeclStatus, | |
) = .{}, | |
enclosing_type: ?usize, // index into `types`, null = file top-level struct | |
pub const DeclStatus = union(enum) { | |
Analyzed: usize, // index into `decls` | |
Pending, | |
NotRequested: u32, // instr_index | |
}; | |
/// Returns a pointer so that the caller has a chance to modify the value | |
/// in case they decide to start analyzing a previously not requested decl. | |
/// Another reason is that in some places we use the pointer to uniquely | |
/// refer to a decl, as we wait for it to be analyzed. This means that | |
/// those pointers must stay stable. | |
pub fn resolveDeclName(self: Scope, string_table_idx: Zir.NullTerminatedString, file: *File, inst: Zir.Inst.OptionalIndex) *DeclStatus { | |
var cur: ?*const Scope = &self; | |
return while (cur) |s| : (cur = s.parent) { | |
break s.map.get(string_table_idx) orelse continue; | |
} else { | |
printWithOptionalContext( | |
file, | |
inst, | |
"Could not find `{s}`\n\n", | |
.{file.zir.nullTerminatedString(string_table_idx)}, | |
); | |
unreachable; | |
}; | |
} | |
pub fn insertDeclRef( | |
self: *Scope, | |
arena: std.mem.Allocator, | |
decl_name_index: Zir.NullTerminatedString, // index into the current file's string table | |
decl_status: DeclStatus, | |
) !void { | |
const decl_status_ptr = try arena.create(DeclStatus); | |
errdefer arena.destroy(decl_status_ptr); | |
decl_status_ptr.* = decl_status; | |
try self.map.put(arena, decl_name_index, decl_status_ptr); | |
} | |
}; | |
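// A hedged, self-contained sketch (simplified types, not the real maps) of | |
// the parent-chain walk that `resolveDeclName` performs: search outward | |
// through the scopes until one of them knows the name. | |
test "scope chain lookup sketch" { | |
    const MiniScope = struct { | |
        parent: ?*const @This(), | |
        name: []const u8, | |
        fn find(self: *const @This(), needle: []const u8) ?*const @This() { | |
            var cur: ?*const @This() = self; | |
            return while (cur) |s| : (cur = s.parent) { | |
                if (std.mem.eql(u8, s.name, needle)) break s; | |
            } else null; | |
        } | |
    }; | |
    const root_scope = MiniScope{ .parent = null, .name = "root" }; | |
    const child = MiniScope{ .parent = &root_scope, .name = "child" }; | |
    try std.testing.expect(child.find("root") != null); | |
    try std.testing.expect(child.find("missing") == null); | |
} | |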
/// The output of our analysis process. | |
const DocData = struct { | |
// NOTE: editing fields of DocData requires also updating: | |
// - the deployment script for ziglang.org | |
// - imports in index.html | |
typeKinds: []const []const u8 = std.meta.fieldNames(DocTypeKinds), | |
rootMod: u32 = 0, | |
modules: std.AutoArrayHashMapUnmanaged(*Module, DocModule), | |
// non-hardcoded stuff | |
astNodes: []AstNode, | |
calls: []Call, | |
files: std.AutoArrayHashMapUnmanaged(*File, usize), | |
types: []Type, | |
decls: []Decl, | |
exprs: []Expr, | |
comptimeExprs: []ComptimeExpr, | |
guideSections: std.ArrayListUnmanaged(Section), | |
const Call = struct { | |
func: Expr, | |
args: []Expr, | |
ret: Expr, | |
}; | |
/// All the type "families" as described by `std.builtin.TypeId` | |
/// plus a couple extra that are unique to our use case. | |
/// | |
/// `Unanalyzed` is used so that we can refer to types that have started | |
/// analysis but that haven't been fully analyzed yet (in case we find | |
/// self-referential stuff, like `@This()`). | |
/// | |
/// `ComptimeExpr` represents the result of a piece of comptime logic | |
/// that we weren't able to analyze fully. Examples of that are comptime | |
/// function calls and comptime if / switch / ... expressions. | |
const DocTypeKinds = @typeInfo(Type).Union.tag_type.?; | |
const ComptimeExpr = struct { | |
code: []const u8, | |
}; | |
const DocModule = struct { | |
name: []const u8 = "(root)", | |
file: usize = 0, // index into `files` | |
main: usize = 0, // index into `types` | |
table: std.AutoHashMapUnmanaged(*Module, TableEntry), | |
pub const TableEntry = struct { | |
name: []const u8, | |
value: usize, | |
}; | |
pub fn jsonStringify(self: DocModule, jsw: anytype) !void { | |
try jsw.beginObject(); | |
inline for (comptime std.meta.tags(std.meta.FieldEnum(DocModule))) |f| { | |
const f_name = @tagName(f); | |
try jsw.objectField(f_name); | |
switch (f) { | |
.table => try writeModuleTableToJson(self.table, jsw), | |
else => try jsw.write(@field(self, f_name)), | |
} | |
} | |
try jsw.endObject(); | |
} | |
}; | |
const Decl = struct { | |
name: []const u8, | |
kind: []const u8, | |
src: usize, // index into astNodes | |
value: WalkResult, | |
// The index in astNodes of the `test declname { }` node | |
decltest: ?usize = null, | |
is_uns: bool = false, // usingnamespace | |
parent_container: ?usize, // index into `types` | |
pub fn jsonStringify(self: Decl, jsw: anytype) !void { | |
try jsw.beginArray(); | |
inline for (comptime std.meta.fields(Decl)) |f| { | |
try jsw.write(@field(self, f.name)); | |
} | |
try jsw.endArray(); | |
} | |
}; | |
const AstNode = struct { | |
file: usize = 0, // index into files | |
line: usize = 0, | |
col: usize = 0, | |
name: ?[]const u8 = null, | |
code: ?[]const u8 = null, | |
docs: ?[]const u8 = null, | |
fields: ?[]usize = null, // index into astNodes | |
@"comptime": bool = false, | |
pub fn jsonStringify(self: AstNode, jsw: anytype) !void { | |
try jsw.beginArray(); | |
inline for (comptime std.meta.fields(AstNode)) |f| { | |
try jsw.write(@field(self, f.name)); | |
} | |
try jsw.endArray(); | |
} | |
}; | |
const Type = union(enum) { | |
Unanalyzed: struct {}, | |
Type: struct { name: []const u8 }, | |
Void: struct { name: []const u8 }, | |
Bool: struct { name: []const u8 }, | |
NoReturn: struct { name: []const u8 }, | |
Int: struct { name: []const u8 }, | |
Float: struct { name: []const u8 }, | |
Pointer: struct { | |
size: std.builtin.Type.Pointer.Size, | |
child: Expr, | |
sentinel: ?Expr = null, | |
@"align": ?Expr = null, | |
address_space: ?Expr = null, | |
bit_start: ?Expr = null, | |
host_size: ?Expr = null, | |
is_ref: bool = false, | |
is_allowzero: bool = false, | |
is_mutable: bool = false, | |
is_volatile: bool = false, | |
has_sentinel: bool = false, | |
has_align: bool = false, | |
has_addrspace: bool = false, | |
has_bit_range: bool = false, | |
}, | |
Array: struct { | |
len: Expr, | |
child: Expr, | |
sentinel: ?Expr = null, | |
}, | |
Struct: struct { | |
name: []const u8, | |
src: usize, // index into astNodes | |
privDecls: []usize = &.{}, // index into decls | |
pubDecls: []usize = &.{}, // index into decls | |
field_types: []Expr = &.{}, // (use src->fields to find names) | |
field_defaults: []?Expr = &.{}, // default values if specified | |
backing_int: ?Expr = null, // backing integer if specified | |
is_tuple: bool, | |
line_number: usize, | |
parent_container: ?usize, // index into `types` | |
layout: ?Expr, // if different than Auto | |
}, | |
ComptimeExpr: struct { name: []const u8 }, | |
ComptimeFloat: struct { name: []const u8 }, | |
ComptimeInt: struct { name: []const u8 }, | |
Undefined: struct { name: []const u8 }, | |
Null: struct { name: []const u8 }, | |
Optional: struct { | |
name: []const u8, | |
child: Expr, | |
}, | |
ErrorUnion: struct { lhs: Expr, rhs: Expr }, | |
InferredErrorUnion: struct { payload: Expr }, | |
ErrorSet: struct { | |
name: []const u8, | |
fields: ?[]const Field = null, | |
// TODO: fn field for inferred error sets? | |
}, | |
Enum: struct { | |
name: []const u8, | |
src: usize, // index into astNodes | |
privDecls: []usize = &.{}, // index into decls | |
pubDecls: []usize = &.{}, // index into decls | |
// (use src->fields to find field names) | |
tag: ?Expr = null, // tag type if specified | |
values: []?Expr = &.{}, // tag values if specified | |
nonexhaustive: bool, | |
parent_container: ?usize, // index into `types` | |
}, | |
Union: struct { | |
name: []const u8, | |
src: usize, // index into astNodes | |
privDecls: []usize = &.{}, // index into decls | |
pubDecls: []usize = &.{}, // index into decls | |
fields: []Expr = &.{}, // (use src->fields to find names) | |
tag: ?Expr, // tag type if specified | |
auto_enum: bool, // tag is an auto enum | |
parent_container: ?usize, // index into `types` | |
layout: ?Expr, // if different than Auto | |
}, | |
Fn: struct { | |
name: []const u8, | |
src: ?usize = null, // index into `astNodes` | |
ret: Expr, | |
generic_ret: ?Expr = null, | |
params: ?[]Expr = null, // (use src->fields to find names) | |
lib_name: []const u8 = "", | |
is_var_args: bool = false, | |
is_inferred_error: bool = false, | |
has_lib_name: bool = false, | |
has_cc: bool = false, | |
cc: ?usize = null, | |
@"align": ?usize = null, | |
has_align: bool = false, | |
is_test: bool = false, | |
is_extern: bool = false, | |
}, | |
Opaque: struct { | |
name: []const u8, | |
src: usize, // index into astNodes | |
privDecls: []usize = &.{}, // index into decls | |
pubDecls: []usize = &.{}, // index into decls | |
parent_container: ?usize, // index into `types` | |
}, | |
Frame: struct { name: []const u8 }, | |
AnyFrame: struct { name: []const u8 }, | |
Vector: struct { name: []const u8 }, | |
EnumLiteral: struct { name: []const u8 }, | |
const Field = struct { | |
name: []const u8, | |
docs: []const u8, | |
}; | |
pub fn jsonStringify(self: Type, jsw: anytype) !void { | |
const active_tag = std.meta.activeTag(self); | |
try jsw.beginArray(); | |
try jsw.write(@intFromEnum(active_tag)); | |
inline for (comptime std.meta.fields(Type)) |case| { | |
if (@field(Type, case.name) == active_tag) { | |
const current_value = @field(self, case.name); | |
inline for (comptime std.meta.fields(case.type)) |f| { | |
if (f.type == std.builtin.Type.Pointer.Size) { | |
try jsw.write(@intFromEnum(@field(current_value, f.name))); | |
} else { | |
try jsw.write(@field(current_value, f.name)); | |
} | |
} | |
} | |
} | |
try jsw.endArray(); | |
} | |
}; | |
/// An Expr represents the (untyped) result of analyzing instructions. | |
/// The data is normalized, which means that an Expr that results in a | |
/// type definition will hold an index into `self.types`. | |
pub const Expr = union(enum) { | |
comptimeExpr: usize, // index in `comptimeExprs` | |
void: struct {}, | |
@"unreachable": struct {}, | |
null: struct {}, | |
undefined: struct {}, | |
@"struct": []FieldVal, | |
fieldVal: FieldVal, | |
bool: bool, | |
@"anytype": struct {}, | |
@"&": usize, // index in `exprs` | |
type: usize, // index in `types` | |
this: usize, // index in `types` | |
declRef: *Scope.DeclStatus, | |
declIndex: usize, // index into `decls`, alternative repr for `declRef` | |
declName: []const u8, // unresolved decl name | |
builtinField: enum { len, ptr }, | |
fieldRef: FieldRef, | |
refPath: []Expr, | |
int: struct { | |
value: u64, // direct value | |
negated: bool = false, | |
}, | |
int_big: struct { | |
value: []const u8, // string representation | |
negated: bool = false, | |
}, | |
float: f64, // direct value | |
float128: f128, // direct value | |
array: []usize, // index in `exprs` | |
call: usize, // index in `calls` | |
enumLiteral: []const u8, // direct value | |
typeOf: usize, // index in `exprs` | |
typeOf_peer: []usize, | |
errorUnion: usize, // index in `types` | |
as: As, | |
sizeOf: usize, // index in `exprs` | |
bitSizeOf: usize, // index in `exprs` | |
compileError: usize, // index in `exprs` | |
optionalPayload: usize, // index in `exprs` | |
elemVal: ElemVal, | |
errorSets: usize, | |
string: []const u8, // direct value | |
sliceIndex: usize, | |
slice: Slice, | |
sliceLength: SliceLength, | |
cmpxchgIndex: usize, | |
cmpxchg: Cmpxchg, | |
builtin: Builtin, | |
builtinIndex: usize, | |
builtinBin: BuiltinBin, | |
builtinBinIndex: usize, | |
unionInit: UnionInit, | |
builtinCall: BuiltinCall, | |
mulAdd: MulAdd, | |
switchIndex: usize, // index in `exprs` | |
switchOp: SwitchOp, | |
unOp: UnOp, | |
unOpIndex: usize, | |
binOp: BinOp, | |
binOpIndex: usize, | |
load: usize, // index in `exprs` | |
const UnOp = struct { | |
param: usize, // index in `exprs` | |
name: []const u8 = "", // tag name | |
}; | |
const BinOp = struct { | |
lhs: usize, // index in `exprs` | |
rhs: usize, // index in `exprs` | |
name: []const u8 = "", // tag name | |
}; | |
const SwitchOp = struct { | |
cond_index: usize, | |
file_name: []const u8, | |
src: usize, | |
outer_decl: usize, // index in `types` | |
}; | |
const BuiltinBin = struct { | |
name: []const u8 = "", // fn name | |
lhs: usize, // index in `exprs` | |
rhs: usize, // index in `exprs` | |
}; | |
const UnionInit = struct { | |
type: usize, // index in `exprs` | |
field: usize, // index in `exprs` | |
init: usize, // index in `exprs` | |
}; | |
const Builtin = struct { | |
name: []const u8 = "", // fn name | |
param: usize, // index in `exprs` | |
}; | |
const BuiltinCall = struct { | |
modifier: usize, // index in `exprs` | |
function: usize, // index in `exprs` | |
args: usize, // index in `exprs` | |
}; | |
const MulAdd = struct { | |
mulend1: usize, // index in `exprs` | |
mulend2: usize, // index in `exprs` | |
addend: usize, // index in `exprs` | |
type: usize, // index in `exprs` | |
}; | |
const Slice = struct { | |
lhs: usize, // index in `exprs` | |
start: usize, | |
end: ?usize = null, | |
sentinel: ?usize = null, // index in `exprs` | |
}; | |
const SliceLength = struct { | |
lhs: usize, | |
start: usize, | |
len: usize, | |
sentinel: ?usize = null, | |
}; | |
const Cmpxchg = struct { | |
name: []const u8, | |
type: usize, | |
ptr: usize, | |
expected_value: usize, | |
new_value: usize, | |
success_order: usize, | |
failure_order: usize, | |
}; | |
const As = struct { | |
typeRefArg: ?usize, // index in `exprs` | |
exprArg: usize, // index in `exprs` | |
}; | |
const FieldRef = struct { | |
type: usize, // index in `types` | |
index: usize, // index in type.fields | |
}; | |
const FieldVal = struct { | |
name: []const u8, | |
val: struct { | |
typeRef: ?usize, // index in `exprs` | |
expr: usize, // index in `exprs` | |
}, | |
}; | |
const ElemVal = struct { | |
lhs: usize, // index in `exprs` | |
rhs: usize, // index in `exprs` | |
}; | |
pub fn jsonStringify(self: Expr, jsw: anytype) !void { | |
const active_tag = std.meta.activeTag(self); | |
try jsw.beginObject(); | |
if (active_tag == .declIndex) { | |
try jsw.objectField("declRef"); | |
} else { | |
try jsw.objectField(@tagName(active_tag)); | |
} | |
switch (self) { | |
.int => { | |
if (self.int.negated) { | |
try jsw.write(-@as(i65, self.int.value)); | |
} else { | |
try jsw.write(self.int.value); | |
} | |
}, | |
.builtinField => { | |
try jsw.write(@tagName(self.builtinField)); | |
}, | |
.declRef => { | |
try jsw.write(self.declRef.Analyzed); | |
}, | |
else => { | |
inline for (comptime std.meta.fields(Expr)) |case| { | |
// TODO: this is super ugly, fix once `inline else` is a thing | |
if (comptime std.mem.eql(u8, case.name, "builtinField")) | |
continue; | |
if (comptime std.mem.eql(u8, case.name, "declRef")) | |
continue; | |
if (@field(Expr, case.name) == active_tag) { | |
try jsw.write(@field(self, case.name)); | |
} | |
} | |
}, | |
} | |
try jsw.endObject(); | |
} | |
}; | |
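// Hedged illustration of the resulting JSON shape: with the custom | |
// stringifier above, `.{ .int = .{ .value = 5, .negated = true } }` comes | |
// out as `{"int":-5}`, and a `declIndex` is emitted under the "declRef" | |
// key so the frontend sees a single representation for both. | |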
/// A WalkResult represents the result of the analysis process done to | |
/// a Zir instruction. Walk results carry type information either inferred | |
/// from the context (e.g. string literals are pointers to null-terminated | |
/// arrays), or because of @as() instructions. | |
/// Since the type information is only needed in certain contexts, the | |
/// underlying normalized data (Expr) is untyped. | |
const WalkResult = struct { | |
typeRef: ?Expr = null, | |
expr: Expr, | |
}; | |
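// For example, walking the string literal "hi" produces a WalkResult whose | |
// `expr` is `.{ .string = "hi" }` and whose `typeRef`, when requested, | |
// points at freshly appended Array/Pointer entries in `types` (see the | |
// `.str` case of `walkInstruction` below). | |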
}; | |
const AutodocErrors = error{ | |
OutOfMemory, | |
CurrentWorkingDirectoryUnlinked, | |
UnexpectedEndOfFile, | |
ModuleNotFound, | |
ImportOutsideModulePath, | |
} || std.fs.File.OpenError || std.fs.File.ReadError; | |
/// `call` instructions will have loopy references to themselves | |
/// whenever an as_node is required for a complex expression. | |
/// This type is used to keep track of dangerous instruction | |
/// numbers that we definitely don't want to recurse into. | |
const CallContext = struct { | |
inst: Zir.Inst.Index, | |
prev: ?*const CallContext, | |
}; | |
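// Hedged sketch of how such a chain is typically consulted (assuming a | |
// `call_ctx: ?*const CallContext` like the one threaded through below): | |
// | |
//     var cur = call_ctx; | |
//     while (cur) |ctx| : (cur = ctx.prev) { | |
//         if (ctx.inst == inst) return .{ .expr = .{ .comptimeExpr = 0 } }; | |
//     } | |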
/// Called when we need to analyze a Zir instruction. | |
/// For example it gets called by `generateZirData` on instruction 0, | |
/// which represents the top-level struct corresponding to the root file. | |
/// Note that when analyzing code that only allows a limited subset of Zig | |
/// syntax, we sometimes handle the Zir directly instead of calling | |
/// `walkInstruction`. | |
/// The best example of that are instructions corresponding to function | |
/// params, as those can only occur while analyzing a function definition. | |
fn walkInstruction( | |
self: *Autodoc, | |
file: *File, | |
parent_scope: *Scope, | |
parent_src: SrcLocInfo, | |
inst: Zir.Inst.Index, | |
need_type: bool, // true if the caller needs us to provide also a typeRef | |
call_ctx: ?*const CallContext, | |
) AutodocErrors!DocData.WalkResult { | |
const tags = file.zir.instructions.items(.tag); | |
const data = file.zir.instructions.items(.data); | |
if (self.repurposed_insts.contains(inst)) { | |
// TODO: better handling here | |
return .{ .expr = .{ .comptimeExpr = 0 } }; | |
} | |
// We assume that the topmost ast_node entry corresponds to our decl | |
const self_ast_node_index = self.ast_nodes.items.len - 1; | |
switch (tags[@intFromEnum(inst)]) { | |
else => { | |
printWithContext( | |
file, | |
inst, | |
"TODO: implement `{s}` for walkInstruction\n\n", | |
.{@tagName(tags[@intFromEnum(inst)])}, | |
); | |
return self.cteTodo(@tagName(tags[@intFromEnum(inst)])); | |
}, | |
.import => { | |
const str_tok = data[@intFromEnum(inst)].str_tok; | |
const path = str_tok.get(file.zir); | |
// importFile cannot error out since all files | |
// are already loaded at this point | |
if (file.mod.deps.get(path)) |other_module| { | |
const result = try self.modules.getOrPut(self.arena, other_module); | |
// Immediately add this module to the import table of our | |
// current module, regardless of whether it's new or not. | |
if (self.modules.getPtr(file.mod)) |current_module| { | |
// TODO: apparently, in the stdlib a file gets analyzed before | |
// its module gets added. I guess we're importing a file | |
// that belongs to another module through its file path? | |
// (i.e. not through its module name). | |
// We're bailing for now, but maybe we shouldn't? | |
_ = try current_module.table.getOrPutValue( | |
self.arena, | |
other_module, | |
.{ | |
.name = path, | |
.value = self.modules.getIndex(other_module).?, | |
}, | |
); | |
} | |
if (result.found_existing) { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = result.value_ptr.main }, | |
}; | |
} | |
// create a new module entry | |
const main_type_index = self.types.items.len; | |
result.value_ptr.* = .{ | |
.name = path, | |
.main = main_type_index, | |
.table = .{}, | |
}; | |
// TODO: Add this module as a dependency to the current module | |
// TODO: this seems like something that could be done in bulk | |
// at the beginning or the end, or something. | |
const abs_root_src_path = try std.fs.path.resolve(self.arena, &.{ | |
".", | |
other_module.root.root_dir.path orelse ".", | |
other_module.root.sub_path, | |
other_module.root_src_path, | |
}); | |
defer self.arena.free(abs_root_src_path); | |
const new_file = self.zcu.import_table.get(abs_root_src_path).?; | |
var root_scope = Scope{ | |
.parent = null, | |
.enclosing_type = null, | |
}; | |
const maybe_tldoc_comment = try self.getTLDocComment(file); | |
try self.ast_nodes.append(self.arena, .{ | |
.name = "(root)", | |
.docs = maybe_tldoc_comment, | |
}); | |
try self.files.put(self.arena, new_file, main_type_index); | |
return self.walkInstruction( | |
new_file, | |
&root_scope, | |
.{}, | |
.main_struct_inst, | |
false, | |
call_ctx, | |
); | |
} | |
const new_file = try self.zcu.importFile(file, path); | |
const result = try self.files.getOrPut(self.arena, new_file.file); | |
if (result.found_existing) { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = result.value_ptr.* }, | |
}; | |
} | |
const maybe_tldoc_comment = try self.getTLDocComment(new_file.file); | |
try self.ast_nodes.append(self.arena, .{ | |
.name = path, | |
.docs = maybe_tldoc_comment, | |
}); | |
result.value_ptr.* = self.types.items.len; | |
var new_scope = Scope{ | |
.parent = null, | |
.enclosing_type = null, | |
}; | |
return self.walkInstruction( | |
new_file.file, | |
&new_scope, | |
.{}, | |
.main_struct_inst, | |
need_type, | |
call_ctx, | |
); | |
}, | |
.ret_type => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = @intFromEnum(Ref.type_type) }, | |
}; | |
}, | |
.ret_node => { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
return self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
false, | |
call_ctx, | |
); | |
}, | |
.ret_load => { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const res_ptr_ref = un_node.operand; | |
const res_ptr_inst = @intFromEnum(res_ptr_ref.toIndex().?); | |
// TODO: this instruction doesn't let us know trivially if there's | |
// branching involved or not. For now, here's the strategy: | |
// we search backwards until `ret_ptr` for `store_node` instructions; | |
// if we find only one, then that's our value; if we find more | |
// than one, then it means that branching is involved. | |
// Maybe. | |
var i = @intFromEnum(inst) - 1; | |
var result_ref: ?Ref = null; | |
while (i > res_ptr_inst) : (i -= 1) { | |
if (tags[i] == .store_node) { | |
const pl_node = data[i].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index); | |
if (extra.data.lhs == res_ptr_ref) { | |
// this store_node instruction is indeed pointing at | |
// the result location that we care about! | |
if (result_ref != null) return DocData.WalkResult{ | |
.expr = .{ .comptimeExpr = 0 }, | |
}; | |
result_ref = extra.data.rhs; | |
} | |
} | |
} | |
if (result_ref) |rr| { | |
return self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
rr, | |
need_type, | |
call_ctx, | |
); | |
} | |
return DocData.WalkResult{ | |
.expr = .{ .comptimeExpr = 0 }, | |
}; | |
}, | |
.closure_get => { | |
const inst_node = data[@intFromEnum(inst)].inst_node; | |
const code = try self.getBlockSource(file, parent_src, inst_node.src_node); | |
const idx = self.comptime_exprs.items.len; | |
try self.exprs.append(self.arena, .{ .comptimeExpr = idx }); | |
try self.comptime_exprs.append(self.arena, .{ .code = code }); | |
return DocData.WalkResult{ | |
.expr = .{ .comptimeExpr = idx }, | |
}; | |
}, | |
.closure_capture => { | |
const un_tok = data[@intFromEnum(inst)].un_tok; | |
return try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_tok.operand, | |
need_type, | |
call_ctx, | |
); | |
}, | |
.str => { | |
const str = data[@intFromEnum(inst)].str.get(file.zir); | |
const tRef: ?DocData.Expr = if (!need_type) null else blk: { | |
const arrTypeId = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.Array = .{ | |
.len = .{ .int = .{ .value = str.len } }, | |
.child = .{ .type = @intFromEnum(Ref.u8_type) }, | |
.sentinel = .{ .int = .{ | |
.value = 0, | |
.negated = false, | |
} }, | |
}, | |
}); | |
// const sentinel: ?usize = if (ptr.flags.has_sentinel) 0 else null; | |
const ptrTypeId = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.Pointer = .{ | |
.size = .One, | |
.child = .{ .type = arrTypeId }, | |
.sentinel = .{ .int = .{ | |
.value = 0, | |
.negated = false, | |
} }, | |
.is_mutable = false, | |
}, | |
}); | |
break :blk .{ .type = ptrTypeId }; | |
}; | |
return DocData.WalkResult{ | |
.typeRef = tRef, | |
.expr = .{ .string = str }, | |
}; | |
}, | |
.compile_error => { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const operand: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
false, | |
call_ctx, | |
); | |
const operand_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
return DocData.WalkResult{ | |
.expr = .{ .compileError = operand_index }, | |
}; | |
}, | |
.enum_literal => { | |
const str_tok = data[@intFromEnum(inst)].str_tok; | |
const literal = file.zir.nullTerminatedString(str_tok.start); | |
const type_index = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.EnumLiteral = .{ .name = "todo enum literal" }, | |
}); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = type_index }, | |
.expr = .{ .enumLiteral = literal }, | |
}; | |
}, | |
.int => { | |
const int = data[@intFromEnum(inst)].int; | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.comptime_int_type) }, | |
.expr = .{ .int = .{ .value = int } }, | |
}; | |
}, | |
.int_big => { | |
// @check | |
const str = data[@intFromEnum(inst)].str; //.get(file.zir); | |
const byte_count = str.len * @sizeOf(std.math.big.Limb); | |
const limb_bytes = file.zir.string_bytes[@intFromEnum(str.start)..][0..byte_count]; | |
const limbs = try self.arena.alloc(std.math.big.Limb, str.len); | |
@memcpy(std.mem.sliceAsBytes(limbs)[0..limb_bytes.len], limb_bytes); | |
const big_int = std.math.big.int.Const{ | |
.limbs = limbs, | |
.positive = true, | |
}; | |
const as_string = try big_int.toStringAlloc(self.arena, 10, .lower); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.comptime_int_type) }, | |
.expr = .{ .int_big = .{ .value = as_string } }, | |
}; | |
}, | |
.@"unreachable" => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.noreturn_type) }, | |
.expr = .{ .@"unreachable" = .{} }, | |
}; | |
}, | |
.slice_start => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.SliceStart, pl_node.payload_index); | |
const slice_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .slice = .{ .lhs = 0, .start = 0 } }); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const start: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.start, | |
false, | |
call_ctx, | |
); | |
const lhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, lhs.expr); | |
const start_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, start.expr); | |
self.exprs.items[slice_index] = .{ .slice = .{ .lhs = lhs_index, .start = start_index } }; | |
const typeRef = switch (lhs.expr) { | |
.declRef => |ref| self.decls.items[ref.Analyzed].value.typeRef, | |
else => null, | |
}; | |
return DocData.WalkResult{ | |
.typeRef = typeRef, | |
.expr = .{ .sliceIndex = slice_index }, | |
}; | |
}, | |
.slice_end => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.SliceEnd, pl_node.payload_index); | |
const slice_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .slice = .{ .lhs = 0, .start = 0 } }); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const start: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.start, | |
false, | |
call_ctx, | |
); | |
const end: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.end, | |
false, | |
call_ctx, | |
); | |
const lhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, lhs.expr); | |
const start_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, start.expr); | |
const end_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, end.expr); | |
self.exprs.items[slice_index] = .{ .slice = .{ .lhs = lhs_index, .start = start_index, .end = end_index } }; | |
const typeRef = switch (lhs.expr) { | |
.declRef => |ref| self.decls.items[ref.Analyzed].value.typeRef, | |
else => null, | |
}; | |
return DocData.WalkResult{ | |
.typeRef = typeRef, | |
.expr = .{ .sliceIndex = slice_index }, | |
}; | |
}, | |
.slice_sentinel => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.SliceSentinel, pl_node.payload_index); | |
const slice_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .slice = .{ .lhs = 0, .start = 0 } }); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const start: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.start, | |
false, | |
call_ctx, | |
); | |
const end: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.end, | |
false, | |
call_ctx, | |
); | |
const sentinel: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.sentinel, | |
false, | |
call_ctx, | |
); | |
const lhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, lhs.expr); | |
const start_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, start.expr); | |
const end_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, end.expr); | |
const sentinel_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, sentinel.expr); | |
self.exprs.items[slice_index] = .{ .slice = .{ | |
.lhs = lhs_index, | |
.start = start_index, | |
.end = end_index, | |
.sentinel = sentinel_index, | |
} }; | |
const typeRef = switch (lhs.expr) { | |
.declRef => |ref| self.decls.items[ref.Analyzed].value.typeRef, | |
else => null, | |
}; | |
return DocData.WalkResult{ | |
.typeRef = typeRef, | |
.expr = .{ .sliceIndex = slice_index }, | |
}; | |
}, | |
.slice_length => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.SliceLength, pl_node.payload_index); | |
const slice_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .slice = .{ .lhs = 0, .start = 0 } }); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const start: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.start, | |
false, | |
call_ctx, | |
); | |
const len: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.len, | |
false, | |
call_ctx, | |
); | |
const sentinel_opt: ?DocData.WalkResult = if (extra.data.sentinel != .none) | |
try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.sentinel, | |
false, | |
call_ctx, | |
) | |
else | |
null; | |
const lhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, lhs.expr); | |
const start_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, start.expr); | |
const len_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, len.expr); | |
const sentinel_index = if (sentinel_opt) |sentinel| sentinel_index: { | |
const index = self.exprs.items.len; | |
try self.exprs.append(self.arena, sentinel.expr); | |
break :sentinel_index index; | |
} else null; | |
self.exprs.items[slice_index] = .{ .sliceLength = .{ | |
.lhs = lhs_index, | |
.start = start_index, | |
.len = len_index, | |
.sentinel = sentinel_index, | |
} }; | |
const typeRef = switch (lhs.expr) { | |
.declRef => |ref| self.decls.items[ref.Analyzed].value.typeRef, | |
else => null, | |
}; | |
return DocData.WalkResult{ | |
.typeRef = typeRef, | |
.expr = .{ .sliceIndex = slice_index }, | |
}; | |
}, | |
.load => { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const operand = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
need_type, | |
call_ctx, | |
); | |
const load_idx = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
var typeRef: ?DocData.Expr = null; | |
if (operand.typeRef) |ref| { | |
switch (ref) { | |
.type => |t_index| { | |
switch (self.types.items[t_index]) { | |
.Pointer => |p| typeRef = p.child, | |
else => {}, | |
} | |
}, | |
else => {}, | |
} | |
} | |
return DocData.WalkResult{ | |
.typeRef = typeRef, | |
.expr = .{ .load = load_idx }, | |
}; | |
}, | |
.ref => { | |
const un_tok = data[@intFromEnum(inst)].un_tok; | |
const operand = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_tok.operand, | |
need_type, | |
call_ctx, | |
); | |
const ref_idx = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
return DocData.WalkResult{ | |
.expr = .{ .@"&" = ref_idx }, | |
}; | |
}, | |
.add, | |
.addwrap, | |
.add_sat, | |
.sub, | |
.subwrap, | |
.sub_sat, | |
.mul, | |
.mulwrap, | |
.mul_sat, | |
.div, | |
.shl, | |
.shl_sat, | |
.shr, | |
.bit_or, | |
.bit_and, | |
.xor, | |
.array_cat, | |
=> { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index); | |
const binop_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .binOp = .{ .lhs = 0, .rhs = 0 } }); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const rhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.rhs, | |
false, | |
call_ctx, | |
); | |
const lhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, lhs.expr); | |
const rhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, rhs.expr); | |
self.exprs.items[binop_index] = .{ .binOp = .{ | |
.name = @tagName(tags[@intFromEnum(inst)]), | |
.lhs = lhs_index, | |
.rhs = rhs_index, | |
} }; | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .binOpIndex = binop_index }, | |
}; | |
}, | |
.array_mul => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.ArrayMul, pl_node.payload_index); | |
const binop_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .binOp = .{ .lhs = 0, .rhs = 0 } }); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const rhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.rhs, | |
false, | |
call_ctx, | |
); | |
const res_ty: ?DocData.WalkResult = if (extra.data.res_ty != .none) | |
try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.res_ty, | |
false, | |
call_ctx, | |
) | |
else | |
null; | |
const lhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, lhs.expr); | |
const rhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, rhs.expr); | |
self.exprs.items[binop_index] = .{ .binOp = .{ | |
.name = @tagName(tags[@intFromEnum(inst)]), | |
.lhs = lhs_index, | |
.rhs = rhs_index, | |
} }; | |
return DocData.WalkResult{ | |
.typeRef = if (res_ty) |rt| rt.expr else null, | |
.expr = .{ .binOpIndex = binop_index }, | |
}; | |
}, | |
// compare operators | |
.cmp_eq, | |
.cmp_neq, | |
.cmp_gt, | |
.cmp_gte, | |
.cmp_lt, | |
.cmp_lte, | |
=> { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index); | |
const binop_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .binOp = .{ .lhs = 0, .rhs = 0 } }); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const rhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.rhs, | |
false, | |
call_ctx, | |
); | |
const lhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, lhs.expr); | |
const rhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, rhs.expr); | |
self.exprs.items[binop_index] = .{ .binOp = .{ | |
.name = @tagName(tags[@intFromEnum(inst)]), | |
.lhs = lhs_index, | |
.rhs = rhs_index, | |
} }; | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.bool_type) }, | |
.expr = .{ .binOpIndex = binop_index }, | |
}; | |
}, | |
// builtin functions | |
.align_of, | |
.int_from_bool, | |
.embed_file, | |
.error_name, | |
.panic, | |
.set_runtime_safety, // @check | |
.sqrt, | |
.sin, | |
.cos, | |
.tan, | |
.exp, | |
.exp2, | |
.log, | |
.log2, | |
.log10, | |
.abs, | |
.floor, | |
.ceil, | |
.trunc, | |
.round, | |
.tag_name, | |
.type_name, | |
.frame_type, | |
.frame_size, | |
.int_from_ptr, | |
.type_info, | |
// @check | |
.clz, | |
.ctz, | |
.pop_count, | |
.byte_swap, | |
.bit_reverse, | |
=> { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const bin_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .builtin = .{ .param = 0 } }); | |
const param = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
false, | |
call_ctx, | |
); | |
const param_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, param.expr); | |
self.exprs.items[bin_index] = .{ | |
.builtin = .{ | |
.name = @tagName(tags[@intFromEnum(inst)]), | |
.param = param_index, | |
}, | |
}; | |
return DocData.WalkResult{ | |
.typeRef = param.typeRef orelse .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .builtinIndex = bin_index }, | |
}; | |
}, | |
.bit_not, | |
.bool_not, | |
.negate_wrap, | |
=> { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const un_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .unOp = .{ .param = 0 } }); | |
const param = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
false, | |
call_ctx, | |
); | |
const param_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, param.expr); | |
self.exprs.items[un_index] = .{ | |
.unOp = .{ | |
.name = @tagName(tags[@intFromEnum(inst)]), | |
.param = param_index, | |
}, | |
}; | |
return DocData.WalkResult{ | |
.typeRef = param.typeRef, | |
.expr = .{ .unOpIndex = un_index }, | |
}; | |
}, | |
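// `a and b` / `a or b`: the rhs is evaluated only when the lhs | |
// doesn't short-circuit, so it lives in a trailing body; we walk the | |
// body's last instruction to recover the rhs expression. | |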
.bool_br_and, .bool_br_or => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.BoolBr, pl_node.payload_index); | |
const bin_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .binOp = .{ .lhs = 0, .rhs = 0 } }); | |
const lhs = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const lhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, lhs.expr); | |
const rhs = try self.walkInstruction( | |
file, | |
parent_scope, | |
parent_src, | |
@enumFromInt(file.zir.extra[extra.end..][extra.data.body_len - 1]), | |
false, | |
call_ctx, | |
); | |
const rhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, rhs.expr); | |
self.exprs.items[bin_index] = .{ .binOp = .{ .name = @tagName(tags[@intFromEnum(inst)]), .lhs = lhs_index, .rhs = rhs_index } }; | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.bool_type) }, | |
.expr = .{ .binOpIndex = bin_index }, | |
}; | |
}, | |
.truncate => { | |
// In the ZIR this node is a builtin `bin`, but we want to send it as a `un` builtin. | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index); | |
const rhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.rhs, | |
false, | |
call_ctx, | |
); | |
const bin_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .builtin = .{ .param = 0 } }); | |
const rhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, rhs.expr); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
self.exprs.items[bin_index] = .{ .builtin = .{ .name = @tagName(tags[@intFromEnum(inst)]), .param = rhs_index } }; | |
return DocData.WalkResult{ | |
.typeRef = lhs.expr, | |
.expr = .{ .builtinIndex = bin_index }, | |
}; | |
}, | |
.int_from_float, | |
.float_from_int, | |
.ptr_from_int, | |
.enum_from_int, | |
.float_cast, | |
.int_cast, | |
.ptr_cast, | |
.has_decl, | |
.has_field, | |
.div_exact, | |
.div_floor, | |
.div_trunc, | |
.mod, | |
.rem, | |
.mod_rem, | |
.shl_exact, | |
.shr_exact, | |
.bitcast, | |
.vector_type, | |
// @check | |
.bit_offset_of, | |
.offset_of, | |
.splat, | |
.reduce, | |
.min, | |
.max, | |
=> { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index); | |
const binop_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .builtinBin = .{ .lhs = 0, .rhs = 0 } }); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const rhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.rhs, | |
false, | |
call_ctx, | |
); | |
const lhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, lhs.expr); | |
const rhs_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, rhs.expr); | |
self.exprs.items[binop_index] = .{ .builtinBin = .{ .name = @tagName(tags[@intFromEnum(inst)]), .lhs = lhs_index, .rhs = rhs_index } }; | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .builtinBinIndex = binop_index }, | |
}; | |
}, | |
.mul_add => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.MulAdd, pl_node.payload_index); | |
const mul1: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.mulend1, | |
false, | |
call_ctx, | |
); | |
const mul2: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.mulend2, | |
false, | |
call_ctx, | |
); | |
const add: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.addend, | |
false, | |
call_ctx, | |
); | |
const mul1_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, mul1.expr); | |
const mul2_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, mul2.expr); | |
const add_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, add.expr); | |
const type_index: usize = self.exprs.items.len; | |
try self.exprs.append(self.arena, add.typeRef orelse .{ .type = @intFromEnum(Ref.type_type) }); | |
return DocData.WalkResult{ | |
.typeRef = add.typeRef, | |
.expr = .{ | |
.mulAdd = .{ | |
.mulend1 = mul1_index, | |
.mulend2 = mul2_index, | |
.addend = add_index, | |
.type = type_index, | |
}, | |
}, | |
}; | |
}, | |
.union_init => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.UnionInit, pl_node.payload_index); | |
const union_type: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.union_type, | |
false, | |
call_ctx, | |
); | |
const field_name: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.field_name, | |
false, | |
call_ctx, | |
); | |
const init: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.init, | |
false, | |
call_ctx, | |
); | |
const union_type_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, union_type.expr); | |
const field_name_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, field_name.expr); | |
const init_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, init.expr); | |
return DocData.WalkResult{ | |
.typeRef = union_type.expr, | |
.expr = .{ | |
.unionInit = .{ | |
.type = union_type_index, | |
.field = field_name_index, | |
.init = init_index, | |
}, | |
}, | |
}; | |
}, | |
.builtin_call => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.BuiltinCall, pl_node.payload_index); | |
const modifier: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.modifier, | |
false, | |
call_ctx, | |
); | |
const callee: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.callee, | |
false, | |
call_ctx, | |
); | |
const args: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.args, | |
false, | |
call_ctx, | |
); | |
const modifier_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, modifier.expr); | |
const function_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, callee.expr); | |
const args_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, args.expr); | |
return DocData.WalkResult{ | |
.expr = .{ | |
.builtinCall = .{ | |
.modifier = modifier_index, | |
.function = function_index, | |
.args = args_index, | |
}, | |
}, | |
}; | |
}, | |
.error_union_type => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const rhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.rhs, | |
false, | |
call_ctx, | |
); | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ .ErrorUnion = .{ | |
.lhs = lhs.expr, | |
.rhs = rhs.expr, | |
} }); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .errorUnion = type_slot_index }, | |
}; | |
}, | |
.merge_error_sets => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index); | |
const lhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
false, | |
call_ctx, | |
); | |
const rhs: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.rhs, | |
false, | |
call_ctx, | |
); | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ .ErrorUnion = .{ | |
.lhs = lhs.expr, | |
.rhs = rhs.expr, | |
} }); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .errorSets = type_slot_index }, | |
}; | |
}, | |
// .elem_type => { | |
// const un_node = data[@intFromEnum(inst)].un_node; | |
// const operand: DocData.WalkResult = try self.walkRef( | |
// file, | |
// parent_scope, parent_src, | |
// un_node.operand, | |
// false, | |
// ); | |
// return operand; | |
// }, | |
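// Pointer types encode their optional attributes as trailing refs in | |
// `extra`, in flag order: sentinel, align, addrspace, then the bit | |
// range pair (bit_start, host_size); `extra_index` advances past each | |
// attribute that is present. | |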
.ptr_type => { | |
const ptr = data[@intFromEnum(inst)].ptr_type; | |
const extra = file.zir.extraData(Zir.Inst.PtrType, ptr.payload_index); | |
var extra_index = extra.end; | |
const elem_type_ref = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.elem_type, | |
false, | |
call_ctx, | |
); | |
// @check if `addrspace`, `bit_start` and `host_size` really need to be | |
// present in json | |
var sentinel: ?DocData.Expr = null; | |
if (ptr.flags.has_sentinel) { | |
const ref: Zir.Inst.Ref = @enumFromInt(file.zir.extra[extra_index]); | |
const ref_result = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
ref, | |
false, | |
call_ctx, | |
); | |
sentinel = ref_result.expr; | |
extra_index += 1; | |
} | |
var @"align": ?DocData.Expr = null; | |
if (ptr.flags.has_align) { | |
const ref: Zir.Inst.Ref = @enumFromInt(file.zir.extra[extra_index]); | |
const ref_result = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
ref, | |
false, | |
call_ctx, | |
); | |
@"align" = ref_result.expr; | |
extra_index += 1; | |
} | |
var address_space: ?DocData.Expr = null; | |
if (ptr.flags.has_addrspace) { | |
const ref: Zir.Inst.Ref = @enumFromInt(file.zir.extra[extra_index]); | |
const ref_result = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
ref, | |
false, | |
call_ctx, | |
); | |
address_space = ref_result.expr; | |
extra_index += 1; | |
} | |
var bit_start: ?DocData.Expr = null; | |
if (ptr.flags.has_bit_range) { | |
const ref: Zir.Inst.Ref = @enumFromInt(file.zir.extra[extra_index]); | |
const ref_result = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
ref, | |
false, | |
call_ctx, | |
); | |
bit_start = ref_result.expr; | |
extra_index += 1; | |
} | |
var host_size: ?DocData.Expr = null; | |
if (ptr.flags.has_bit_range) { | |
const ref: Zir.Inst.Ref = @enumFromInt(file.zir.extra[extra_index]); | |
const ref_result = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
ref, | |
false, | |
call_ctx, | |
); | |
host_size = ref_result.expr; | |
} | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.Pointer = .{ | |
.size = ptr.size, | |
.child = elem_type_ref.expr, | |
.has_align = ptr.flags.has_align, | |
.@"align" = @"align", | |
.has_addrspace = ptr.flags.has_addrspace, | |
.address_space = address_space, | |
.has_sentinel = ptr.flags.has_sentinel, | |
.sentinel = sentinel, | |
.is_mutable = ptr.flags.is_mutable, | |
.is_volatile = ptr.flags.is_volatile, | |
.has_bit_range = ptr.flags.has_bit_range, | |
.bit_start = bit_start, | |
.host_size = host_size, | |
}, | |
}); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = type_slot_index }, | |
}; | |
}, | |
.array_type => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const bin = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index).data; | |
const len = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
bin.lhs, | |
false, | |
call_ctx, | |
); | |
const child = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
bin.rhs, | |
false, | |
call_ctx, | |
); | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.Array = .{ | |
.len = len.expr, | |
.child = child.expr, | |
}, | |
}); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = type_slot_index }, | |
}; | |
}, | |
.array_type_sentinel => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.ArrayTypeSentinel, pl_node.payload_index); | |
const len = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.len, | |
false, | |
call_ctx, | |
); | |
const sentinel = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.sentinel, | |
false, | |
call_ctx, | |
); | |
const elem_type = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.elem_type, | |
false, | |
call_ctx, | |
); | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.Array = .{ | |
.len = len.expr, | |
.child = elem_type.expr, | |
.sentinel = sentinel.expr, | |
}, | |
}); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = type_slot_index }, | |
}; | |
}, | |
.array_init => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.MultiOp, pl_node.payload_index); | |
const operands = file.zir.refSlice(extra.end, extra.data.operands_len); | |
std.debug.assert(operands.len > 0); | |
const array_data = try self.arena.alloc(usize, operands.len - 1); | |
const array_type = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
operands[0], | |
false, | |
call_ctx, | |
); | |
for (operands[1..], 0..) |op, idx| { | |
const wr = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
op, | |
false, | |
call_ctx, | |
); | |
const expr_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, wr.expr); | |
array_data[idx] = expr_index; | |
} | |
return DocData.WalkResult{ | |
.typeRef = array_type.expr, | |
.expr = .{ .array = array_data }, | |
}; | |
}, | |
.array_init_anon => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.MultiOp, pl_node.payload_index); | |
const operands = file.zir.refSlice(extra.end, extra.data.operands_len); | |
const array_data = try self.arena.alloc(usize, operands.len); | |
for (operands, 0..) |op, idx| { | |
const wr = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
op, | |
false, | |
call_ctx, | |
); | |
const expr_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, wr.expr); | |
array_data[idx] = expr_index; | |
} | |
return DocData.WalkResult{ | |
.typeRef = null, | |
.expr = .{ .array = array_data }, | |
}; | |
}, | |
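// `&.{...}`-style inits: walk the array as usual, then wrap its type | |
// in a single-item Pointer so the result models the address-of. | |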
.array_init_ref => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.MultiOp, pl_node.payload_index); | |
const operands = file.zir.refSlice(extra.end, extra.data.operands_len); | |
std.debug.assert(operands.len > 0); | |
const array_data = try self.arena.alloc(usize, operands.len - 1); | |
const array_type = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
operands[0], | |
false, | |
call_ctx, | |
); | |
for (operands[1..], 0..) |op, idx| { | |
const wr = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
op, | |
false, | |
call_ctx, | |
); | |
const expr_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, wr.expr); | |
array_data[idx] = expr_index; | |
} | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.Pointer = .{ | |
.size = .One, | |
.child = array_type.expr, | |
}, | |
}); | |
const expr_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .array = array_data }); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = type_slot_index }, | |
.expr = .{ .@"&" = expr_index }, | |
}; | |
}, | |
.float => { | |
const float = data[@intFromEnum(inst)].float; | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.comptime_float_type) }, | |
.expr = .{ .float = float }, | |
}; | |
}, | |
// @check: In the frontend I'm handling float128 with `.toFixed(2)` | |
.float128 => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Float128, pl_node.payload_index); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.comptime_float_type) }, | |
.expr = .{ .float128 = extra.data.get() }, | |
}; | |
}, | |
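// Negation of integer literals is folded directly into the literal | |
// (by setting `negated`); any other operand gets wrapped in a `unOp` | |
// node instead. | |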
.negate => { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
var operand: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
need_type, | |
call_ctx, | |
); | |
switch (operand.expr) { | |
.int => |*int| int.negated = true, | |
.int_big => |*int_big| int_big.negated = true, | |
else => { | |
const un_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .unOp = .{ .param = 0 } }); | |
const param_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
self.exprs.items[un_index] = .{ | |
.unOp = .{ | |
.name = @tagName(tags[@intFromEnum(inst)]), | |
.param = param_index, | |
}, | |
}; | |
return DocData.WalkResult{ | |
.typeRef = operand.typeRef, | |
.expr = .{ .unOpIndex = un_index }, | |
}; | |
}, | |
} | |
return operand; | |
}, | |
.size_of => { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const operand = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
false, | |
call_ctx, | |
); | |
const operand_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.comptime_int_type) }, | |
.expr = .{ .sizeOf = operand_index }, | |
}; | |
}, | |
.bit_size_of => { | |
// not working correctly with `align()` | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const operand = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
need_type, | |
call_ctx, | |
); | |
const operand_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
return DocData.WalkResult{ | |
.typeRef = operand.typeRef, | |
.expr = .{ .bitSizeOf = operand_index }, | |
}; | |
}, | |
.int_from_enum => { | |
// not working correctly with `align()` | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const operand = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
false, | |
call_ctx, | |
); | |
const builtin_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .builtin = .{ .param = 0 } }); | |
const operand_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
self.exprs.items[builtin_index] = .{ | |
.builtin = .{ | |
.name = @tagName(tags[@intFromEnum(inst)]), | |
.param = operand_index, | |
}, | |
}; | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.comptime_int_type) }, | |
.expr = .{ .builtinIndex = builtin_index }, | |
}; | |
}, | |
.switch_block => { | |
// WIP | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.SwitchBlock, pl_node.payload_index); | |
const switch_cond = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.operand, | |
false, | |
call_ctx, | |
); | |
const cond_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, switch_cond.expr); | |
_ = cond_index; | |
// const ast_index = self.ast_nodes.items.len; | |
// const type_index = self.types.items.len - 1; | |
// const ast_line = self.ast_nodes.items[ast_index - 1]; | |
// const sep = "=" ** 200; | |
// log.debug("{s}", .{sep}); | |
// log.debug("SWITCH BLOCK", .{}); | |
// log.debug("extra = {any}", .{extra}); | |
// log.debug("outer_decl = {any}", .{self.types.items[type_index]}); | |
// log.debug("ast_lines = {}", .{ast_line}); | |
// log.debug("{s}", .{sep}); | |
const switch_index = self.exprs.items.len; | |
// const src_loc = try self.srcLocInfo(file, pl_node.src_node, parent_src); | |
const switch_expr = try self.getBlockSource(file, parent_src, pl_node.src_node); | |
try self.exprs.append(self.arena, .{ .comptimeExpr = self.comptime_exprs.items.len }); | |
try self.comptime_exprs.append(self.arena, .{ .code = switch_expr }); | |
// try self.exprs.append(self.arena, .{ .switchOp = .{ | |
// .cond_index = cond_index, | |
// .file_name = file.sub_file_path, | |
// .src = ast_index, | |
// .outer_decl = type_index, | |
// } }); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .switchIndex = switch_index }, | |
}; | |
}, | |
.typeof => { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const operand = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
need_type, | |
call_ctx, | |
); | |
const operand_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
return DocData.WalkResult{ | |
.typeRef = operand.typeRef, | |
.expr = .{ .typeOf = operand_index }, | |
}; | |
}, | |
.typeof_builtin => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Block, pl_node.payload_index); | |
const body = file.zir.extra[extra.end..][extra.data.body_len - 1]; | |
const operand: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
data[body].@"break".operand, | |
false, | |
call_ctx, | |
); | |
const operand_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
return DocData.WalkResult{ | |
.typeRef = operand.typeRef, | |
.expr = .{ .typeOf = operand_index }, | |
}; | |
}, | |
.as_node, .as_shift_operand => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.As, pl_node.payload_index); | |
// Skip the as_node if the destination type is one of the call | |
// instructions we're currently walking (see call_ctx): analyzing | |
// it again here would recurse. | |
if (extra.data.dest_type.toIndex()) |dti| { | |
var maybe_cc = call_ctx; | |
while (maybe_cc) |cc| : (maybe_cc = cc.prev) { | |
if (cc.inst == dti) { | |
return try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.operand, | |
false, | |
call_ctx, | |
); | |
} | |
} | |
} | |
const dest_type_walk = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.dest_type, | |
false, | |
call_ctx, | |
); | |
const operand = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.operand, | |
false, | |
call_ctx, | |
); | |
const operand_idx = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
const dest_type_idx = self.exprs.items.len; | |
try self.exprs.append(self.arena, dest_type_walk.expr); | |
// TODO: there's something wrong with how both `as` and `WalkResult` | |
// try to store type information. | |
return DocData.WalkResult{ | |
.typeRef = dest_type_walk.expr, | |
.expr = .{ | |
.as = .{ | |
.typeRefArg = dest_type_idx, | |
.exprArg = operand_idx, | |
}, | |
}, | |
}; | |
}, | |
.optional_type => { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const operand: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
false, | |
call_ctx, | |
); | |
const operand_idx = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.Optional = .{ .name = "?TODO", .child = operand.expr }, | |
}); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = operand_idx }, | |
}; | |
}, | |
.decl_val, .decl_ref => { | |
const str_tok = data[@intFromEnum(inst)].str_tok; | |
const decl_status = parent_scope.resolveDeclName(str_tok.start, file, inst.toOptional()); | |
return DocData.WalkResult{ | |
.expr = .{ .declRef = decl_status }, | |
}; | |
}, | |
.field_val, .field_ptr => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Field, pl_node.payload_index); | |
var path: std.ArrayListUnmanaged(DocData.Expr) = .{}; | |
try path.append(self.arena, .{ | |
.declName = file.zir.nullTerminatedString(extra.data.field_name_start), | |
}); | |
// Push onto `path` the decl name of each field_* instruction | |
// that we encounter as we walk up the access chain. | |
const lhs_ref = blk: { | |
var lhs_extra = extra; | |
while (true) { | |
const lhs = @intFromEnum(lhs_extra.data.lhs.toIndex() orelse { | |
break :blk lhs_extra.data.lhs; | |
}); | |
if (tags[lhs] != .field_val and | |
tags[lhs] != .field_ptr) | |
{ | |
break :blk lhs_extra.data.lhs; | |
} | |
lhs_extra = file.zir.extraData( | |
Zir.Inst.Field, | |
data[lhs].pl_node.payload_index, | |
); | |
try path.append(self.arena, .{ | |
.declName = file.zir.nullTerminatedString(lhs_extra.data.field_name_start), | |
}); | |
} | |
}; | |
// If the lhs is a `call` instruction, it means that we're inside | |
// a function call and we're referring to one of its arguments. | |
// We can't just blindly analyze the instruction or we will | |
// start recursing forever. | |
// TODO: add proper resolution of the container type for `calls` | |
// TODO: we're testing lhs as an instruction twice (above and | |
// below this todo); maybe a cleaner solution would avoid that. | |
// TODO: double check that we really don't need type info here | |
const wr = blk: { | |
if (lhs_ref.toIndex()) |lhs_inst| switch (tags[@intFromEnum(lhs_inst)]) { | |
.call, .field_call => { | |
break :blk DocData.WalkResult{ | |
.expr = .{ | |
.comptimeExpr = 0, | |
}, | |
}; | |
}, | |
else => {}, | |
}; | |
break :blk try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
lhs_ref, | |
false, | |
call_ctx, | |
); | |
}; | |
try path.append(self.arena, wr.expr); | |
// This way the data in `path` has the same ordering that the ref | |
// path has in the text: most general component first. | |
std.mem.reverse(DocData.Expr, path.items); | |
// Right now, every element of `path` is a string except its first | |
// element (at index 0). We're now going to attempt to resolve each | |
// string. If one or more components in this path are not yet fully | |
// analyzed, the path will only be resolved partially, but we expect | |
// to eventually resolve it fully (or give up in case of a | |
// comptimeExpr). This means that: | |
// - (1) Paths can be temporarily not fully analyzed, so any code | |
// that needs to know where a ref path leads must | |
// implement support for laziness (see self.pending_ref_paths) | |
// - (2) Paths can sometimes never resolve fully. This means that | |
// any value that depends on them will have to become a | |
// comptimeExpr. | |
try self.tryResolveRefPath(file, inst, path.items); | |
return DocData.WalkResult{ .expr = .{ .refPath = path.items } }; | |
}, | |
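// Integer types are synthesized from signedness + bit count, e.g. | |
// (.unsigned, 16) renders as "u16" and (.signed, 8) as "i8". | |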
.int_type => { | |
const int_type = data[@intFromEnum(inst)].int_type; | |
const sign = if (int_type.signedness == .unsigned) "u" else "i"; | |
const bits = int_type.bit_count; | |
const name = try std.fmt.allocPrint(self.arena, "{s}{}", .{ sign, bits }); | |
try self.types.append(self.arena, .{ | |
.Int = .{ .name = name }, | |
}); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = self.types.items.len - 1 }, | |
}; | |
}, | |
.block => { | |
const res = DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .comptimeExpr = self.comptime_exprs.items.len }, | |
}; | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const block_expr = try self.getBlockSource(file, parent_src, pl_node.src_node); | |
try self.comptime_exprs.append(self.arena, .{ | |
.code = block_expr, | |
}); | |
return res; | |
}, | |
.block_inline => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Block, pl_node.payload_index); | |
return self.walkInlineBody( | |
file, | |
parent_scope, | |
try self.srcLocInfo(file, pl_node.src_node, parent_src), | |
parent_src, | |
file.zir.bodySlice(extra.end, extra.data.body_len), | |
need_type, | |
call_ctx, | |
); | |
}, | |
.break_inline => { | |
const @"break" = data[@intFromEnum(inst)].@"break"; | |
return try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
@"break".operand, | |
need_type, | |
call_ctx, | |
); | |
}, | |
.struct_init => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.StructInit, pl_node.payload_index); | |
const field_vals = try self.arena.alloc( | |
DocData.Expr.FieldVal, | |
extra.data.fields_len, | |
); | |
var type_ref: DocData.Expr = undefined; | |
var idx = extra.end; | |
for (field_vals) |*fv| { | |
const init_extra = file.zir.extraData(Zir.Inst.StructInit.Item, idx); | |
defer idx = init_extra.end; | |
const field_name = blk: { | |
const field_inst_index = @intFromEnum(init_extra.data.field_type); | |
if (tags[field_inst_index] != .struct_init_field_type) unreachable; | |
const field_pl_node = data[field_inst_index].pl_node; | |
const field_extra = file.zir.extraData( | |
Zir.Inst.FieldType, | |
field_pl_node.payload_index, | |
); | |
const field_src = try self.srcLocInfo( | |
file, | |
field_pl_node.src_node, | |
parent_src, | |
); | |
// On first iteration use field info to find out the struct type | |
if (idx == extra.end) { | |
const wr = try self.walkRef( | |
file, | |
parent_scope, | |
field_src, | |
field_extra.data.container_type, | |
false, | |
call_ctx, | |
); | |
type_ref = wr.expr; | |
} | |
break :blk file.zir.nullTerminatedString(field_extra.data.name_start); | |
}; | |
const value = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
init_extra.data.init, | |
need_type, | |
call_ctx, | |
); | |
const exprIdx = self.exprs.items.len; | |
try self.exprs.append(self.arena, value.expr); | |
var typeRefIdx: ?usize = null; | |
if (value.typeRef) |ref| { | |
typeRefIdx = self.exprs.items.len; | |
try self.exprs.append(self.arena, ref); | |
} | |
fv.* = .{ | |
.name = field_name, | |
.val = .{ | |
.typeRef = typeRefIdx, | |
.expr = exprIdx, | |
}, | |
}; | |
} | |
return DocData.WalkResult{ | |
.typeRef = type_ref, | |
.expr = .{ .@"struct" = field_vals }, | |
}; | |
}, | |
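// For `T{}` the operand is the result type itself, so its walked | |
// expression doubles as the typeRef of the empty struct init. | |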
.struct_init_empty, | |
.struct_init_empty_result, | |
=> { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const operand: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
false, | |
call_ctx, | |
); | |
return DocData.WalkResult{ | |
.typeRef = operand.expr, | |
.expr = .{ .@"struct" = &.{} }, | |
}; | |
}, | |
.struct_init_empty_ref_result => { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const operand: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
false, | |
call_ctx, | |
); | |
const struct_init_idx = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .@"struct" = &.{} }); | |
return DocData.WalkResult{ | |
.typeRef = operand.expr, | |
.expr = .{ .@"&" = struct_init_idx }, | |
}; | |
}, | |
.struct_init_anon => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.StructInitAnon, pl_node.payload_index); | |
const field_vals = try self.arena.alloc( | |
DocData.Expr.FieldVal, | |
extra.data.fields_len, | |
); | |
var idx = extra.end; | |
for (field_vals) |*fv| { | |
const init_extra = file.zir.extraData(Zir.Inst.StructInitAnon.Item, idx); | |
const field_name = file.zir.nullTerminatedString(init_extra.data.field_name); | |
const value = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
init_extra.data.init, | |
need_type, | |
call_ctx, | |
); | |
const exprIdx = self.exprs.items.len; | |
try self.exprs.append(self.arena, value.expr); | |
var typeRefIdx: ?usize = null; | |
if (value.typeRef) |ref| { | |
typeRefIdx = self.exprs.items.len; | |
try self.exprs.append(self.arena, ref); | |
} | |
fv.* = .{ | |
.name = field_name, | |
.val = .{ | |
.typeRef = typeRefIdx, | |
.expr = exprIdx, | |
}, | |
}; | |
idx = init_extra.end; | |
} | |
return DocData.WalkResult{ | |
.expr = .{ .@"struct" = field_vals }, | |
}; | |
}, | |
.error_set_decl => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.ErrorSetDecl, pl_node.payload_index); | |
const fields = try self.arena.alloc( | |
DocData.Type.Field, | |
extra.data.fields_len, | |
); | |
var idx = extra.end; | |
for (fields) |*f| { | |
const name = file.zir.nullTerminatedString(@enumFromInt(file.zir.extra[idx])); | |
idx += 1; | |
const docs = file.zir.nullTerminatedString(@enumFromInt(file.zir.extra[idx])); | |
idx += 1; | |
f.* = .{ | |
.name = name, | |
.docs = docs, | |
}; | |
} | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.ErrorSet = .{ | |
.name = "todo errset", | |
.fields = fields, | |
}, | |
}); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = type_slot_index }, | |
}; | |
}, | |
.param_anytype, .param_anytype_comptime => { | |
// @check if .param_anytype_comptime can be here | |
// Analysis of anytype function params happens in `.func`. | |
// This switch case handles the case where an expression depends | |
// on an anytype field. E.g.: `fn foo(bar: anytype) @TypeOf(bar)`. | |
// This means that we're looking at a generic expression. | |
const str_tok = data[@intFromEnum(inst)].str_tok; | |
const name = str_tok.get(file.zir); | |
const cte_slot_index = self.comptime_exprs.items.len; | |
try self.comptime_exprs.append(self.arena, .{ | |
.code = name, | |
}); | |
return DocData.WalkResult{ .expr = .{ .comptimeExpr = cte_slot_index } }; | |
}, | |
.param, .param_comptime => { | |
// See .param_anytype for more information. | |
const pl_tok = data[@intFromEnum(inst)].pl_tok; | |
const extra = file.zir.extraData(Zir.Inst.Param, pl_tok.payload_index); | |
const name = file.zir.nullTerminatedString(extra.data.name); | |
const cte_slot_index = self.comptime_exprs.items.len; | |
try self.comptime_exprs.append(self.arena, .{ | |
.code = name, | |
}); | |
return DocData.WalkResult{ .expr = .{ .comptimeExpr = cte_slot_index } }; | |
}, | |
.call => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Call, pl_node.payload_index); | |
const callee = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.callee, | |
need_type, | |
call_ctx, | |
); | |
const args_len = extra.data.flags.args_len; | |
var args = try self.arena.alloc(DocData.Expr, args_len); | |
const body = file.zir.extra[extra.end..]; | |
try self.repurposed_insts.put(self.arena, inst, {}); | |
defer _ = self.repurposed_insts.remove(inst); | |
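// Each argument is encoded as a mini body ending in a `break`: | |
// `extra` stores, per argument, the offset of its body's end, and the | |
// break's operand is the actual argument value. | |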
var i: usize = 0; | |
while (i < args_len) : (i += 1) { | |
const arg_end = file.zir.extra[extra.end + i]; | |
const break_index = body[arg_end - 1]; | |
const ref = data[break_index].@"break".operand; | |
// TODO: consider toggling need_type to true if we ever want | |
// to show discrepancies between the types of provided | |
// arguments and the types declared in the function | |
// signature for its parameters. | |
const wr = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
ref, | |
false, | |
&.{ | |
.inst = inst, | |
.prev = call_ctx, | |
}, | |
); | |
args[i] = wr.expr; | |
} | |
const cte_slot_index = self.comptime_exprs.items.len; | |
try self.comptime_exprs.append(self.arena, .{ | |
.code = "func call", | |
}); | |
const call_slot_index = self.calls.items.len; | |
try self.calls.append(self.arena, .{ | |
.func = callee.expr, | |
.args = args, | |
.ret = .{ .comptimeExpr = cte_slot_index }, | |
}); | |
return DocData.WalkResult{ | |
.typeRef = if (callee.typeRef) |tr| switch (tr) { | |
.type => |func_type_idx| switch (self.types.items[func_type_idx]) { | |
.Fn => |func| func.ret, | |
else => blk: { | |
printWithContext( | |
file, | |
inst, | |
"unexpected callee type in walkInstruction.call: `{s}`\n", | |
.{@tagName(self.types.items[func_type_idx])}, | |
); | |
break :blk null; | |
}, | |
}, | |
else => null, | |
} else null, | |
.expr = .{ .call = call_slot_index }, | |
}; | |
}, | |
.field_call => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.FieldCall, pl_node.payload_index); | |
const obj_ptr = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.obj_ptr, | |
need_type, | |
call_ctx, | |
); | |
var field_call = try self.arena.alloc(DocData.Expr, 2); | |
if (obj_ptr.typeRef) |ref| { | |
field_call[0] = ref; | |
} else { | |
field_call[0] = obj_ptr.expr; | |
} | |
field_call[1] = .{ .declName = file.zir.nullTerminatedString(extra.data.field_name_start) }; | |
try self.tryResolveRefPath(file, inst, field_call); | |
const args_len = extra.data.flags.args_len; | |
var args = try self.arena.alloc(DocData.Expr, args_len); | |
const body = file.zir.extra[extra.end..]; | |
try self.repurposed_insts.put(self.arena, inst, {}); | |
defer _ = self.repurposed_insts.remove(inst); | |
var i: usize = 0; | |
while (i < args_len) : (i += 1) { | |
const arg_end = file.zir.extra[extra.end + i]; | |
const break_index = body[arg_end - 1]; | |
const ref = data[break_index].@"break".operand; | |
// TODO: consider toggling need_type to true if we ever want | |
// to show discrepancies between the types of provided | |
// arguments and the types declared in the function | |
// signature for its parameters. | |
const wr = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
ref, | |
false, | |
&.{ | |
.inst = inst, | |
.prev = call_ctx, | |
}, | |
); | |
args[i] = wr.expr; | |
} | |
const cte_slot_index = self.comptime_exprs.items.len; | |
try self.comptime_exprs.append(self.arena, .{ | |
.code = "field call", | |
}); | |
const call_slot_index = self.calls.items.len; | |
try self.calls.append(self.arena, .{ | |
.func = .{ .refPath = field_call }, | |
.args = args, | |
.ret = .{ .comptimeExpr = cte_slot_index }, | |
}); | |
return DocData.WalkResult{ | |
.expr = .{ .call = call_slot_index }, | |
}; | |
}, | |
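// Reserve the type slot up front so the function type keeps a stable | |
// index even though analyzing its signature may append more types. | |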
.func, .func_inferred => { | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ .Unanalyzed = .{} }); | |
const result = self.analyzeFunction( | |
file, | |
parent_scope, | |
parent_src, | |
inst, | |
self_ast_node_index, | |
type_slot_index, | |
tags[@intFromEnum(inst)] == .func_inferred, | |
call_ctx, | |
); | |
return result; | |
}, | |
.func_fancy => { | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ .Unanalyzed = .{} }); | |
const result = self.analyzeFancyFunction( | |
file, | |
parent_scope, | |
parent_src, | |
inst, | |
self_ast_node_index, | |
type_slot_index, | |
call_ctx, | |
); | |
return result; | |
}, | |
.optional_payload_safe, .optional_payload_unsafe => { | |
const un_node = data[@intFromEnum(inst)].un_node; | |
const operand = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
un_node.operand, | |
need_type, | |
call_ctx, | |
); | |
const optional_idx = self.exprs.items.len; | |
try self.exprs.append(self.arena, operand.expr); | |
var typeRef: ?DocData.Expr = null; | |
if (operand.typeRef) |ref| { | |
switch (ref) { | |
.type => |t_index| { | |
const t = self.types.items[t_index]; | |
switch (t) { | |
.Optional => |opt| typeRef = opt.child, | |
else => { | |
printWithContext(file, inst, "Invalid type for optional_payload_*: {}\n", .{t}); | |
}, | |
} | |
}, | |
else => {}, | |
} | |
} | |
return DocData.WalkResult{ | |
.typeRef = typeRef, | |
.expr = .{ .optionalPayload = optional_idx }, | |
}; | |
}, | |
.elem_val_node => { | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index); | |
const lhs = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.lhs, | |
need_type, | |
call_ctx, | |
); | |
const rhs = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.rhs, | |
need_type, | |
call_ctx, | |
); | |
const lhs_idx = self.exprs.items.len; | |
try self.exprs.append(self.arena, lhs.expr); | |
const rhs_idx = self.exprs.items.len; | |
try self.exprs.append(self.arena, rhs.expr); | |
return DocData.WalkResult{ | |
.expr = .{ | |
.elemVal = .{ | |
.lhs = lhs_idx, | |
.rhs = rhs_idx, | |
}, | |
}, | |
}; | |
}, | |
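// `extended` opcodes carry their payload in `extended.operand` and | |
// `extended.small`; any opcode not handled below is reported and | |
// rendered as a TODO comptime expression. | |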
.extended => { | |
const extended = data[@intFromEnum(inst)].extended; | |
switch (extended.opcode) { | |
else => { | |
printWithContext( | |
file, | |
inst, | |
"TODO: implement `walkInstruction.extended` for {s}", | |
.{@tagName(extended.opcode)}, | |
); | |
return self.cteTodo(@tagName(extended.opcode)); | |
}, | |
.typeof_peer => { | |
// Zir says it's a NodeMultiOp but in this case it's TypeOfPeer | |
const extra = file.zir.extraData(Zir.Inst.TypeOfPeer, extended.operand); | |
const args = file.zir.refSlice(extra.end, extended.small); | |
const array_data = try self.arena.alloc(usize, args.len); | |
var array_type: ?DocData.Expr = null; | |
for (args, 0..) |arg, idx| { | |
const wr = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
arg, | |
idx == 0, | |
call_ctx, | |
); | |
if (idx == 0) { | |
array_type = wr.typeRef; | |
} | |
const expr_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, wr.expr); | |
array_data[idx] = expr_index; | |
} | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.Array = .{ | |
.len = .{ | |
.int = .{ | |
.value = args.len, | |
.negated = false, | |
}, | |
}, | |
.child = .{ .type = 0 }, | |
}, | |
}); | |
const result = DocData.WalkResult{ | |
.typeRef = .{ .type = type_slot_index }, | |
.expr = .{ .typeOf_peer = array_data }, | |
}; | |
return result; | |
}, | |
.opaque_decl => { | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ .Unanalyzed = .{} }); | |
var scope: Scope = .{ | |
.parent = parent_scope, | |
.enclosing_type = type_slot_index, | |
}; | |
const extra = file.zir.extraData(Zir.Inst.OpaqueDecl, extended.operand); | |
var extra_index: usize = extra.end; | |
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src); | |
var decl_indexes: std.ArrayListUnmanaged(usize) = .{}; | |
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{}; | |
extra_index = try self.analyzeAllDecls( | |
file, | |
&scope, | |
inst, | |
src_info, | |
&decl_indexes, | |
&priv_decl_indexes, | |
call_ctx, | |
); | |
self.types.items[type_slot_index] = .{ | |
.Opaque = .{ | |
.name = "todo_name", | |
.src = self_ast_node_index, | |
.privDecls = priv_decl_indexes.items, | |
.pubDecls = decl_indexes.items, | |
.parent_container = parent_scope.enclosing_type, | |
}, | |
}; | |
if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| { | |
for (paths.items) |resume_info| { | |
try self.tryResolveRefPath( | |
resume_info.file, | |
inst, | |
resume_info.ref_path, | |
); | |
} | |
_ = self.ref_paths_pending_on_types.remove(type_slot_index); | |
// TODO: we should deallocate the arraylist that holds all the | |
// decl paths. not doing it now since it's arena-allocated | |
// anyway, but maybe we should put it elsewhere. | |
} | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = type_slot_index }, | |
}; | |
}, | |
.variable => { | |
const extra = file.zir.extraData(Zir.Inst.ExtendedVar, extended.operand); | |
const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small)); | |
var extra_index: usize = extra.end; | |
if (small.has_lib_name) extra_index += 1; | |
if (small.has_align) extra_index += 1; | |
const var_type = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.data.var_type, | |
need_type, | |
call_ctx, | |
); | |
var value: DocData.WalkResult = .{ | |
.typeRef = var_type.expr, | |
.expr = .{ .undefined = .{} }, | |
}; | |
if (small.has_init) { | |
const var_init_ref = @as(Ref, @enumFromInt(file.zir.extra[extra_index])); | |
const var_init = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
var_init_ref, | |
need_type, | |
call_ctx, | |
); | |
value.expr = var_init.expr; | |
value.typeRef = var_init.typeRef; | |
} | |
return value; | |
}, | |
.union_decl => { | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ .Unanalyzed = .{} }); | |
var scope: Scope = .{ | |
.parent = parent_scope, | |
.enclosing_type = type_slot_index, | |
}; | |
const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); | |
const extra = file.zir.extraData(Zir.Inst.UnionDecl, extended.operand); | |
var extra_index: usize = extra.end; | |
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src); | |
// We delay analysis because union tags can refer to | |
// decls defined inside the union itself. | |
const tag_type_ref: ?Ref = if (small.has_tag_type) blk: { | |
const tag_type = file.zir.extra[extra_index]; | |
extra_index += 1; | |
const tag_ref = @as(Ref, @enumFromInt(tag_type)); | |
break :blk tag_ref; | |
} else null; | |
const body_len = if (small.has_body_len) blk: { | |
const body_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
break :blk body_len; | |
} else 0; | |
const fields_len = if (small.has_fields_len) blk: { | |
const fields_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
break :blk fields_len; | |
} else 0; | |
const layout_expr: ?DocData.Expr = switch (small.layout) { | |
.Auto => null, | |
else => .{ .enumLiteral = @tagName(small.layout) }, | |
}; | |
var decl_indexes: std.ArrayListUnmanaged(usize) = .{}; | |
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{}; | |
extra_index = try self.analyzeAllDecls( | |
file, | |
&scope, | |
inst, | |
src_info, | |
&decl_indexes, | |
&priv_decl_indexes, | |
call_ctx, | |
); | |
// Analyze the tag once all decls have been analyzed | |
const tag_type = if (tag_type_ref) |tt_ref| (try self.walkRef( | |
file, | |
&scope, | |
parent_src, | |
tt_ref, | |
false, | |
call_ctx, | |
)).expr else null; | |
// Fields | |
extra_index += body_len; | |
var field_type_refs = try std.ArrayListUnmanaged(DocData.Expr).initCapacity( | |
self.arena, | |
fields_len, | |
); | |
var field_name_indexes = try std.ArrayListUnmanaged(usize).initCapacity( | |
self.arena, | |
fields_len, | |
); | |
try self.collectUnionFieldInfo( | |
file, | |
&scope, | |
src_info, | |
fields_len, | |
&field_type_refs, | |
&field_name_indexes, | |
extra_index, | |
call_ctx, | |
); | |
self.ast_nodes.items[self_ast_node_index].fields = field_name_indexes.items; | |
self.types.items[type_slot_index] = .{ | |
.Union = .{ | |
.name = "todo_name", | |
.src = self_ast_node_index, | |
.privDecls = priv_decl_indexes.items, | |
.pubDecls = decl_indexes.items, | |
.fields = field_type_refs.items, | |
.tag = tag_type, | |
.auto_enum = small.auto_enum_tag, | |
.parent_container = parent_scope.enclosing_type, | |
.layout = layout_expr, | |
}, | |
}; | |
if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| { | |
for (paths.items) |resume_info| { | |
try self.tryResolveRefPath( | |
resume_info.file, | |
inst, | |
resume_info.ref_path, | |
); | |
} | |
_ = self.ref_paths_pending_on_types.remove(type_slot_index); | |
// TODO: we should deallocate the arraylist that holds all the | |
// decl paths. not doing it now since it's arena-allocated | |
// anyway, but maybe we should put it elsewhere. | |
} | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = type_slot_index }, | |
}; | |
}, | |
.enum_decl => { | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ .Unanalyzed = .{} }); | |
var scope: Scope = .{ | |
.parent = parent_scope, | |
.enclosing_type = type_slot_index, | |
}; | |
const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); | |
const extra = file.zir.extraData(Zir.Inst.EnumDecl, extended.operand); | |
var extra_index: usize = extra.end; | |
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src); | |
const tag_type: ?DocData.Expr = if (small.has_tag_type) blk: { | |
const tag_type = file.zir.extra[extra_index]; | |
extra_index += 1; | |
const tag_ref = @as(Ref, @enumFromInt(tag_type)); | |
const wr = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
tag_ref, | |
false, | |
call_ctx, | |
); | |
break :blk wr.expr; | |
} else null; | |
const body_len = if (small.has_body_len) blk: { | |
const body_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
break :blk body_len; | |
} else 0; | |
const fields_len = if (small.has_fields_len) blk: { | |
const fields_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
break :blk fields_len; | |
} else 0; | |
var decl_indexes: std.ArrayListUnmanaged(usize) = .{}; | |
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{}; | |
extra_index = try self.analyzeAllDecls( | |
file, | |
&scope, | |
inst, | |
src_info, | |
&decl_indexes, | |
&priv_decl_indexes, | |
call_ctx, | |
); | |
// const body = file.zir.extra[extra_index..][0..body_len]; | |
extra_index += body_len; | |
var field_name_indexes: std.ArrayListUnmanaged(usize) = .{}; | |
var field_values: std.ArrayListUnmanaged(?DocData.Expr) = .{}; | |
{ | |
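// Fields trail as (name, doc comment[, value]) records, preceded by | |
// a bit bag: one u32 per 32 fields, where bit i of the current word | |
// says whether field i carries an explicit value. | |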
var bit_bag_idx = extra_index; | |
var cur_bit_bag: u32 = undefined; | |
extra_index += std.math.divCeil(usize, fields_len, 32) catch unreachable; | |
var idx: usize = 0; | |
while (idx < fields_len) : (idx += 1) { | |
if (idx % 32 == 0) { | |
cur_bit_bag = file.zir.extra[bit_bag_idx]; | |
bit_bag_idx += 1; | |
} | |
const has_value = @as(u1, @truncate(cur_bit_bag)) != 0; | |
cur_bit_bag >>= 1; | |
const field_name_index: Zir.NullTerminatedString = @enumFromInt(file.zir.extra[extra_index]); | |
extra_index += 1; | |
const doc_comment_index: Zir.NullTerminatedString = @enumFromInt(file.zir.extra[extra_index]); | |
extra_index += 1; | |
const value_expr: ?DocData.Expr = if (has_value) blk: { | |
const value_ref = file.zir.extra[extra_index]; | |
extra_index += 1; | |
const value = try self.walkRef( | |
file, | |
&scope, | |
src_info, | |
@as(Ref, @enumFromInt(value_ref)), | |
false, | |
call_ctx, | |
); | |
break :blk value.expr; | |
} else null; | |
try field_values.append(self.arena, value_expr); | |
const field_name = file.zir.nullTerminatedString(field_name_index); | |
try field_name_indexes.append(self.arena, self.ast_nodes.items.len); | |
const doc_comment: ?[]const u8 = if (doc_comment_index != .empty) | |
file.zir.nullTerminatedString(doc_comment_index) | |
else | |
null; | |
try self.ast_nodes.append(self.arena, .{ | |
.name = field_name, | |
.docs = doc_comment, | |
}); | |
} | |
} | |
self.ast_nodes.items[self_ast_node_index].fields = field_name_indexes.items; | |
self.types.items[type_slot_index] = .{ | |
.Enum = .{ | |
.name = "todo_name", | |
.src = self_ast_node_index, | |
.privDecls = priv_decl_indexes.items, | |
.pubDecls = decl_indexes.items, | |
.tag = tag_type, | |
.values = field_values.items, | |
.nonexhaustive = small.nonexhaustive, | |
.parent_container = parent_scope.enclosing_type, | |
}, | |
}; | |
if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| { | |
for (paths.items) |resume_info| { | |
try self.tryResolveRefPath( | |
resume_info.file, | |
inst, | |
resume_info.ref_path, | |
); | |
} | |
_ = self.ref_paths_pending_on_types.remove(type_slot_index); | |
// TODO: we should deallocate the arraylist that holds all the | |
// decl paths. not doing it now since it's arena-allocated | |
// anyway, but maybe we should put it elsewhere. | |
} | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = type_slot_index }, | |
}; | |
}, | |
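// Struct decls keep optional lengths (fields, decls, backing int) | |
// behind `small` flags; each flag that is set bumps `extra_index` | |
// before the corresponding payload is read. | |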
.struct_decl => { | |
const type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ .Unanalyzed = .{} }); | |
var scope: Scope = .{ | |
.parent = parent_scope, | |
.enclosing_type = type_slot_index, | |
}; | |
const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); | |
const extra = file.zir.extraData(Zir.Inst.StructDecl, extended.operand); | |
var extra_index: usize = extra.end; | |
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src); | |
const fields_len = if (small.has_fields_len) blk: { | |
const fields_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
break :blk fields_len; | |
} else 0; | |
// We don't care about decls yet | |
if (small.has_decls_len) extra_index += 1; | |
var backing_int: ?DocData.Expr = null; | |
if (small.has_backing_int) { | |
const backing_int_body_len = file.zir.extra[extra_index]; | |
extra_index += 1; // backing_int_body_len | |
if (backing_int_body_len == 0) { | |
const backing_int_ref = @as(Ref, @enumFromInt(file.zir.extra[extra_index])); | |
const backing_int_res = try self.walkRef( | |
file, | |
&scope, | |
src_info, | |
backing_int_ref, | |
true, | |
call_ctx, | |
); | |
backing_int = backing_int_res.expr; | |
extra_index += 1; // backing_int_ref | |
} else { | |
const backing_int_body = file.zir.bodySlice(extra_index, backing_int_body_len); | |
const break_inst = backing_int_body[backing_int_body.len - 1]; | |
const operand = data[@intFromEnum(break_inst)].@"break".operand; | |
const backing_int_res = try self.walkRef( | |
file, | |
&scope, | |
src_info, | |
operand, | |
true, | |
call_ctx, | |
); | |
backing_int = backing_int_res.expr; | |
extra_index += backing_int_body_len; // backing_int_body_inst | |
} | |
} | |
const layout_expr: ?DocData.Expr = switch (small.layout) { | |
.Auto => null, | |
else => .{ .enumLiteral = @tagName(small.layout) }, | |
}; | |
var decl_indexes: std.ArrayListUnmanaged(usize) = .{}; | |
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{}; | |
extra_index = try self.analyzeAllDecls( | |
file, | |
&scope, | |
inst, | |
src_info, | |
&decl_indexes, | |
&priv_decl_indexes, | |
call_ctx, | |
); | |
// Inside field init bodies, the struct decl instruction is used to refer to the | |
// field type during the second pass of analysis. | |
try self.repurposed_insts.put(self.arena, inst, {}); | |
defer _ = self.repurposed_insts.remove(inst); | |
var field_type_refs: std.ArrayListUnmanaged(DocData.Expr) = .{}; | |
var field_default_refs: std.ArrayListUnmanaged(?DocData.Expr) = .{}; | |
var field_name_indexes: std.ArrayListUnmanaged(usize) = .{}; | |
try self.collectStructFieldInfo( | |
file, | |
&scope, | |
src_info, | |
fields_len, | |
&field_type_refs, | |
&field_default_refs, | |
&field_name_indexes, | |
extra_index, | |
small.is_tuple, | |
call_ctx, | |
); | |
self.ast_nodes.items[self_ast_node_index].fields = field_name_indexes.items; | |
self.types.items[type_slot_index] = .{ | |
.Struct = .{ | |
.name = "todo_name", | |
.src = self_ast_node_index, | |
.privDecls = priv_decl_indexes.items, | |
.pubDecls = decl_indexes.items, | |
.field_types = field_type_refs.items, | |
.field_defaults = field_default_refs.items, | |
.is_tuple = small.is_tuple, | |
.backing_int = backing_int, | |
.line_number = self.ast_nodes.items[self_ast_node_index].line, | |
.parent_container = parent_scope.enclosing_type, | |
.layout = layout_expr, | |
}, | |
}; | |
if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| { | |
for (paths.items) |resume_info| { | |
try self.tryResolveRefPath( | |
resume_info.file, | |
inst, | |
resume_info.ref_path, | |
); | |
} | |
_ = self.ref_paths_pending_on_types.remove(type_slot_index); | |
// TODO: we should deallocate the arraylist that holds all the | |
// decl paths. not doing it now since it's arena-allocated | |
// anyway, but maybe we should put it elsewhere. | |
} | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = type_slot_index }, | |
}; | |
}, | |
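// For reference, a minimal sketch (hypothetical source, not taken from | |
// this compiler) of what exercises the `has_backing_int` branch above: | |
// a packed struct with an explicit backing integer. | |
// | |
//   const Flags = packed struct(u16) { | |
//       a: bool, | |
//       b: bool, | |
//       _reserved: u14, | |
//   }; | |
// | |
// A bare type like `u16` lowers to a single ref (the | |
// `backing_int_body_len == 0` path); a computed type such as | |
// `packed struct(std.meta.Int(.unsigned, 16))` lowers to a body whose | |
// final `break` operand we walk instead. | |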
.this => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ | |
.this = parent_scope.enclosing_type.?, | |
// We know enclosing_type is always present | |
// because it's only null for the top-level | |
// struct instruction of a file. | |
}, | |
}; | |
}, | |
.int_from_error, | |
.error_from_int, | |
.reify, | |
=> { | |
const extra = file.zir.extraData(Zir.Inst.UnNode, extended.operand).data; | |
const bin_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .builtin = .{ .param = 0 } }); | |
const param = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.operand, | |
false, | |
call_ctx, | |
); | |
const param_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, param.expr); | |
self.exprs.items[bin_index] = .{ .builtin = .{ .name = @tagName(extended.opcode), .param = param_index } }; | |
return DocData.WalkResult{ | |
.typeRef = param.typeRef orelse .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .builtinIndex = bin_index }, | |
}; | |
}, | |
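// The cases above (and below) share a reserve-then-patch pattern: an | |
// expr slot is appended *before* walking the operand, then overwritten | |
// once the operand's index is known. A minimal sketch of the idea: | |
// | |
//   const slot = self.exprs.items.len; | |
//   try self.exprs.append(self.arena, .{ .builtin = .{ .param = 0 } }); // reserve | |
//   const param = try self.walkRef(...); // may append any number of exprs | |
//   const param_index = self.exprs.items.len; | |
//   try self.exprs.append(self.arena, param.expr); | |
//   self.exprs.items[slot] = .{ .builtin = .{ .name = name, .param = param_index } }; | |
// | |
// Reserving first keeps the builtin's own index stable even though | |
// walking the operand grows `self.exprs` by an unknown amount. | |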
.work_item_id, | |
.work_group_size, | |
.work_group_id, | |
=> { | |
const extra = file.zir.extraData(Zir.Inst.UnNode, extended.operand).data; | |
const bin_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .builtin = .{ .param = 0 } }); | |
const param = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.operand, | |
false, | |
call_ctx, | |
); | |
const param_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, param.expr); | |
self.exprs.items[bin_index] = .{ .builtin = .{ .name = @tagName(extended.opcode), .param = param_index } }; | |
return DocData.WalkResult{ | |
// from docs we know they return u32 | |
.typeRef = .{ .type = @intFromEnum(Ref.u32_type) }, | |
.expr = .{ .builtinIndex = bin_index }, | |
}; | |
}, | |
.cmpxchg => { | |
const extra = file.zir.extraData(Zir.Inst.Cmpxchg, extended.operand).data; | |
const last_type_index = self.exprs.items.len; | |
const last_type = self.exprs.items[last_type_index - 1]; | |
const type_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, last_type); | |
const ptr_index = self.exprs.items.len; | |
const ptr: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.ptr, | |
false, | |
call_ctx, | |
); | |
try self.exprs.append(self.arena, ptr.expr); | |
const expected_value_index = self.exprs.items.len; | |
const expected_value: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.expected_value, | |
false, | |
call_ctx, | |
); | |
try self.exprs.append(self.arena, expected_value.expr); | |
const new_value_index = self.exprs.items.len; | |
const new_value: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.new_value, | |
false, | |
call_ctx, | |
); | |
try self.exprs.append(self.arena, new_value.expr); | |
const success_order_index = self.exprs.items.len; | |
const success_order: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.success_order, | |
false, | |
call_ctx, | |
); | |
try self.exprs.append(self.arena, success_order.expr); | |
const failure_order_index = self.exprs.items.len; | |
const failure_order: DocData.WalkResult = try self.walkRef( | |
file, | |
parent_scope, | |
parent_src, | |
extra.failure_order, | |
false, | |
call_ctx, | |
); | |
try self.exprs.append(self.arena, failure_order.expr); | |
const cmpxchg_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, .{ .cmpxchg = .{ | |
.name = @tagName(tags[@intFromEnum(inst)]), | |
.type = type_index, | |
.ptr = ptr_index, | |
.expected_value = expected_value_index, | |
.new_value = new_value_index, | |
.success_order = success_order_index, | |
.failure_order = failure_order_index, | |
} }); | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .cmpxchgIndex = cmpxchg_index }, | |
}; | |
}, | |
} | |
}, | |
} | |
} | |
/// Called by `walkInstruction` when encountering a container type. | |
/// Iterates over all decl definitions in its body and analyzes each | |
/// decl's body recursively by calling into `walkInstruction`. | |
/// | |
/// Does not append to `self.decls` directly because `walkInstruction` | |
/// is expected to look-ahead scan all decls and reserve `body_len` | |
/// slots in `self.decls`, which are then filled out by this function. | |
fn analyzeAllDecls( | |
self: *Autodoc, | |
file: *File, | |
scope: *Scope, | |
parent_inst: Zir.Inst.Index, | |
parent_src: SrcLocInfo, | |
decl_indexes: *std.ArrayListUnmanaged(usize), | |
priv_decl_indexes: *std.ArrayListUnmanaged(usize), | |
call_ctx: ?*const CallContext, | |
) AutodocErrors!usize { | |
const first_decl_indexes_slot = decl_indexes.items.len; | |
const original_it = file.zir.declIterator(parent_inst); | |
// First loop to discover decl names | |
{ | |
var it = original_it; | |
while (it.next()) |zir_index| { | |
const declaration, _ = file.zir.getDeclaration(zir_index); | |
if (declaration.name.isNamedTest(file.zir)) continue; | |
const decl_name = declaration.name.toString(file.zir) orelse continue; | |
try scope.insertDeclRef(self.arena, decl_name, .Pending); | |
} | |
} | |
// Second loop to analyze `usingnamespace` decls | |
{ | |
var it = original_it; | |
var decl_indexes_slot = first_decl_indexes_slot; | |
while (it.next()) |zir_index| : (decl_indexes_slot += 1) { | |
const pl_node = file.zir.instructions.items(.data)[@intFromEnum(zir_index)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Declaration, pl_node.payload_index); | |
if (extra.data.name != .@"usingnamespace") continue; | |
try self.analyzeUsingnamespaceDecl( | |
file, | |
scope, | |
try self.srcLocInfo(file, pl_node.src_node, parent_src), | |
decl_indexes, | |
priv_decl_indexes, | |
extra.data, | |
@intCast(extra.end), | |
call_ctx, | |
); | |
} | |
} | |
// Third loop to analyze all remaining decls | |
{ | |
var it = original_it; | |
while (it.next()) |zir_index| { | |
const pl_node = file.zir.instructions.items(.data)[@intFromEnum(zir_index)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Declaration, pl_node.payload_index); | |
switch (extra.data.name) { | |
.@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, | |
_ => if (extra.data.name.isNamedTest(file.zir)) continue, | |
} | |
try self.analyzeDecl( | |
file, | |
scope, | |
try self.srcLocInfo(file, pl_node.src_node, parent_src), | |
decl_indexes, | |
priv_decl_indexes, | |
zir_index, | |
extra.data, | |
@intCast(extra.end), | |
call_ctx, | |
); | |
} | |
} | |
// Fourth loop to analyze decltests | |
var it = original_it; | |
while (it.next()) |zir_index| { | |
const pl_node = file.zir.instructions.items(.data)[@intFromEnum(zir_index)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.Declaration, pl_node.payload_index); | |
if (extra.data.name != .decltest) continue; | |
try self.analyzeDecltest( | |
file, | |
scope, | |
try self.srcLocInfo(file, pl_node.src_node, parent_src), | |
extra.data, | |
@intCast(extra.end), | |
); | |
} | |
return it.extra_index; | |
} | |
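// Note that each pass above copies `original_it` by value, so every | |
// loop re-walks the same decl list from the start: | |
// | |
//   pass 1: reserve every decl name in scope as `.Pending` | |
//   pass 2: `usingnamespace` decls | |
//   pass 3: ordinary decls (marks them `.Analyzed`) | |
//   pass 4: decltests, which need the `.Analyzed` status from pass 3 | |
// | |
// Pass 1 must run to completion first so that decl bodies can | |
// reference one another out of source order; see the `unreachable` | |
// in `analyzeDecltest` for the pass-3/pass-4 ordering dependency. | |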
fn walkInlineBody( | |
autodoc: *Autodoc, | |
file: *File, | |
scope: *Scope, | |
block_src: SrcLocInfo, | |
parent_src: SrcLocInfo, | |
body: []const Zir.Inst.Index, | |
need_type: bool, | |
call_ctx: ?*const CallContext, | |
) AutodocErrors!DocData.WalkResult { | |
const tags = file.zir.instructions.items(.tag); | |
const break_inst = switch (tags[@intFromEnum(body[body.len - 1])]) { | |
.condbr_inline => { | |
// Unresolvable. | |
const res: DocData.WalkResult = .{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .comptimeExpr = autodoc.comptime_exprs.items.len }, | |
}; | |
const source = (try file.getTree(autodoc.zcu.gpa)).getNodeSource(block_src.src_node); | |
try autodoc.comptime_exprs.append(autodoc.arena, .{ | |
.code = source, | |
}); | |
return res; | |
}, | |
.break_inline => body[body.len - 1], | |
else => unreachable, | |
}; | |
const break_data = file.zir.instructions.items(.data)[@intFromEnum(break_inst)].@"break"; | |
return autodoc.walkRef(file, scope, parent_src, break_data.operand, need_type, call_ctx); | |
} | |
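// In other words, an inline body is only resolvable when it ends in | |
// `break_inline`; a trailing `condbr_inline` means the result depends | |
// on comptime control flow, so we store the raw source text instead. | |
// A sketch of the resolvable shape, in hypothetical ZIR pseudo-ops: | |
// | |
//   %1 = int(42) | |
//   %2 = break_inline(%1) // operand %1 is what actually gets walked | |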
// Analyzes a single decl, appending it to the pub or priv decl list as appropriate. | |
fn analyzeDecl( | |
self: *Autodoc, | |
file: *File, | |
scope: *Scope, | |
decl_src: SrcLocInfo, | |
decl_indexes: *std.ArrayListUnmanaged(usize), | |
priv_decl_indexes: *std.ArrayListUnmanaged(usize), | |
decl_inst: Zir.Inst.Index, | |
declaration: Zir.Inst.Declaration, | |
extra_index: u32, | |
call_ctx: ?*const CallContext, | |
) AutodocErrors!void { | |
const bodies = declaration.getBodies(extra_index, file.zir); | |
const name = file.zir.nullTerminatedString(declaration.name.toString(file.zir).?); | |
const doc_comment: ?[]const u8 = if (declaration.flags.has_doc_comment) | |
file.zir.nullTerminatedString(@enumFromInt(file.zir.extra[extra_index])) | |
else | |
null; | |
// astnode | |
const ast_node_index = idx: { | |
const idx = self.ast_nodes.items.len; | |
try self.ast_nodes.append(self.arena, .{ | |
.file = self.files.getIndex(file).?, | |
.line = decl_src.line, | |
.col = 0, | |
.docs = doc_comment, | |
.fields = null, // walkInstruction will fill `fields` if necessary | |
}); | |
break :idx idx; | |
}; | |
const walk_result = try self.walkInlineBody( | |
file, | |
scope, | |
decl_src, | |
decl_src, | |
bodies.value_body, | |
true, | |
call_ctx, | |
); | |
const tree = try file.getTree(self.zcu.gpa); | |
const kind_token = tree.nodes.items(.main_token)[decl_src.src_node]; | |
const kind: []const u8 = switch (tree.tokens.items(.tag)[kind_token]) { | |
.keyword_var => "var", | |
else => "const", | |
}; | |
const decls_slot_index = self.decls.items.len; | |
try self.decls.append(self.arena, .{ | |
.name = name, | |
.src = ast_node_index, | |
.value = walk_result, | |
.kind = kind, | |
.parent_container = scope.enclosing_type, | |
}); | |
if (declaration.flags.is_pub) { | |
try decl_indexes.append(self.arena, decls_slot_index); | |
} else { | |
try priv_decl_indexes.append(self.arena, decls_slot_index); | |
} | |
const decl_status_ptr = scope.resolveDeclName(declaration.name.toString(file.zir).?, file, .none); | |
std.debug.assert(decl_status_ptr.* == .Pending); | |
decl_status_ptr.* = .{ .Analyzed = decls_slot_index }; | |
// Unblock any pending decl path that was waiting for this decl. | |
if (self.ref_paths_pending_on_decls.get(decl_status_ptr)) |paths| { | |
for (paths.items) |resume_info| { | |
try self.tryResolveRefPath( | |
resume_info.file, | |
decl_inst, | |
resume_info.ref_path, | |
); | |
} | |
_ = self.ref_paths_pending_on_decls.remove(decl_status_ptr); | |
// TODO: we should deallocate the arraylist that holds all the | |
// ref paths. not doing it now since it's arena-allocated | |
// anyway, but maybe we should put it elsewhere. | |
} | |
} | |
fn analyzeUsingnamespaceDecl( | |
self: *Autodoc, | |
file: *File, | |
scope: *Scope, | |
decl_src: SrcLocInfo, | |
decl_indexes: *std.ArrayListUnmanaged(usize), | |
priv_decl_indexes: *std.ArrayListUnmanaged(usize), | |
declaration: Zir.Inst.Declaration, | |
extra_index: u32, | |
call_ctx: ?*const CallContext, | |
) AutodocErrors!void { | |
const bodies = declaration.getBodies(extra_index, file.zir); | |
const doc_comment: ?[]const u8 = if (declaration.flags.has_doc_comment) | |
file.zir.nullTerminatedString(@enumFromInt(file.zir.extra[extra_index])) | |
else | |
null; | |
// astnode | |
const ast_node_index = idx: { | |
const idx = self.ast_nodes.items.len; | |
try self.ast_nodes.append(self.arena, .{ | |
.file = self.files.getIndex(file).?, | |
.line = decl_src.line, | |
.col = 0, | |
.docs = doc_comment, | |
.fields = null, // walkInstruction will fill `fields` if necessary | |
}); | |
break :idx idx; | |
}; | |
const walk_result = try self.walkInlineBody( | |
file, | |
scope, | |
decl_src, | |
decl_src, | |
bodies.value_body, | |
true, | |
call_ctx, | |
); | |
const decl_slot_index = self.decls.items.len; | |
try self.decls.append(self.arena, .{ | |
.name = "", | |
.kind = "", | |
.src = ast_node_index, | |
.value = walk_result, | |
.is_uns = true, | |
.parent_container = scope.enclosing_type, | |
}); | |
if (declaration.flags.is_pub) { | |
try decl_indexes.append(self.arena, decl_slot_index); | |
} else { | |
try priv_decl_indexes.append(self.arena, decl_slot_index); | |
} | |
} | |
fn analyzeDecltest( | |
self: *Autodoc, | |
file: *File, | |
scope: *Scope, | |
decl_src: SrcLocInfo, | |
declaration: Zir.Inst.Declaration, | |
extra_index: u32, | |
) AutodocErrors!void { | |
std.debug.assert(declaration.flags.has_doc_comment); | |
const decl_name_index: Zir.NullTerminatedString = @enumFromInt(file.zir.extra[extra_index]); | |
const test_source_code = (try file.getTree(self.zcu.gpa)).getNodeSource(decl_src.src_node); | |
const decl_name: ?[]const u8 = if (decl_name_index != .empty) | |
file.zir.nullTerminatedString(decl_name_index) | |
else | |
null; | |
// astnode | |
const ast_node_index = idx: { | |
const idx = self.ast_nodes.items.len; | |
try self.ast_nodes.append(self.arena, .{ | |
.file = self.files.getIndex(file).?, | |
.line = decl_src.line, | |
.col = 0, | |
.name = decl_name, | |
.code = test_source_code, | |
}); | |
break :idx idx; | |
}; | |
const decl_status = scope.resolveDeclName(decl_name_index, file, .none); | |
switch (decl_status.*) { | |
.Analyzed => |idx| { | |
self.decls.items[idx].decltest = ast_node_index; | |
}, | |
else => unreachable, // we assume analyzeAllDecls analyzed other decls by this point | |
} | |
} | |
/// An unresolved path has a non-string WalkResult at its beginning, while every | |
/// other element is a string WalkResult. Resolving means iteratively mapping each | |
/// string to a Decl / Type / Call / etc. | |
/// | |
/// If we encounter an unanalyzed decl during the process, we append the | |
/// unsolved sub-path to `self.ref_paths_pending_on_decls` and bail out. | |
/// The same happens when a decl holds a type definition that hasn't been fully | |
/// analyzed yet (except that we append to `self.ref_paths_pending_on_types`). | |
/// | |
/// When analyzeAllDecls / walkInstruction finishes analyzing a decl / type, it will | |
/// then check if there's any pending ref path blocked on it and, if any, it | |
/// will progress their resolution by calling tryResolveRefPath again. | |
/// | |
/// Ref paths can also depend on other ref paths. See | |
/// `self.pending_ref_paths` for more info. | |
/// | |
/// A ref path that has a component that resolves into a comptimeExpr will | |
/// give up its resolution process entirely, leaving the remaining components | |
/// as strings. | |
fn tryResolveRefPath( | |
self: *Autodoc, | |
/// File from which the decl path originates. | |
file: *File, | |
inst: Zir.Inst.Index, // used only for panicWithContext | |
path: []DocData.Expr, | |
) AutodocErrors!void { | |
var i: usize = 0; | |
outer: while (i < path.len - 1) : (i += 1) { | |
const parent = path[i]; | |
const child_string = path[i + 1].declName; // we expect to find an unresolved decl name | |
var resolved_parent = parent; | |
var j: usize = 0; | |
while (j < 10_000) : (j += 1) { | |
switch (resolved_parent) { | |
else => break, | |
.this => |t| resolved_parent = .{ .type = t }, | |
.declIndex => |decl_index| { | |
const decl = self.decls.items[decl_index]; | |
resolved_parent = decl.value.expr; | |
continue; | |
}, | |
.declRef => |decl_status_ptr| { | |
// NOTE: must be kept in sync with `findNameInUnsDecls` | |
switch (decl_status_ptr.*) { | |
// The use of unreachable here is conservative. | |
// It might be that it truly should be up to us to | |
// request the analysis of this decl, but it's not clear | |
// at the moment of writing. | |
.NotRequested => unreachable, | |
.Analyzed => |decl_index| { | |
const decl = self.decls.items[decl_index]; | |
resolved_parent = decl.value.expr; | |
continue; | |
}, | |
.Pending => { | |
// This decl path is pending completion | |
{ | |
const res = try self.pending_ref_paths.getOrPut( | |
self.arena, | |
&path[path.len - 1], | |
); | |
if (!res.found_existing) res.value_ptr.* = .{}; | |
} | |
const res = try self.ref_paths_pending_on_decls.getOrPut( | |
self.arena, | |
decl_status_ptr, | |
); | |
if (!res.found_existing) res.value_ptr.* = .{}; | |
try res.value_ptr.*.append(self.arena, .{ | |
.file = file, | |
.ref_path = path[i..path.len], | |
}); | |
// We return instead of doing `break :outer` to prevent the | |
// code after the :outer while loop from running, as it assumes | |
// that the path has been fully analyzed (or that we have | |
// given up because of a comptimeExpr). | |
return; | |
}, | |
} | |
}, | |
.refPath => |rp| { | |
if (self.pending_ref_paths.getPtr(&rp[rp.len - 1])) |waiter_list| { | |
try waiter_list.append(self.arena, .{ | |
.file = file, | |
.ref_path = path[i..path.len], | |
}); | |
// This decl path is pending completion | |
{ | |
const res = try self.pending_ref_paths.getOrPut( | |
self.arena, | |
&path[path.len - 1], | |
); | |
if (!res.found_existing) res.value_ptr.* = .{}; | |
} | |
return; | |
} | |
// If the last element is a declName or a CTE, then we give up, | |
// otherwise we resolve the parent to it and loop again. | |
// NOTE: we assume that if we find a string, it's because of | |
// a CTE component somewhere in the path. We know that the path | |
// is not pending further evaluation because we just checked! | |
const last = rp[rp.len - 1]; | |
switch (last) { | |
.comptimeExpr, .declName => break :outer, | |
else => { | |
resolved_parent = last; | |
continue; | |
}, | |
} | |
}, | |
.fieldVal => |fv| { | |
resolved_parent = self.exprs.items[fv.val.expr]; | |
}, | |
} | |
} else { | |
panicWithContext( | |
file, | |
inst, | |
"exhausted eval quota for `{}`in tryResolveRefPath\n", | |
.{resolved_parent}, | |
); | |
} | |
switch (resolved_parent) { | |
else => { | |
// NOTE: indirect references to types / decls should be handled | |
// in the switch above this one! | |
printWithContext( | |
file, | |
inst, | |
"TODO: handle `{s}`in tryResolveRefPath\nInfo: {}", | |
.{ @tagName(resolved_parent), resolved_parent }, | |
); | |
// path[i + 1] = (try self.cteTodo("<match failure>")).expr; | |
continue :outer; | |
}, | |
.comptimeExpr, .call, .typeOf => { | |
// Since we hit a cte, we leave the remaining strings unresolved | |
// and completely give up on resolving this decl path. | |
//decl_path.hasCte = true; | |
break :outer; | |
}, | |
.type => |t_index| switch (self.types.items[t_index]) { | |
else => { | |
panicWithContext( | |
file, | |
inst, | |
"TODO: handle `{s}` in tryResolveDeclPath.type\nInfo: {}", | |
.{ @tagName(self.types.items[t_index]), resolved_parent }, | |
); | |
}, | |
.ComptimeExpr => { | |
// Same as the comptimeExpr branch above | |
break :outer; | |
}, | |
.Unanalyzed => { | |
// This decl path is pending completion | |
{ | |
const res = try self.pending_ref_paths.getOrPut( | |
self.arena, | |
&path[path.len - 1], | |
); | |
if (!res.found_existing) res.value_ptr.* = .{}; | |
} | |
const res = try self.ref_paths_pending_on_types.getOrPut( | |
self.arena, | |
t_index, | |
); | |
if (!res.found_existing) res.value_ptr.* = .{}; | |
try res.value_ptr.*.append(self.arena, .{ | |
.file = file, | |
.ref_path = path[i..path.len], | |
}); | |
return; | |
}, | |
.Array => { | |
if (std.mem.eql(u8, child_string, "len")) { | |
path[i + 1] = .{ | |
.builtinField = .len, | |
}; | |
} else { | |
panicWithContext( | |
file, | |
inst, | |
"TODO: handle `{s}` in tryResolveDeclPath.type.Array\nInfo: {}", | |
.{ child_string, resolved_parent }, | |
); | |
} | |
}, | |
// TODO: the following searches could probably | |
// be performed more efficiently on the corresponding | |
// scope | |
.Enum => |t_enum| { // foo.bar.baz | |
// Look into locally-defined pub decls | |
for (t_enum.pubDecls) |idx| { | |
const d = self.decls.items[idx]; | |
if (d.is_uns) continue; | |
if (std.mem.eql(u8, d.name, child_string)) { | |
path[i + 1] = .{ .declIndex = idx }; | |
continue :outer; | |
} | |
} | |
// Look into locally-defined priv decls | |
for (t_enum.privDecls) |idx| { | |
const d = self.decls.items[idx]; | |
if (d.is_uns) continue; | |
if (std.mem.eql(u8, d.name, child_string)) { | |
path[i + 1] = .{ .declIndex = idx }; | |
continue :outer; | |
} | |
} | |
switch (try self.findNameInUnsDecls(file, path[i..path.len], resolved_parent, child_string)) { | |
.Pending => return, | |
.NotFound => {}, | |
.Found => |match| { | |
path[i + 1] = match; | |
continue :outer; | |
}, | |
} | |
for (self.ast_nodes.items[t_enum.src].fields.?, 0..) |ast_node, idx| { | |
const name = self.ast_nodes.items[ast_node].name.?; | |
if (std.mem.eql(u8, name, child_string)) { | |
// TODO: should we really create an artificial | |
// decl for this type? Probably not. | |
path[i + 1] = .{ | |
.fieldRef = .{ | |
.type = t_index, | |
.index = idx, | |
}, | |
}; | |
continue :outer; | |
} | |
} | |
// if we got here, our search failed | |
printWithContext( | |
file, | |
inst, | |
"failed to match `{s}` in enum", | |
.{child_string}, | |
); | |
path[i + 1] = (try self.cteTodo("match failure")).expr; | |
continue :outer; | |
}, | |
.Union => |t_union| { | |
// Look into locally-defined pub decls | |
for (t_union.pubDecls) |idx| { | |
const d = self.decls.items[idx]; | |
if (d.is_uns) continue; | |
if (std.mem.eql(u8, d.name, child_string)) { | |
path[i + 1] = .{ .declIndex = idx }; | |
continue :outer; | |
} | |
} | |
// Look into locally-defined priv decls | |
for (t_union.privDecls) |idx| { | |
const d = self.decls.items[idx]; | |
if (d.is_uns) continue; | |
if (std.mem.eql(u8, d.name, child_string)) { | |
path[i + 1] = .{ .declIndex = idx }; | |
continue :outer; | |
} | |
} | |
switch (try self.findNameInUnsDecls(file, path[i..path.len], resolved_parent, child_string)) { | |
.Pending => return, | |
.NotFound => {}, | |
.Found => |match| { | |
path[i + 1] = match; | |
continue :outer; | |
}, | |
} | |
for (self.ast_nodes.items[t_union.src].fields.?, 0..) |ast_node, idx| { | |
const name = self.ast_nodes.items[ast_node].name.?; | |
if (std.mem.eql(u8, name, child_string)) { | |
// TODO: should we really create an artificial | |
// decl for this type? Probably not. | |
path[i + 1] = .{ | |
.fieldRef = .{ | |
.type = t_index, | |
.index = idx, | |
}, | |
}; | |
continue :outer; | |
} | |
} | |
// if we got here, our search failed | |
printWithContext( | |
file, | |
inst, | |
"failed to match `{s}` in union", | |
.{child_string}, | |
); | |
path[i + 1] = (try self.cteTodo("match failure")).expr; | |
continue :outer; | |
}, | |
.Struct => |t_struct| { | |
// Look into locally-defined pub decls | |
for (t_struct.pubDecls) |idx| { | |
const d = self.decls.items[idx]; | |
if (d.is_uns) continue; | |
if (std.mem.eql(u8, d.name, child_string)) { | |
path[i + 1] = .{ .declIndex = idx }; | |
continue :outer; | |
} | |
} | |
// Look into locally-defined priv decls | |
for (t_struct.privDecls) |idx| { | |
const d = self.decls.items[idx]; | |
if (d.is_uns) continue; | |
if (std.mem.eql(u8, d.name, child_string)) { | |
path[i + 1] = .{ .declIndex = idx }; | |
continue :outer; | |
} | |
} | |
switch (try self.findNameInUnsDecls(file, path[i..path.len], resolved_parent, child_string)) { | |
.Pending => return, | |
.NotFound => {}, | |
.Found => |match| { | |
path[i + 1] = match; | |
continue :outer; | |
}, | |
} | |
for (self.ast_nodes.items[t_struct.src].fields.?, 0..) |ast_node, idx| { | |
const name = self.ast_nodes.items[ast_node].name.?; | |
if (std.mem.eql(u8, name, child_string)) { | |
// TODO: should we really create an artificial | |
// decl for this type? Probably not. | |
path[i + 1] = .{ | |
.fieldRef = .{ | |
.type = t_index, | |
.index = idx, | |
}, | |
}; | |
continue :outer; | |
} | |
} | |
// If we got here, our search failed. Unlike the other container | |
// kinds, we don't report a "match failure" here: instead we fall | |
// back to a comptimeExpr carrying the unresolved name itself. | |
path[i + 1] = (try self.cteTodo(child_string)).expr; | |
continue :outer; | |
}, | |
.Opaque => |t_opaque| { | |
// Look into locally-defined pub decls | |
for (t_opaque.pubDecls) |idx| { | |
const d = self.decls.items[idx]; | |
if (d.is_uns) continue; | |
if (std.mem.eql(u8, d.name, child_string)) { | |
path[i + 1] = .{ .declIndex = idx }; | |
continue :outer; | |
} | |
} | |
// Look into locally-defined priv decls | |
for (t_opaque.privDecls) |idx| { | |
const d = self.decls.items[idx]; | |
if (d.is_uns) continue; | |
if (std.mem.eql(u8, d.name, child_string)) { | |
path[i + 1] = .{ .declIndex = idx }; | |
continue :outer; | |
} | |
} | |
// We delay looking into Uns decls since they might not be | |
// fully analyzed yet. | |
switch (try self.findNameInUnsDecls(file, path[i..path.len], resolved_parent, child_string)) { | |
.Pending => return, | |
.NotFound => {}, | |
.Found => |match| { | |
path[i + 1] = match; | |
continue :outer; | |
}, | |
} | |
// if we got here, our search failed | |
printWithContext( | |
file, | |
inst, | |
"failed to match `{s}` in opaque", | |
.{child_string}, | |
); | |
path[i + 1] = (try self.cteTodo("match failure")).expr; | |
continue :outer; | |
}, | |
}, | |
.@"struct" => |st| { | |
for (st) |field| { | |
if (std.mem.eql(u8, field.name, child_string)) { | |
path[i + 1] = .{ .fieldVal = field }; | |
continue :outer; | |
} | |
} | |
// if we got here, our search failed | |
printWithContext( | |
file, | |
inst, | |
"failed to match `{s}` in struct", | |
.{child_string}, | |
); | |
path[i + 1] = (try self.cteTodo("match failure")).expr; | |
continue :outer; | |
}, | |
} | |
} | |
if (self.pending_ref_paths.get(&path[path.len - 1])) |waiter_list| { | |
// It's important to de-register ourselves as pending before | |
// attempting to resolve any other decl. | |
_ = self.pending_ref_paths.remove(&path[path.len - 1]); | |
for (waiter_list.items) |resume_info| { | |
try self.tryResolveRefPath(resume_info.file, inst, resume_info.ref_path); | |
} | |
// TODO: this is where we should free waiter_list, but its in the arena | |
// that said, we might want to store it elsewhere and reclaim memory asap | |
} | |
} | |
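// As an illustrative (hypothetical) example, resolving a reference | |
// like `std.ArrayList` starts from a path of the form: | |
// | |
//   path = [ declRef(std), declName("ArrayList") ] | |
// | |
// The inner loop chases `declRef(std)` through to its value (a type), | |
// then the big switch on `resolved_parent` looks up "ArrayList" among | |
// that type's decls and rewrites `path[1]` to a `declIndex`. Had `std` | |
// still been `.Pending`, the unresolved tail of the path would have | |
// been parked in `ref_paths_pending_on_decls` and resumed later. | |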
const UnsSearchResult = union(enum) { | |
Found: DocData.Expr, | |
Pending, | |
NotFound, | |
}; | |
fn findNameInUnsDecls( | |
self: *Autodoc, | |
file: *File, | |
tail: []DocData.Expr, | |
uns_expr: DocData.Expr, | |
name: []const u8, | |
) !UnsSearchResult { | |
var to_analyze = std.SegmentedList(DocData.Expr, 1){}; | |
// TODO: make this an appendAssumeCapacity | |
try to_analyze.append(self.arena, uns_expr); | |
while (to_analyze.pop()) |cte| { | |
var container_expression = cte; | |
for (0..10_000) |_| { | |
// TODO: handle other types of indirection, like @import | |
const type_index = switch (container_expression) { | |
.type => |t| t, | |
.declRef => |decl_status_ptr| { | |
switch (decl_status_ptr.*) { | |
// The use of unreachable here is conservative. | |
// It might be that it truly should be up to us to | |
// request the analysis of this decl, but it's not clear | |
// at the moment of writing. | |
.NotRequested => unreachable, | |
.Analyzed => |decl_index| { | |
const decl = self.decls.items[decl_index]; | |
container_expression = decl.value.expr; | |
continue; | |
}, | |
.Pending => { | |
// This decl path is pending completion | |
{ | |
const res = try self.pending_ref_paths.getOrPut( | |
self.arena, | |
&tail[tail.len - 1], | |
); | |
if (!res.found_existing) res.value_ptr.* = .{}; | |
} | |
const res = try self.ref_paths_pending_on_decls.getOrPut( | |
self.arena, | |
decl_status_ptr, | |
); | |
if (!res.found_existing) res.value_ptr.* = .{}; | |
try res.value_ptr.*.append(self.arena, .{ | |
.file = file, | |
.ref_path = tail, | |
}); | |
// TODO: save some state that keeps track of our | |
// progress because, as things stand, we | |
// always re-start the search from scratch | |
return .Pending; | |
}, | |
} | |
}, | |
else => { | |
log.debug( | |
"Handle `{s}` in findNameInUnsDecls (first switch)", | |
.{@tagName(cte)}, | |
); | |
return .{ .Found = .{ .comptimeExpr = 0 } }; | |
}, | |
}; | |
const t = self.types.items[type_index]; | |
const decls = switch (t) { | |
else => { | |
log.debug( | |
"Handle `{s}` in findNameInUnsDecls (second switch)", | |
.{@tagName(cte)}, | |
); | |
return .{ .Found = .{ .comptimeExpr = 0 } }; | |
}, | |
inline .Struct, .Union, .Opaque, .Enum => |c| c.pubDecls, | |
}; | |
for (decls) |idx| { | |
const d = self.decls.items[idx]; | |
if (d.is_uns) { | |
try to_analyze.append(self.arena, d.value.expr); | |
} else if (std.mem.eql(u8, d.name, name)) { | |
return .{ .Found = .{ .declIndex = idx } }; | |
} | |
} | |
} | |
} | |
return .NotFound; | |
} | |
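// A sketch of the search this function implements, with hypothetical | |
// declarations: | |
// | |
//   const A = struct { | |
//       pub usingnamespace B; // is_uns: pushed onto `to_analyze` | |
//       pub const x = 1; | |
//   }; | |
//   const B = struct { | |
//       pub const y = 2; // findNameInUnsDecls(A, "y") lands here | |
//   }; | |
// | |
// i.e. a worklist traversal over `usingnamespace` targets, bailing | |
// out with `.Pending` whenever a target decl isn't analyzed yet. | |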
fn analyzeFancyFunction( | |
self: *Autodoc, | |
file: *File, | |
scope: *Scope, | |
parent_src: SrcLocInfo, | |
inst: Zir.Inst.Index, | |
self_ast_node_index: usize, | |
type_slot_index: usize, | |
call_ctx: ?*const CallContext, | |
) AutodocErrors!DocData.WalkResult { | |
const tags = file.zir.instructions.items(.tag); | |
const data = file.zir.instructions.items(.data); | |
const fn_info = file.zir.getFnInfo(inst); | |
try self.ast_nodes.ensureUnusedCapacity(self.arena, fn_info.total_params_len); | |
var param_type_refs = try std.ArrayListUnmanaged(DocData.Expr).initCapacity( | |
self.arena, | |
fn_info.total_params_len, | |
); | |
var param_ast_indexes = try std.ArrayListUnmanaged(usize).initCapacity( | |
self.arena, | |
fn_info.total_params_len, | |
); | |
// TODO: handle scope rules for fn parameters | |
for (fn_info.param_body[0..fn_info.total_params_len]) |param_index| { | |
switch (tags[@intFromEnum(param_index)]) { | |
else => { | |
panicWithContext( | |
file, | |
param_index, | |
"TODO: handle `{s}` in walkInstruction.func\n", | |
.{@tagName(tags[@intFromEnum(param_index)])}, | |
); | |
}, | |
.param_anytype, .param_anytype_comptime => { | |
// TODO: where are the doc comments? | |
const str_tok = data[@intFromEnum(param_index)].str_tok; | |
const name = str_tok.get(file.zir); | |
param_ast_indexes.appendAssumeCapacity(self.ast_nodes.items.len); | |
self.ast_nodes.appendAssumeCapacity(.{ | |
.name = name, | |
.docs = "", | |
.@"comptime" = tags[@intFromEnum(param_index)] == .param_anytype_comptime, | |
}); | |
param_type_refs.appendAssumeCapacity( | |
DocData.Expr{ .@"anytype" = .{} }, | |
); | |
}, | |
.param, .param_comptime => { | |
const pl_tok = data[@intFromEnum(param_index)].pl_tok; | |
const extra = file.zir.extraData(Zir.Inst.Param, pl_tok.payload_index); | |
const doc_comment = if (extra.data.doc_comment != .empty) | |
file.zir.nullTerminatedString(extra.data.doc_comment) | |
else | |
""; | |
const name = file.zir.nullTerminatedString(extra.data.name); | |
param_ast_indexes.appendAssumeCapacity(self.ast_nodes.items.len); | |
try self.ast_nodes.append(self.arena, .{ | |
.name = name, | |
.docs = doc_comment, | |
.@"comptime" = tags[@intFromEnum(param_index)] == .param_comptime, | |
}); | |
const break_index = file.zir.extra[extra.end..][extra.data.body_len - 1]; | |
const break_operand = data[break_index].@"break".operand; | |
const param_type_ref = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
break_operand, | |
false, | |
call_ctx, | |
); | |
param_type_refs.appendAssumeCapacity(param_type_ref.expr); | |
}, | |
} | |
} | |
self.ast_nodes.items[self_ast_node_index].fields = param_ast_indexes.items; | |
const pl_node = data[@intFromEnum(inst)].pl_node; | |
const extra = file.zir.extraData(Zir.Inst.FuncFancy, pl_node.payload_index); | |
var extra_index: usize = extra.end; | |
var lib_name: []const u8 = ""; | |
if (extra.data.bits.has_lib_name) { | |
const lib_name_index: Zir.NullTerminatedString = @enumFromInt(file.zir.extra[extra_index]); | |
lib_name = file.zir.nullTerminatedString(lib_name_index); | |
extra_index += 1; | |
} | |
var align_index: ?usize = null; | |
if (extra.data.bits.has_align_ref) { | |
const align_ref: Zir.Inst.Ref = @enumFromInt(file.zir.extra[extra_index]); | |
align_index = self.exprs.items.len; | |
_ = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
align_ref, | |
false, | |
call_ctx, | |
); | |
extra_index += 1; | |
} else if (extra.data.bits.has_align_body) { | |
const align_body_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
const align_body = file.zir.extra[extra_index .. extra_index + align_body_len]; | |
_ = align_body; | |
// TODO: analyze the block (or bail with a comptimeExpr) | |
extra_index += align_body_len; | |
} else { | |
// default alignment | |
} | |
var addrspace_index: ?usize = null; | |
if (extra.data.bits.has_addrspace_ref) { | |
const addrspace_ref: Zir.Inst.Ref = @enumFromInt(file.zir.extra[extra_index]); | |
addrspace_index = self.exprs.items.len; | |
_ = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
addrspace_ref, | |
false, | |
call_ctx, | |
); | |
extra_index += 1; | |
} else if (extra.data.bits.has_addrspace_body) { | |
const addrspace_body_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
const addrspace_body = file.zir.extra[extra_index .. extra_index + addrspace_body_len]; | |
_ = addrspace_body; | |
// TODO: analyze the block (or bail with a comptimeExpr) | |
extra_index += addrspace_body_len; | |
} else { | |
// default address space | |
} | |
var section_index: ?usize = null; | |
if (extra.data.bits.has_section_ref) { | |
const section_ref: Zir.Inst.Ref = @enumFromInt(file.zir.extra[extra_index]); | |
section_index = self.exprs.items.len; | |
_ = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
section_ref, | |
false, | |
call_ctx, | |
); | |
extra_index += 1; | |
} else if (extra.data.bits.has_section_body) { | |
const section_body_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
const section_body = file.zir.extra[extra_index .. extra_index + section_body_len]; | |
_ = section_body; | |
// TODO: analyze the block (or bail with a comptimeExpr) | |
extra_index += section_body_len; | |
} else { | |
// default section | |
} | |
var cc_index: ?usize = null; | |
if (extra.data.bits.has_cc_ref and !extra.data.bits.has_cc_body) { | |
const cc_ref: Zir.Inst.Ref = @enumFromInt(file.zir.extra[extra_index]); | |
const cc_expr = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
cc_ref, | |
false, | |
call_ctx, | |
); | |
cc_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, cc_expr.expr); | |
extra_index += 1; | |
} else if (extra.data.bits.has_cc_body) { | |
const cc_body_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
const cc_body = file.zir.bodySlice(extra_index, cc_body_len); | |
// We assume the body ends with a break_inline | |
const break_index = cc_body[cc_body.len - 1]; | |
const break_operand = data[@intFromEnum(break_index)].@"break".operand; | |
const cc_expr = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
break_operand, | |
false, | |
call_ctx, | |
); | |
cc_index = self.exprs.items.len; | |
try self.exprs.append(self.arena, cc_expr.expr); | |
extra_index += cc_body_len; | |
} else { | |
// auto calling convention | |
} | |
// ret | |
const ret_type_ref: DocData.Expr = switch (fn_info.ret_ty_body.len) { | |
0 => switch (fn_info.ret_ty_ref) { | |
.none => DocData.Expr{ .void = .{} }, | |
else => blk: { | |
const ref = fn_info.ret_ty_ref; | |
const wr = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
ref, | |
false, | |
call_ctx, | |
); | |
break :blk wr.expr; | |
}, | |
}, | |
else => blk: { | |
const last_instr_index = fn_info.ret_ty_body[fn_info.ret_ty_body.len - 1]; | |
const break_operand = data[@intFromEnum(last_instr_index)].@"break".operand; | |
const wr = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
break_operand, | |
false, | |
call_ctx, | |
); | |
break :blk wr.expr; | |
}, | |
}; | |
// TODO: a complete version of this will probably need a scope | |
// in order to correctly evaluate closures around function | |
// parameters etc. | |
const generic_ret: ?DocData.Expr = switch (ret_type_ref) { | |
.type => |t| blk: { | |
if (fn_info.body.len == 0) break :blk null; | |
if (t == @intFromEnum(Ref.type_type)) { | |
break :blk try self.getGenericReturnType( | |
file, | |
scope, | |
parent_src, | |
fn_info.body, | |
call_ctx, | |
); | |
} else { | |
break :blk null; | |
} | |
}, | |
else => null, | |
}; | |
// if we're analyzing a function signature (i.e. without a body), we | |
// don't actually have an ast_node reserved for us, but since | |
// we don't have a name either, we don't need one. | |
const src = if (fn_info.body.len == 0) 0 else self_ast_node_index; | |
self.types.items[type_slot_index] = .{ | |
.Fn = .{ | |
.name = "todo_name func", | |
.src = src, | |
.params = param_type_refs.items, | |
.ret = ret_type_ref, | |
.generic_ret = generic_ret, | |
.is_extern = extra.data.bits.is_extern, | |
.has_cc = cc_index != null, | |
.has_align = align_index != null, | |
.has_lib_name = extra.data.bits.has_lib_name, | |
.lib_name = lib_name, | |
.is_inferred_error = extra.data.bits.is_inferred_error, | |
.cc = cc_index, | |
.@"align" = align_index, | |
}, | |
}; | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = type_slot_index }, | |
}; | |
} | |
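// The trailing data decoded above follows the flag bits in | |
// `extra.data.bits`: for each of lib_name, align, addrspace, section | |
// and cc (in that order), either a single ref or a length-prefixed | |
// body may be present. Roughly (a sketch; the authoritative layout | |
// is `Zir.Inst.FuncFancy` in Zir.zig): | |
// | |
//   [lib_name?] [align: ref | len,body] [addrspace: ...] [section: ...] [cc: ...] | |
// | |
// which is why `extra_index` advances by exactly one slot per ref and | |
// by `len` slots (plus the length word itself) per body. | |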
fn analyzeFunction( | |
self: *Autodoc, | |
file: *File, | |
scope: *Scope, | |
parent_src: SrcLocInfo, | |
inst: Zir.Inst.Index, | |
self_ast_node_index: usize, | |
type_slot_index: usize, | |
ret_is_inferred_error_set: bool, | |
call_ctx: ?*const CallContext, | |
) AutodocErrors!DocData.WalkResult { | |
const tags = file.zir.instructions.items(.tag); | |
const data = file.zir.instructions.items(.data); | |
const fn_info = file.zir.getFnInfo(inst); | |
try self.ast_nodes.ensureUnusedCapacity(self.arena, fn_info.total_params_len); | |
var param_type_refs = try std.ArrayListUnmanaged(DocData.Expr).initCapacity( | |
self.arena, | |
fn_info.total_params_len, | |
); | |
var param_ast_indexes = try std.ArrayListUnmanaged(usize).initCapacity( | |
self.arena, | |
fn_info.total_params_len, | |
); | |
// TODO: handle scope rules for fn parameters | |
for (fn_info.param_body[0..fn_info.total_params_len]) |param_index| { | |
switch (tags[@intFromEnum(param_index)]) { | |
else => { | |
panicWithContext( | |
file, | |
param_index, | |
"TODO: handle `{s}` in walkInstruction.func\n", | |
.{@tagName(tags[@intFromEnum(param_index)])}, | |
); | |
}, | |
.param_anytype, .param_anytype_comptime => { | |
// TODO: where are the doc comments? | |
const str_tok = data[@intFromEnum(param_index)].str_tok; | |
const name = str_tok.get(file.zir); | |
param_ast_indexes.appendAssumeCapacity(self.ast_nodes.items.len); | |
self.ast_nodes.appendAssumeCapacity(.{ | |
.name = name, | |
.docs = "", | |
.@"comptime" = tags[@intFromEnum(param_index)] == .param_anytype_comptime, | |
}); | |
param_type_refs.appendAssumeCapacity( | |
DocData.Expr{ .@"anytype" = .{} }, | |
); | |
}, | |
.param, .param_comptime => { | |
const pl_tok = data[@intFromEnum(param_index)].pl_tok; | |
const extra = file.zir.extraData(Zir.Inst.Param, pl_tok.payload_index); | |
const doc_comment = if (extra.data.doc_comment != .empty) | |
file.zir.nullTerminatedString(extra.data.doc_comment) | |
else | |
""; | |
const name = file.zir.nullTerminatedString(extra.data.name); | |
param_ast_indexes.appendAssumeCapacity(self.ast_nodes.items.len); | |
try self.ast_nodes.append(self.arena, .{ | |
.name = name, | |
.docs = doc_comment, | |
.@"comptime" = tags[@intFromEnum(param_index)] == .param_comptime, | |
}); | |
const break_index = file.zir.extra[extra.end..][extra.data.body_len - 1]; | |
const break_operand = data[break_index].@"break".operand; | |
const param_type_ref = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
break_operand, | |
false, | |
call_ctx, | |
); | |
param_type_refs.appendAssumeCapacity(param_type_ref.expr); | |
}, | |
} | |
} | |
// ret | |
const ret_type_ref: DocData.Expr = switch (fn_info.ret_ty_body.len) { | |
0 => switch (fn_info.ret_ty_ref) { | |
.none => DocData.Expr{ .void = .{} }, | |
else => blk: { | |
const ref = fn_info.ret_ty_ref; | |
const wr = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
ref, | |
false, | |
call_ctx, | |
); | |
break :blk wr.expr; | |
}, | |
}, | |
else => blk: { | |
const last_instr_index = fn_info.ret_ty_body[fn_info.ret_ty_body.len - 1]; | |
const break_operand = data[@intFromEnum(last_instr_index)].@"break".operand; | |
const wr = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
break_operand, | |
false, | |
call_ctx, | |
); | |
break :blk wr.expr; | |
}, | |
}; | |
// TODO: a complete version of this will probably need a scope | |
// in order to correctly evaluate closures around function | |
// parameters etc. | |
const generic_ret: ?DocData.Expr = switch (ret_type_ref) { | |
.type => |t| blk: { | |
if (fn_info.body.len == 0) break :blk null; | |
if (t == @intFromEnum(Ref.type_type)) { | |
break :blk try self.getGenericReturnType( | |
file, | |
scope, | |
parent_src, | |
fn_info.body, | |
call_ctx, | |
); | |
} else { | |
break :blk null; | |
} | |
}, | |
else => null, | |
}; | |
const ret_type: DocData.Expr = blk: { | |
if (ret_is_inferred_error_set) { | |
const ret_type_slot_index = self.types.items.len; | |
try self.types.append(self.arena, .{ | |
.InferredErrorUnion = .{ .payload = ret_type_ref }, | |
}); | |
break :blk .{ .type = ret_type_slot_index }; | |
} else break :blk ret_type_ref; | |
}; | |
// if we're analyzing a function signature (i.e. without a body), we | |
// don't actually have an ast_node reserved for us, but since | |
// we don't have a name either, we don't need one. | |
const src = if (fn_info.body.len == 0) 0 else self_ast_node_index; | |
self.ast_nodes.items[self_ast_node_index].fields = param_ast_indexes.items; | |
self.types.items[type_slot_index] = .{ | |
.Fn = .{ | |
.name = "todo_name func", | |
.src = src, | |
.params = param_type_refs.items, | |
.ret = ret_type, | |
.generic_ret = generic_ret, | |
}, | |
}; | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = type_slot_index }, | |
}; | |
} | |
fn getGenericReturnType( | |
self: *Autodoc, | |
file: *File, | |
scope: *Scope, | |
parent_src: SrcLocInfo, // function decl line | |
body: []const Zir.Inst.Index, | |
call_ctx: ?*const CallContext, | |
) !DocData.Expr { | |
const tags = file.zir.instructions.items(.tag); | |
if (body.len >= 4) { | |
const maybe_ret_inst = body[body.len - 4]; | |
switch (tags[@intFromEnum(maybe_ret_inst)]) { | |
.ret_node, .ret_load => { | |
const wr = try self.walkInstruction( | |
file, | |
scope, | |
parent_src, | |
maybe_ret_inst, | |
false, | |
call_ctx, | |
); | |
return wr.expr; | |
}, | |
else => {}, | |
} | |
} | |
return DocData.Expr{ .comptimeExpr = 0 }; | |
} | |
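// The `body.len - 4` probe above encodes an assumption about how | |
// AstGen lays out the tail of a function body that returns a type: | |
// the `ret_node` / `ret_load` is expected at a fixed distance from | |
// the end, with the trailing slots presumably being bookkeeping | |
// instructions. When the shape doesn't match, we conservatively | |
// return the generic `comptimeExpr 0` placeholder (also used by | |
// `walkRef` for `.none`) rather than guess. | |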
fn collectUnionFieldInfo( | |
self: *Autodoc, | |
file: *File, | |
scope: *Scope, | |
parent_src: SrcLocInfo, | |
fields_len: usize, | |
field_type_refs: *std.ArrayListUnmanaged(DocData.Expr), | |
field_name_indexes: *std.ArrayListUnmanaged(usize), | |
ei: usize, | |
call_ctx: ?*const CallContext, | |
) !void { | |
if (fields_len == 0) return; | |
var extra_index = ei; | |
const bits_per_field = 4; | |
const fields_per_u32 = 32 / bits_per_field; | |
const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; | |
var bit_bag_index: usize = extra_index; | |
extra_index += bit_bags_count; | |
var cur_bit_bag: u32 = undefined; | |
var field_i: u32 = 0; | |
while (field_i < fields_len) : (field_i += 1) { | |
if (field_i % fields_per_u32 == 0) { | |
cur_bit_bag = file.zir.extra[bit_bag_index]; | |
bit_bag_index += 1; | |
} | |
const has_type = @as(u1, @truncate(cur_bit_bag)) != 0; | |
cur_bit_bag >>= 1; | |
const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; | |
cur_bit_bag >>= 1; | |
const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0; | |
cur_bit_bag >>= 1; | |
const unused = @as(u1, @truncate(cur_bit_bag)) != 0; | |
cur_bit_bag >>= 1; | |
_ = unused; | |
const field_name = file.zir.nullTerminatedString(@enumFromInt(file.zir.extra[extra_index])); | |
extra_index += 1; | |
const doc_comment_index: Zir.NullTerminatedString = @enumFromInt(file.zir.extra[extra_index]); | |
extra_index += 1; | |
const field_type: Zir.Inst.Ref = if (has_type) @enumFromInt(file.zir.extra[extra_index]) else .void_type; | |
if (has_type) extra_index += 1; | |
if (has_align) extra_index += 1; | |
if (has_tag) extra_index += 1; | |
// type | |
{ | |
const walk_result = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
field_type, | |
false, | |
call_ctx, | |
); | |
try field_type_refs.append(self.arena, walk_result.expr); | |
} | |
// ast node | |
{ | |
try field_name_indexes.append(self.arena, self.ast_nodes.items.len); | |
const doc_comment: ?[]const u8 = if (doc_comment_index != .empty) | |
file.zir.nullTerminatedString(doc_comment_index) | |
else | |
null; | |
try self.ast_nodes.append(self.arena, .{ | |
.name = field_name, | |
.docs = doc_comment, | |
}); | |
} | |
} | |
} | |
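// A sketch of the bit-bag decoding above: each u32 in `extra` packs | |
// the flags for `fields_per_u32 == 8` fields, 4 bits per field, | |
// consumed LSB-first: | |
// | |
//   bits 0..3 -> field N+0: has_type, has_align, has_tag, (unused) | |
//   bits 4..7 -> field N+1: has_type, has_align, has_tag, (unused) | |
//   ...and so on, | |
// | |
// hence the reload of `cur_bit_bag` every 8 fields and the four | |
// `>>= 1` shifts per field. | |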
fn collectStructFieldInfo( | |
self: *Autodoc, | |
file: *File, | |
scope: *Scope, | |
parent_src: SrcLocInfo, | |
fields_len: usize, | |
field_type_refs: *std.ArrayListUnmanaged(DocData.Expr), | |
field_default_refs: *std.ArrayListUnmanaged(?DocData.Expr), | |
field_name_indexes: *std.ArrayListUnmanaged(usize), | |
ei: usize, | |
is_tuple: bool, | |
call_ctx: ?*const CallContext, | |
) !void { | |
if (fields_len == 0) return; | |
var extra_index = ei; | |
const bits_per_field = 4; | |
const fields_per_u32 = 32 / bits_per_field; | |
const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; | |
const Field = struct { | |
field_name: Zir.NullTerminatedString, | |
doc_comment_index: Zir.NullTerminatedString, | |
type_body_len: u32 = 0, | |
align_body_len: u32 = 0, | |
init_body_len: u32 = 0, | |
type_ref: Zir.Inst.Ref = .none, | |
}; | |
const fields = try self.arena.alloc(Field, fields_len); | |
var bit_bag_index: usize = extra_index; | |
extra_index += bit_bags_count; | |
var cur_bit_bag: u32 = undefined; | |
var field_i: u32 = 0; | |
while (field_i < fields_len) : (field_i += 1) { | |
if (field_i % fields_per_u32 == 0) { | |
cur_bit_bag = file.zir.extra[bit_bag_index]; | |
bit_bag_index += 1; | |
} | |
const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; | |
cur_bit_bag >>= 1; | |
const has_default = @as(u1, @truncate(cur_bit_bag)) != 0; | |
cur_bit_bag >>= 1; | |
// const is_comptime = @truncate(u1, cur_bit_bag) != 0; | |
cur_bit_bag >>= 1; | |
const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; | |
cur_bit_bag >>= 1; | |
const field_name: Zir.NullTerminatedString = if (!is_tuple) blk: { | |
const fname = file.zir.extra[extra_index]; | |
extra_index += 1; | |
break :blk @enumFromInt(fname); | |
} else .empty; | |
const doc_comment_index: Zir.NullTerminatedString = @enumFromInt(file.zir.extra[extra_index]); | |
extra_index += 1; | |
fields[field_i] = .{ | |
.field_name = field_name, | |
.doc_comment_index = doc_comment_index, | |
}; | |
if (has_type_body) { | |
fields[field_i].type_body_len = file.zir.extra[extra_index]; | |
} else { | |
fields[field_i].type_ref = @enumFromInt(file.zir.extra[extra_index]); | |
} | |
extra_index += 1; | |
if (has_align) { | |
fields[field_i].align_body_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
} | |
if (has_default) { | |
fields[field_i].init_body_len = file.zir.extra[extra_index]; | |
extra_index += 1; | |
} | |
} | |
const data = file.zir.instructions.items(.data); | |
for (fields) |field| { | |
const type_expr = expr: { | |
if (field.type_ref != .none) { | |
const walk_result = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
field.type_ref, | |
false, | |
call_ctx, | |
); | |
break :expr walk_result.expr; | |
} | |
std.debug.assert(field.type_body_len != 0); | |
const body = file.zir.bodySlice(extra_index, field.type_body_len); | |
extra_index += body.len; | |
const break_inst = body[body.len - 1]; | |
const operand = data[@intFromEnum(break_inst)].@"break".operand; | |
try self.ast_nodes.append(self.arena, .{ | |
.file = self.files.getIndex(file).?, | |
.line = parent_src.line, | |
.col = 0, | |
.fields = null, // walkInstruction will fill `fields` if necessary | |
}); | |
const walk_result = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
operand, | |
false, | |
call_ctx, | |
); | |
break :expr walk_result.expr; | |
}; | |
extra_index += field.align_body_len; | |
const default_expr: ?DocData.Expr = def: { | |
if (field.init_body_len == 0) { | |
break :def null; | |
} | |
const body = file.zir.bodySlice(extra_index, field.init_body_len); | |
extra_index += body.len; | |
const break_inst = body[body.len - 1]; | |
const operand = data[@intFromEnum(break_inst)].@"break".operand; | |
const walk_result = try self.walkRef( | |
file, | |
scope, | |
parent_src, | |
operand, | |
false, | |
call_ctx, | |
); | |
break :def walk_result.expr; | |
}; | |
try field_type_refs.append(self.arena, type_expr); | |
try field_default_refs.append(self.arena, default_expr); | |
// ast node | |
{ | |
try field_name_indexes.append(self.arena, self.ast_nodes.items.len); | |
const doc_comment: ?[]const u8 = if (field.doc_comment_index != .empty) | |
file.zir.nullTerminatedString(field.doc_comment_index) | |
else | |
null; | |
const field_name: []const u8 = if (field.field_name != .empty) | |
file.zir.nullTerminatedString(field.field_name) | |
else | |
""; | |
try self.ast_nodes.append(self.arena, .{ | |
.name = field_name, | |
.docs = doc_comment, | |
}); | |
} | |
} | |
} | |
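// Note the two-phase parse above: the first loop records only each | |
// field's header (name, doc comment, and the *lengths* of any type / | |
// align / init bodies), because in `extra` all field headers precede | |
// all field bodies. Only after `extra_index` has moved past every | |
// header does the second loop walk the bodies, e.g. for a | |
// (hypothetical) field like: | |
// | |
//   len: usize = 0, // type as a direct ref, default value as an init body | |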
/// A Zir Ref can either refer to common types and values, or to a Zir index. | |
/// WalkRef resolves common cases and delegates to `walkInstruction` otherwise. | |
fn walkRef( | |
self: *Autodoc, | |
file: *File, | |
parent_scope: *Scope, | |
parent_src: SrcLocInfo, | |
ref: Ref, | |
need_type: bool, // true when the caller also needs a typeRef for the return value | |
call_ctx: ?*const CallContext, | |
) AutodocErrors!DocData.WalkResult { | |
if (ref == .none) { | |
return .{ .expr = .{ .comptimeExpr = 0 } }; | |
} else if (@intFromEnum(ref) <= @intFromEnum(InternPool.Index.last_type)) { | |
// We can just return a type that indexes into `types` with the | |
// enum value because in the beginning we pre-filled `types` with | |
// the types that are listed in `Ref`. | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(std.builtin.TypeId.Type) }, | |
.expr = .{ .type = @intFromEnum(ref) }, | |
}; | |
} else if (ref.toIndex()) |zir_index| { | |
return self.walkInstruction( | |
file, | |
parent_scope, | |
parent_src, | |
zir_index, | |
need_type, | |
call_ctx, | |
); | |
} else { | |
switch (ref) { | |
else => { | |
panicWithOptionalContext( | |
file, | |
.none, | |
"TODO: handle {s} in walkRef", | |
.{@tagName(ref)}, | |
); | |
}, | |
.undef => { | |
return DocData.WalkResult{ .expr = .undefined }; | |
}, | |
.zero => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.comptime_int_type) }, | |
.expr = .{ .int = .{ .value = 0 } }, | |
}; | |
}, | |
.one => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.comptime_int_type) }, | |
.expr = .{ .int = .{ .value = 1 } }, | |
}; | |
}, | |
.negative_one => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.comptime_int_type) }, | |
.expr = .{ .int = .{ .value = 1, .negated = true } }, | |
}; | |
}, | |
.zero_usize => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.usize_type) }, | |
.expr = .{ .int = .{ .value = 0 } }, | |
}; | |
}, | |
.one_usize => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.usize_type) }, | |
.expr = .{ .int = .{ .value = 1 } }, | |
}; | |
}, | |
.zero_u8 => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.u8_type) }, | |
.expr = .{ .int = .{ .value = 0 } }, | |
}; | |
}, | |
.one_u8 => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.u8_type) }, | |
.expr = .{ .int = .{ .value = 1 } }, | |
}; | |
}, | |
.four_u8 => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.u8_type) }, | |
.expr = .{ .int = .{ .value = 4 } }, | |
}; | |
}, | |
.void_value => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.void_type) }, | |
.expr = .{ .void = .{} }, | |
}; | |
}, | |
.unreachable_value => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.noreturn_type) }, | |
.expr = .{ .@"unreachable" = .{} }, | |
}; | |
}, | |
.null_value => { | |
return DocData.WalkResult{ .expr = .null }; | |
}, | |
.bool_true => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.bool_type) }, | |
.expr = .{ .bool = true }, | |
}; | |
}, | |
.bool_false => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.bool_type) }, | |
.expr = .{ .bool = false }, | |
}; | |
}, | |
.empty_struct => { | |
return DocData.WalkResult{ .expr = .{ .@"struct" = &.{} } }; | |
}, | |
.calling_convention_type => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.type_type) }, | |
.expr = .{ .type = @intFromEnum(Ref.calling_convention_type) }, | |
}; | |
}, | |
.calling_convention_c => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.calling_convention_type) }, | |
.expr = .{ .enumLiteral = "C" }, | |
}; | |
}, | |
.calling_convention_inline => { | |
return DocData.WalkResult{ | |
.typeRef = .{ .type = @intFromEnum(Ref.calling_convention_type) }, | |
.expr = .{ .enumLiteral = "Inline" }, | |
}; | |
}, | |
// .generic_poison => { | |
// return DocData.WalkResult{ .int = .{ | |
// .type = @intFromEnum(Ref.comptime_int_type), | |
// .value = 1, | |
// } }; | |
// }, | |
} | |
} | |
} | |
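// Summarizing the dispatch above, a Ref falls into one of four | |
// classes (a simplified sketch of the real encoding in Zir.zig): | |
// | |
//   ref == .none                   -> generic comptimeExpr 0 | |
//   @intFromEnum(ref) <= last_type -> pre-filled entry in `types` | |
//   ref.toIndex() != null          -> walkInstruction on that index | |
//   otherwise                      -> special value (.zero, .bool_true, ...) | |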
fn printWithContext( | |
file: *File, | |
inst: Zir.Inst.Index, | |
comptime fmt: []const u8, | |
args: anytype, | |
) void { | |
return printWithOptionalContext(file, inst.toOptional(), fmt, args); | |
} | |
fn printWithOptionalContext(file: *File, inst: Zir.Inst.OptionalIndex, comptime fmt: []const u8, args: anytype) void { | |
log.debug("Context [{s}] % {} \n " ++ fmt, .{ file.sub_file_path, inst } ++ args); | |
} | |
fn panicWithContext( | |
file: *File, | |
inst: Zir.Inst.Index, | |
comptime fmt: []const u8, | |
args: anytype, | |
) noreturn { | |
printWithOptionalContext(file, inst.toOptional(), fmt, args); | |
unreachable; | |
} | |
fn panicWithOptionalContext( | |
file: *File, | |
inst: Zir.Inst.OptionalIndex, | |
comptime fmt: []const u8, | |
args: anytype, | |
) noreturn { | |
printWithOptionalContext(file, inst, fmt, args); | |
unreachable; | |
} | |
fn cteTodo(self: *Autodoc, msg: []const u8) error{OutOfMemory}!DocData.WalkResult { | |
const cte_slot_index = self.comptime_exprs.items.len; | |
try self.comptime_exprs.append(self.arena, .{ | |
.code = msg, | |
}); | |
return DocData.WalkResult{ .expr = .{ .comptimeExpr = cte_slot_index } }; | |
} | |
fn writeFileTableToJson( | |
map: std.AutoArrayHashMapUnmanaged(*File, usize), | |
mods: std.AutoArrayHashMapUnmanaged(*Module, DocData.DocModule), | |
jsw: anytype, | |
) !void { | |
try jsw.beginArray(); | |
var it = map.iterator(); | |
while (it.next()) |entry| { | |
try jsw.beginArray(); | |
try jsw.write(entry.key_ptr.*.sub_file_path); | |
try jsw.write(mods.getIndex(entry.key_ptr.*.mod) orelse 0); | |
try jsw.endArray(); | |
} | |
try jsw.endArray(); | |
} | |
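// Sketch of the JSON emitted by `writeFileTableToJson` above (hypothetical
// paths, for illustration only): an outer array of [sub_file_path, module
// index] pairs, e.g.
//
//   [["main.zig", 0], ["util/sort.zig", 0], ["std/std.zig", 1]]
//
// A file whose module is not present in `mods` falls back to index 0.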
/// Writes the data like so:
/// ```
/// [
///     {"name": "<section name>", "guides": [{"name": "<guide name>", "body": "<guide contents>"}]},
/// ]
/// ```
fn writeGuidesToJson(sections: std.ArrayListUnmanaged(Section), jsw: anytype) !void { | |
try jsw.beginArray(); | |
for (sections.items) |s| { | |
// section name | |
try jsw.beginObject(); | |
try jsw.objectField("name"); | |
try jsw.write(s.name); | |
try jsw.objectField("guides"); | |
// section value | |
try jsw.beginArray(); | |
for (s.guides.items) |g| { | |
try jsw.beginObject(); | |
try jsw.objectField("name"); | |
try jsw.write(g.name); | |
try jsw.objectField("body"); | |
try jsw.write(g.body); | |
try jsw.endObject(); | |
} | |
try jsw.endArray(); | |
try jsw.endObject(); | |
} | |
try jsw.endArray(); | |
} | |
fn writeModuleTableToJson( | |
map: std.AutoHashMapUnmanaged(*Module, DocData.DocModule.TableEntry), | |
jsw: anytype, | |
) !void { | |
try jsw.beginObject(); | |
var it = map.valueIterator(); | |
while (it.next()) |entry| { | |
try jsw.objectField(entry.name); | |
try jsw.write(entry.value); | |
} | |
try jsw.endObject(); | |
} | |
fn srcLocInfo( | |
self: Autodoc, | |
file: *File, | |
src_node: i32, | |
parent_src: SrcLocInfo, | |
) !SrcLocInfo { | |
const sn = @as(u32, @intCast(@as(i32, @intCast(parent_src.src_node)) + src_node)); | |
const tree = try file.getTree(self.zcu.gpa); | |
const node_idx = @as(Ast.Node.Index, @bitCast(sn)); | |
const tokens = tree.nodes.items(.main_token); | |
const tok_idx = tokens[node_idx]; | |
const start = tree.tokens.items(.start)[tok_idx]; | |
const loc = tree.tokenLocation(parent_src.bytes, tok_idx); | |
return SrcLocInfo{ | |
.line = parent_src.line + loc.line, | |
.bytes = start, | |
.src_node = sn, | |
}; | |
} | |
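// Note on the arithmetic above (an explanatory sketch, not original source):
// `src_node` is stored as an offset relative to the parent declaration's AST
// node, so a parent at absolute node 40 plus a relative offset of -3 yields
// absolute node 37. Lines accumulate the same way: passing `parent_src.bytes`
// to `tokenLocation` makes it count newlines only from the parent's byte
// offset, and the absolute line is `parent_src.line + loc.line`.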
fn declIsVar( | |
self: Autodoc, | |
file: *File, | |
src_node: i32, | |
parent_src: SrcLocInfo, | |
) !bool { | |
const sn = @as(u32, @intCast(@as(i32, @intCast(parent_src.src_node)) + src_node)); | |
const tree = try file.getTree(self.zcu.gpa); | |
const node_idx = @as(Ast.Node.Index, @bitCast(sn)); | |
const tokens = tree.nodes.items(.main_token); | |
const tags = tree.tokens.items(.tag); | |
const tok_idx = tokens[node_idx]; | |
// tags[tok_idx] is the token called 'mut token' in AstGen | |
return (tags[tok_idx] == .keyword_var); | |
} | |
fn getBlockSource( | |
self: Autodoc, | |
file: *File, | |
parent_src: SrcLocInfo, | |
block_src_node: i32, | |
) AutodocErrors![]const u8 { | |
const tree = try file.getTree(self.zcu.gpa); | |
const block_src = try self.srcLocInfo(file, block_src_node, parent_src); | |
return tree.getNodeSource(block_src.src_node); | |
} | |
fn getTLDocComment(self: *Autodoc, file: *File) ![]const u8 { | |
const source = (try file.getSource(self.zcu.gpa)).bytes; | |
var tokenizer = Tokenizer.init(source); | |
var tok = tokenizer.next(); | |
var comment = std.ArrayList(u8).init(self.arena); | |
while (tok.tag == .container_doc_comment) : (tok = tokenizer.next()) { | |
try comment.appendSlice(source[tok.loc.start + "//!".len .. tok.loc.end + 1]); | |
} | |
return comment.items; | |
} | |
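// Example (illustrative): for a file beginning with
//
//   //! Top-level docs.
//   //! Second line.
//
// the loop above collects " Top-level docs.\n Second line.\n": the slice
// skips the leading "//!" and extends one byte past the token end to keep
// the trailing newline.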
/// Returns the doc comment cleared of autodoc directives. | |
fn findGuidePaths(self: *Autodoc, file: *File, str: []const u8) ![]const u8 { | |
const guide_prefix = "zig-autodoc-guide:"; | |
const section_prefix = "zig-autodoc-section:"; | |
try self.guide_sections.append(self.arena, .{}); // add a default section | |
var current_section = &self.guide_sections.items[self.guide_sections.items.len - 1]; | |
var clean_docs: std.ArrayListUnmanaged(u8) = .{}; | |
errdefer clean_docs.deinit(self.arena); | |
// TODO: this algo is kinda inefficient | |
var it = std.mem.splitScalar(u8, str, '\n'); | |
while (it.next()) |line| { | |
const trimmed_line = std.mem.trim(u8, line, " "); | |
if (std.mem.startsWith(u8, trimmed_line, guide_prefix)) { | |
const path = trimmed_line[guide_prefix.len..]; | |
const trimmed_path = std.mem.trim(u8, path, " "); | |
try self.addGuide(file, trimmed_path, current_section); | |
} else if (std.mem.startsWith(u8, trimmed_line, section_prefix)) { | |
const section_name = trimmed_line[section_prefix.len..]; | |
const trimmed_section_name = std.mem.trim(u8, section_name, " "); | |
try self.guide_sections.append(self.arena, .{ | |
.name = trimmed_section_name, | |
}); | |
current_section = &self.guide_sections.items[self.guide_sections.items.len - 1]; | |
} else { | |
try clean_docs.appendSlice(self.arena, line); | |
try clean_docs.append(self.arena, '\n'); | |
} | |
} | |
return clean_docs.toOwnedSlice(self.arena); | |
} | |
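// Illustrative sketch (hypothetical doc comment, not part of the original
// source): given a top-level doc comment containing
//
//   zig-autodoc-guide: intro.md
//   zig-autodoc-section: Advanced
//   zig-autodoc-guide: advanced/usage.md
//   Everything else stays in the cleaned docs.
//
// `findGuidePaths` adds "intro.md" to the default section, starts a new
// section named "Advanced" holding "advanced/usage.md", and returns only the
// last line, since directive lines are stripped from the returned text.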
fn addGuide(self: *Autodoc, file: *File, guide_path: []const u8, section: *Section) !void { | |
if (guide_path.len == 0) return error.MissingAutodocGuideName; | |
const resolved_path = try std.fs.path.resolve(self.arena, &[_][]const u8{ | |
file.sub_file_path, "..", guide_path, | |
}); | |
var guide_file = try file.mod.root.openFile(resolved_path, .{}); | |
defer guide_file.close(); | |
const guide = guide_file.reader().readAllAlloc(self.arena, 1 * 1024 * 1024) catch |err| switch (err) { | |
error.StreamTooLong => @panic("stream too long"), | |
else => |e| return e, | |
}; | |
try section.guides.append(self.arena, .{ | |
.name = resolved_path, | |
.body = guide, | |
}); | |
} | |
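// For illustration (hypothetical paths): with `file.sub_file_path` of
// "src/main.zig" and `guide_path` of "docs/intro.md", the resolve call above
// collapses "src/main.zig/../docs/intro.md" into "src/docs/intro.md", i.e.
// guide paths are relative to the directory of the file that referenced them.
// Guides larger than the 1 MiB `readAllAlloc` limit hit the panic above.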
target: std.Target, | |
zig_backend: std.builtin.CompilerBackend, | |
output_mode: std.builtin.OutputMode, | |
link_mode: std.builtin.LinkMode, | |
is_test: bool, | |
single_threaded: bool, | |
link_libc: bool, | |
link_libcpp: bool, | |
optimize_mode: std.builtin.OptimizeMode, | |
error_tracing: bool, | |
valgrind: bool, | |
sanitize_thread: bool, | |
pic: bool, | |
pie: bool, | |
strip: bool, | |
code_model: std.builtin.CodeModel, | |
omit_frame_pointer: bool, | |
wasi_exec_model: std.builtin.WasiExecModel, | |
pub fn generate(opts: @This(), allocator: Allocator) Allocator.Error![:0]u8 { | |
var buffer = std.ArrayList(u8).init(allocator); | |
try append(opts, &buffer); | |
return buffer.toOwnedSliceSentinel(0); | |
} | |
pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void { | |
const target = opts.target; | |
const generic_arch_name = target.cpu.arch.genericName(); | |
const zig_backend = opts.zig_backend; | |
@setEvalBranchQuota(4000); | |
try buffer.writer().print( | |
\\const std = @import("std"); | |
\\/// Zig version. When writing code that supports multiple versions of Zig, prefer | |
\\/// feature detection (i.e. with `@hasDecl` or `@hasField`) over version checks. | |
\\pub const zig_version = std.SemanticVersion.parse(zig_version_string) catch unreachable; | |
\\pub const zig_version_string = "{s}"; | |
\\pub const zig_backend = std.builtin.CompilerBackend.{}; | |
\\ | |
\\pub const output_mode = std.builtin.OutputMode.{}; | |
\\pub const link_mode = std.builtin.LinkMode.{}; | |
\\pub const is_test = {}; | |
\\pub const single_threaded = {}; | |
\\pub const abi = std.Target.Abi.{}; | |
\\pub const cpu: std.Target.Cpu = .{{ | |
\\ .arch = .{}, | |
\\ .model = &std.Target.{}.cpu.{}, | |
\\ .features = std.Target.{}.featureSet(&[_]std.Target.{}.Feature{{ | |
\\ | |
, .{ | |
build_options.version, | |
std.zig.fmtId(@tagName(zig_backend)), | |
std.zig.fmtId(@tagName(opts.output_mode)), | |
std.zig.fmtId(@tagName(opts.link_mode)), | |
opts.is_test, | |
opts.single_threaded, | |
std.zig.fmtId(@tagName(target.abi)), | |
std.zig.fmtId(@tagName(target.cpu.arch)), | |
std.zig.fmtId(generic_arch_name), | |
std.zig.fmtId(target.cpu.model.name), | |
std.zig.fmtId(generic_arch_name), | |
std.zig.fmtId(generic_arch_name), | |
}); | |
for (target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| { | |
const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize)); | |
const is_enabled = target.cpu.features.isEnabled(index); | |
if (is_enabled) { | |
try buffer.writer().print(" .{},\n", .{std.zig.fmtId(feature.name)}); | |
} | |
} | |
try buffer.writer().print( | |
\\ }}), | |
\\}}; | |
\\pub const os = std.Target.Os{{ | |
\\ .tag = .{}, | |
\\ .version_range = .{{ | |
, | |
.{std.zig.fmtId(@tagName(target.os.tag))}, | |
); | |
switch (target.os.getVersionRange()) { | |
.none => try buffer.appendSlice(" .none = {} },\n"), | |
.semver => |semver| try buffer.writer().print( | |
\\ .semver = .{{ | |
\\ .min = .{{ | |
\\ .major = {}, | |
\\ .minor = {}, | |
\\ .patch = {}, | |
\\ }}, | |
\\ .max = .{{ | |
\\ .major = {}, | |
\\ .minor = {}, | |
\\ .patch = {}, | |
\\ }}, | |
\\ }}}}, | |
\\ | |
, .{ | |
semver.min.major, | |
semver.min.minor, | |
semver.min.patch, | |
semver.max.major, | |
semver.max.minor, | |
semver.max.patch, | |
}), | |
.linux => |linux| try buffer.writer().print( | |
\\ .linux = .{{ | |
\\ .range = .{{ | |
\\ .min = .{{ | |
\\ .major = {}, | |
\\ .minor = {}, | |
\\ .patch = {}, | |
\\ }}, | |
\\ .max = .{{ | |
\\ .major = {}, | |
\\ .minor = {}, | |
\\ .patch = {}, | |
\\ }}, | |
\\ }}, | |
\\ .glibc = .{{ | |
\\ .major = {}, | |
\\ .minor = {}, | |
\\ .patch = {}, | |
\\ }}, | |
\\ }}}}, | |
\\ | |
, .{ | |
linux.range.min.major, | |
linux.range.min.minor, | |
linux.range.min.patch, | |
linux.range.max.major, | |
linux.range.max.minor, | |
linux.range.max.patch, | |
linux.glibc.major, | |
linux.glibc.minor, | |
linux.glibc.patch, | |
}), | |
.windows => |windows| try buffer.writer().print( | |
\\ .windows = .{{ | |
\\ .min = {s}, | |
\\ .max = {s}, | |
\\ }}}}, | |
\\ | |
, | |
.{ windows.min, windows.max }, | |
), | |
} | |
try buffer.appendSlice( | |
\\}; | |
\\pub const target: std.Target = .{ | |
\\ .cpu = cpu, | |
\\ .os = os, | |
\\ .abi = abi, | |
\\ .ofmt = object_format, | |
\\ | |
); | |
if (target.dynamic_linker.get()) |dl| { | |
try buffer.writer().print( | |
\\ .dynamic_linker = std.Target.DynamicLinker.init("{s}"), | |
\\}}; | |
\\ | |
, .{dl}); | |
} else { | |
try buffer.appendSlice( | |
\\ .dynamic_linker = std.Target.DynamicLinker.none, | |
\\}; | |
\\ | |
); | |
} | |
// This is so that compiler_rt and libc.zig libraries know whether they | |
// will eventually be linked with libc. They make different decisions | |
// about what to export depending on whether another libc will be linked | |
// in. For example, compiler_rt will not export the __chkstk symbol if it | |
// knows libc will provide it, and likewise c.zig will not export memcpy. | |
const link_libc = opts.link_libc; | |
try buffer.writer().print( | |
\\pub const object_format = std.Target.ObjectFormat.{}; | |
\\pub const mode = std.builtin.OptimizeMode.{}; | |
\\pub const link_libc = {}; | |
\\pub const link_libcpp = {}; | |
\\pub const have_error_return_tracing = {}; | |
\\pub const valgrind_support = {}; | |
\\pub const sanitize_thread = {}; | |
\\pub const position_independent_code = {}; | |
\\pub const position_independent_executable = {}; | |
\\pub const strip_debug_info = {}; | |
\\pub const code_model = std.builtin.CodeModel.{}; | |
\\pub const omit_frame_pointer = {}; | |
\\ | |
, .{ | |
std.zig.fmtId(@tagName(target.ofmt)), | |
std.zig.fmtId(@tagName(opts.optimize_mode)), | |
link_libc, | |
opts.link_libcpp, | |
opts.error_tracing, | |
opts.valgrind, | |
opts.sanitize_thread, | |
opts.pic, | |
opts.pie, | |
opts.strip, | |
std.zig.fmtId(@tagName(opts.code_model)), | |
opts.omit_frame_pointer, | |
}); | |
if (target.os.tag == .wasi) { | |
const wasi_exec_model_fmt = std.zig.fmtId(@tagName(opts.wasi_exec_model)); | |
try buffer.writer().print( | |
\\pub const wasi_exec_model = std.builtin.WasiExecModel.{}; | |
\\ | |
, .{wasi_exec_model_fmt}); | |
} | |
if (opts.is_test) { | |
try buffer.appendSlice( | |
\\pub var test_functions: []const std.builtin.TestFn = undefined; // overwritten later | |
\\ | |
); | |
} | |
} | |
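// A sketch of the kind of file `append` produces (all values hypothetical,
// shown for an imaginary x86_64-linux-gnu debug executable):
//
//   pub const zig_backend = std.builtin.CompilerBackend.stage2_llvm;
//   pub const output_mode = std.builtin.OutputMode.Exe;
//   pub const link_mode = std.builtin.LinkMode.Static;
//   pub const abi = std.Target.Abi.gnu;
//   pub const mode = std.builtin.OptimizeMode.Debug;
//   pub const link_libc = false;
//
// The `cpu`, `os`, and `target` declarations follow from the same writer
// calls above, driven by the `Target` value carried in `opts`.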
pub fn populateFile(comp: *Compilation, mod: *Module, file: *File) !void { | |
assert(file.source_loaded == true); | |
if (mod.root.statFile(mod.root_src_path)) |stat| { | |
if (stat.size != file.source.len) { | |
std.log.warn( | |
"the cached file '{}{s}' had the wrong size. Expected {d}, found {d}. " ++ | |
"Overwriting with correct file contents now", | |
.{ mod.root, mod.root_src_path, file.source.len, stat.size }, | |
); | |
try writeFile(file, mod); | |
} else { | |
file.stat = .{ | |
.size = stat.size, | |
.inode = stat.inode, | |
.mtime = stat.mtime, | |
}; | |
} | |
} else |err| switch (err) { | |
error.BadPathName => unreachable, // it's always "builtin.zig" | |
error.NameTooLong => unreachable, // it's always "builtin.zig" | |
error.PipeBusy => unreachable, // it's not a pipe | |
error.WouldBlock => unreachable, // not asking for non-blocking I/O | |
error.FileNotFound => try writeFile(file, mod), | |
else => |e| return e, | |
} | |
log.debug("parsing and generating '{s}'", .{mod.root_src_path}); | |
file.tree = try std.zig.Ast.parse(comp.gpa, file.source, .zig); | |
assert(file.tree.errors.len == 0); // builtin.zig must parse | |
file.tree_loaded = true; | |
file.zir = try AstGen.generate(comp.gpa, file.tree); | |
assert(!file.zir.hasCompileErrors()); // builtin.zig must not have astgen errors | |
file.zir_loaded = true; | |
file.status = .success_zir; | |
} | |
fn writeFile(file: *File, mod: *Module) !void { | |
var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; | |
var af = try mod.root.atomicFile(mod.root_src_path, .{ .make_path = true }, &buf); | |
defer af.deinit(); | |
try af.file.writeAll(file.source); | |
af.finish() catch |err| switch (err) { | |
error.AccessDenied => switch (builtin.os.tag) { | |
.windows => { | |
// Very likely happened due to another process or thread | |
// simultaneously creating the same, correct builtin.zig file. | |
// This is not a problem; ignore it. | |
}, | |
else => return err, | |
}, | |
else => return err, | |
}; | |
file.stat = .{ | |
.size = file.source.len, | |
.inode = 0, // dummy value | |
.mtime = 0, // dummy value | |
}; | |
} | |
const builtin = @import("builtin"); | |
const std = @import("std"); | |
const Allocator = std.mem.Allocator; | |
const build_options = @import("build_options"); | |
const Module = @import("Package/Module.zig"); | |
const assert = std.debug.assert; | |
const AstGen = std.zig.AstGen; | |
const File = @import("Module.zig").File; | |
const Compilation = @import("Compilation.zig"); | |
const log = std.log.scoped(.builtin); | |
const Compilation = @This(); | |
const std = @import("std"); | |
const builtin = @import("builtin"); | |
const mem = std.mem; | |
const Allocator = std.mem.Allocator; | |
const assert = std.debug.assert; | |
const log = std.log.scoped(.compilation); | |
const Target = std.Target; | |
const ThreadPool = std.Thread.Pool; | |
const WaitGroup = std.Thread.WaitGroup; | |
const ErrorBundle = std.zig.ErrorBundle; | |
const Value = @import("Value.zig"); | |
const Type = @import("type.zig").Type; | |
const target_util = @import("target.zig"); | |
const Package = @import("Package.zig"); | |
const link = @import("link.zig"); | |
const tracy = @import("tracy.zig"); | |
const trace = tracy.trace; | |
const build_options = @import("build_options"); | |
const LibCInstallation = std.zig.LibCInstallation; | |
const glibc = @import("glibc.zig"); | |
const musl = @import("musl.zig"); | |
const mingw = @import("mingw.zig"); | |
const libunwind = @import("libunwind.zig"); | |
const libcxx = @import("libcxx.zig"); | |
const wasi_libc = @import("wasi_libc.zig"); | |
const fatal = @import("main.zig").fatal; | |
const clangMain = @import("main.zig").clangMain; | |
const Zcu = @import("Module.zig"); | |
/// Deprecated; use `Zcu`. | |
const Module = Zcu; | |
const InternPool = @import("InternPool.zig"); | |
const Cache = std.Build.Cache; | |
const c_codegen = @import("codegen/c.zig"); | |
const libtsan = @import("libtsan.zig"); | |
const Zir = std.zig.Zir; | |
const Autodoc = @import("Autodoc.zig"); | |
const resinator = @import("resinator.zig"); | |
const Builtin = @import("Builtin.zig"); | |
const LlvmObject = @import("codegen/llvm.zig").Object; | |
pub const Config = @import("Compilation/Config.zig"); | |
/// General-purpose allocator. Used for both temporary and long-term storage. | |
gpa: Allocator, | |
/// Arena-allocated memory, mostly used during initialization. However, it can | |
/// be used for other things requiring the same lifetime as the `Compilation`. | |
arena: Allocator, | |
/// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`. | |
/// TODO: rename to zcu: ?*Zcu | |
module: ?*Module, | |
/// Contains different state depending on whether the Compilation uses | |
/// incremental or whole cache mode. | |
cache_use: CacheUse, | |
/// All compilations have a root module because this is where some important | |
/// settings are stored, such as target and optimization mode. This module | |
/// might not have any .zig code associated with it, however. | |
root_mod: *Package.Module, | |
/// User-specified settings that have all the defaults resolved into concrete values. | |
config: Config, | |
/// The main output file. | |
/// In whole cache mode, this is null except during the body of the update
/// function. In incremental cache mode, this is a long-lived object. | |
/// In both cases, this is `null` when `-fno-emit-bin` is used. | |
bin_file: ?*link.File, | |
/// The root path for the dynamic linker and system libraries (as well as frameworks on Darwin) | |
sysroot: ?[]const u8, | |
/// This is `null` when not building a Windows DLL, or when `-fno-emit-implib` is used. | |
implib_emit: ?Emit, | |
/// This is non-null when `-femit-docs` is provided. | |
docs_emit: ?Emit, | |
root_name: [:0]const u8, | |
include_compiler_rt: bool, | |
objects: []Compilation.LinkObject, | |
/// Needed only for passing -F args to clang. | |
framework_dirs: []const []const u8, | |
/// These are *always* dynamically linked. Static libraries will be | |
/// provided as positional arguments. | |
system_libs: std.StringArrayHashMapUnmanaged(SystemLib), | |
version: ?std.SemanticVersion, | |
libc_installation: ?*const LibCInstallation, | |
skip_linker_dependencies: bool, | |
no_builtin: bool, | |
function_sections: bool, | |
data_sections: bool, | |
link_eh_frame_hdr: bool, | |
native_system_include_paths: []const []const u8, | |
/// List of symbols forced as undefined in the symbol table | |
/// thus forcing their resolution by the linker. | |
/// Corresponds to `-u <symbol>` for ELF/MachO and `/include:<symbol>` for COFF/PE. | |
force_undefined_symbols: std.StringArrayHashMapUnmanaged(void), | |
c_object_table: std.AutoArrayHashMapUnmanaged(*CObject, void) = .{}, | |
win32_resource_table: if (build_options.only_core_functionality) void else std.AutoArrayHashMapUnmanaged(*Win32Resource, void) = | |
if (build_options.only_core_functionality) {} else .{}, | |
link_error_flags: link.File.ErrorFlags = .{}, | |
link_errors: std.ArrayListUnmanaged(link.File.ErrorMsg) = .{}, | |
lld_errors: std.ArrayListUnmanaged(LldError) = .{}, | |
work_queue: std.fifo.LinearFifo(Job, .Dynamic), | |
anon_work_queue: std.fifo.LinearFifo(Job, .Dynamic), | |
/// These jobs are to invoke the Clang compiler to create an object file, which | |
/// gets linked with the Compilation. | |
c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic), | |
/// These jobs are to invoke the RC compiler to create a compiled resource file (.res), which | |
/// gets linked with the Compilation. | |
win32_resource_work_queue: if (build_options.only_core_functionality) void else std.fifo.LinearFifo(*Win32Resource, .Dynamic), | |
/// These jobs are to tokenize, parse, and astgen files, which may be outdated | |
/// since the last compilation, as well as scan for `@import` and queue up | |
/// additional jobs corresponding to those new files. | |
astgen_work_queue: std.fifo.LinearFifo(*Module.File, .Dynamic), | |
/// These jobs are to inspect the file via stat() and, if the embedded file has
/// changed on disk, mark the corresponding Decl outdated and queue up an `analyze_decl`
/// task for it. | |
embed_file_work_queue: std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic), | |
/// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator. | |
/// This data is accessed by multiple threads and is protected by `mutex`. | |
failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *CObject.Diag.Bundle) = .{}, | |
/// The ErrorBundle memory is owned by the `Win32Resource`, using Compilation's general purpose allocator. | |
/// This data is accessed by multiple threads and is protected by `mutex`. | |
failed_win32_resources: if (build_options.only_core_functionality) void else std.AutoArrayHashMapUnmanaged(*Win32Resource, ErrorBundle) = | |
if (build_options.only_core_functionality) {} else .{}, | |
/// Miscellaneous things that can fail. | |
misc_failures: std.AutoArrayHashMapUnmanaged(MiscTask, MiscError) = .{}, | |
/// When this is `true` it means invoking clang as a sub-process is expected to inherit | |
/// stdin, stdout, and stderr, and to forward the exit code when it returns non-success.
/// Otherwise we attempt to parse the error messages and expose them via the Compilation API. | |
/// This is `true` for `zig cc`, `zig c++`, and `zig translate-c`. | |
clang_passthrough_mode: bool, | |
clang_preprocessor_mode: ClangPreprocessorMode, | |
/// Whether to print clang argvs to stdout. | |
verbose_cc: bool, | |
verbose_air: bool, | |
verbose_intern_pool: bool, | |
verbose_generic_instances: bool, | |
verbose_llvm_ir: ?[]const u8, | |
verbose_llvm_bc: ?[]const u8, | |
verbose_cimport: bool, | |
verbose_llvm_cpu_features: bool, | |
verbose_link: bool, | |
disable_c_depfile: bool, | |
time_report: bool, | |
stack_report: bool, | |
debug_compiler_runtime_libs: bool, | |
debug_compile_errors: bool, | |
debug_incremental: bool, | |
job_queued_compiler_rt_lib: bool = false, | |
job_queued_compiler_rt_obj: bool = false, | |
job_queued_update_builtin_zig: bool, | |
alloc_failure_occurred: bool = false, | |
formatted_panics: bool = false, | |
last_update_was_cache_hit: bool = false, | |
c_source_files: []const CSourceFile, | |
rc_source_files: []const RcSourceFile, | |
global_cc_argv: []const []const u8, | |
cache_parent: *Cache, | |
/// Path to own executable for invoking `zig clang`. | |
self_exe_path: ?[]const u8, | |
zig_lib_directory: Directory, | |
local_cache_directory: Directory, | |
global_cache_directory: Directory, | |
libc_include_dir_list: []const []const u8, | |
libc_framework_dir_list: []const []const u8, | |
rc_include_dir_list: []const []const u8, | |
thread_pool: *ThreadPool, | |
/// Populated when we build the libc++ static library. A Job to build this is placed in the queue | |
/// and resolved before calling linker.flush(). | |
libcxx_static_lib: ?CRTFile = null, | |
/// Populated when we build the libc++abi static library. A Job to build this is placed in the queue | |
/// and resolved before calling linker.flush(). | |
libcxxabi_static_lib: ?CRTFile = null, | |
/// Populated when we build the libunwind static library. A Job to build this is placed in the queue | |
/// and resolved before calling linker.flush(). | |
libunwind_static_lib: ?CRTFile = null, | |
/// Populated when we build the TSAN static library. A Job to build this is placed in the queue | |
/// and resolved before calling linker.flush(). | |
tsan_static_lib: ?CRTFile = null, | |
/// Populated when we build the libc static library. A Job to build this is placed in the queue | |
/// and resolved before calling linker.flush(). | |
libc_static_lib: ?CRTFile = null, | |
/// Populated when we build the libcompiler_rt static library. A Job to build this is indicated | |
/// by setting `job_queued_compiler_rt_lib` and resolved before calling linker.flush(). | |
compiler_rt_lib: ?CRTFile = null, | |
/// Populated when we build the compiler_rt_obj object. A Job to build this is indicated | |
/// by setting `job_queued_compiler_rt_obj` and resolved before calling linker.flush(). | |
compiler_rt_obj: ?CRTFile = null, | |
glibc_so_files: ?glibc.BuiltSharedObjects = null, | |
wasi_emulated_libs: []const wasi_libc.CRTFile, | |
/// For example `Scrt1.o` and `libc_nonshared.a`. These are populated after building libc from source.
/// The set of needed CRT (C runtime) files differs depending on the target and compilation settings. | |
/// The key is the basename, and the value is the absolute path to the completed build artifact. | |
crt_files: std.StringHashMapUnmanaged(CRTFile) = .{}, | |
/// How many lines of reference trace should be included per compile error. | |
/// Null means only show snippet on first error. | |
reference_trace: ?u32 = null, | |
libcxx_abi_version: libcxx.AbiVersion = libcxx.AbiVersion.default, | |
/// This mutex guards all `Compilation` mutable state. | |
mutex: std.Thread.Mutex = .{}, | |
test_filters: []const []const u8, | |
test_name_prefix: ?[]const u8, | |
emit_asm: ?EmitLoc, | |
emit_llvm_ir: ?EmitLoc, | |
emit_llvm_bc: ?EmitLoc, | |
work_queue_wait_group: WaitGroup = .{}, | |
astgen_wait_group: WaitGroup = .{}, | |
llvm_opt_bisect_limit: c_int, | |
pub const Emit = struct { | |
/// Where the output will go. | |
directory: Directory, | |
/// Path to the output file, relative to `directory`. | |
sub_path: []const u8, | |
/// Returns the full path to `basename` if it were in the same directory as the | |
/// `Emit` sub_path. | |
pub fn basenamePath(emit: Emit, arena: Allocator, basename: []const u8) ![:0]const u8 { | |
const full_path = if (emit.directory.path) |p| | |
try std.fs.path.join(arena, &[_][]const u8{ p, emit.sub_path }) | |
else | |
emit.sub_path; | |
if (std.fs.path.dirname(full_path)) |dirname| { | |
return try std.fs.path.joinZ(arena, &.{ dirname, basename }); | |
} else { | |
return try arena.dupeZ(u8, basename); | |
} | |
} | |
}; | |
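// Example (hypothetical values): for an `Emit` with `directory.path` of
// "zig-out" and `sub_path` of "bin/app", `basenamePath(arena, "app.pdb")`
// returns "zig-out/bin/app.pdb" - the basename is placed next to the main
// output file rather than resolved against the cwd.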
pub const default_stack_protector_buffer_size = target_util.default_stack_protector_buffer_size; | |
pub const SemaError = Module.SemaError; | |
pub const CRTFile = struct { | |
lock: Cache.Lock, | |
full_object_path: []const u8, | |
pub fn deinit(self: *CRTFile, gpa: Allocator) void { | |
self.lock.release(); | |
gpa.free(self.full_object_path); | |
self.* = undefined; | |
} | |
}; | |
/// Supported languages for "zig clang -x <lang>". | |
/// Loosely based on llvm-project/clang/include/clang/Driver/Types.def | |
pub const LangToExt = std.ComptimeStringMap(FileExt, .{ | |
.{ "c", .c }, | |
.{ "c-header", .h }, | |
.{ "c++", .cpp }, | |
.{ "c++-header", .hpp }, | |
.{ "objective-c", .m }, | |
.{ "objective-c-header", .hm }, | |
.{ "objective-c++", .mm }, | |
.{ "objective-c++-header", .hmm }, | |
.{ "assembler", .assembly }, | |
.{ "assembler-with-cpp", .assembly_with_cpp }, | |
.{ "cuda", .cu }, | |
}); | |
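// Usage sketch: `LangToExt.get("c++")` yields `.cpp`, while a language not in
// the table, e.g. `LangToExt.get("fortran")`, yields `null`, letting the
// caller fall back to detecting the language from the file extension.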
/// For passing to a C compiler. | |
pub const CSourceFile = struct { | |
/// Many C compiler flags are determined by settings contained in the owning Module. | |
owner: *Package.Module, | |
src_path: []const u8, | |
extra_flags: []const []const u8 = &.{}, | |
/// Same as extra_flags except they are not added to the Cache hash. | |
cache_exempt_flags: []const []const u8 = &.{}, | |
/// This field is non-null if and only if the language was explicitly set | |
/// with "-x lang". | |
ext: ?FileExt = null, | |
}; | |
/// For passing to resinator. | |
pub const RcSourceFile = struct { | |
owner: *Package.Module, | |
src_path: []const u8, | |
extra_flags: []const []const u8 = &.{}, | |
}; | |
pub const RcIncludes = enum { | |
/// Use MSVC if available, fall back to MinGW. | |
any, | |
/// Use MSVC include paths (MSVC install + Windows SDK, must be present on the system). | |
msvc, | |
/// Use MinGW include paths (distributed with Zig). | |
gnu, | |
/// Do not use any autodetected include paths. | |
none, | |
}; | |
const Job = union(enum) { | |
/// Write the constant value for a Decl to the output file. | |
codegen_decl: InternPool.DeclIndex, | |
/// Write the machine code for a function to the output file. | |
/// This will either be a non-generic `func_decl` or a `func_instance`. | |
codegen_func: InternPool.Index, | |
/// Render the .h file snippet for the Decl. | |
emit_h_decl: InternPool.DeclIndex, | |
/// The Decl needs to be analyzed and possibly export itself. | |
/// It may have already been analyzed, or it may have been determined
/// to be outdated; in this case perform semantic analysis again. | |
analyze_decl: InternPool.DeclIndex, | |
/// The source file containing the Decl has been updated, and so the | |
/// Decl may need its line number information updated in the debug info. | |
update_line_number: InternPool.DeclIndex, | |
/// The main source file for the module needs to be analyzed. | |
analyze_mod: *Package.Module, | |
/// one of the glibc static objects | |
glibc_crt_file: glibc.CRTFile, | |
/// all of the glibc shared objects | |
glibc_shared_objects, | |
/// one of the musl static objects | |
musl_crt_file: musl.CRTFile, | |
/// one of the mingw-w64 static objects | |
mingw_crt_file: mingw.CRTFile, | |
/// libunwind.a, usually needed when linking libc | |
libunwind: void, | |
libcxx: void, | |
libcxxabi: void, | |
libtsan: void, | |
/// needed when not linking libc and using LLVM for code generation because it generates | |
/// calls to, for example, memcpy and memset. | |
zig_libc: void, | |
/// one of WASI libc static objects | |
wasi_libc_crt_file: wasi_libc.CRTFile, | |
/// The value is the index into `system_libs`. | |
windows_import_lib: usize, | |
}; | |
pub const CObject = struct { | |
/// Relative to cwd. Owned by arena. | |
src: CSourceFile, | |
status: union(enum) { | |
new, | |
success: struct { | |
/// The outputted result. Owned by gpa. | |
object_path: []u8, | |
/// This is a file system lock on the cache hash manifest representing this | |
/// object. It prevents other invocations of the Zig compiler from interfering | |
/// with this object until released. | |
lock: Cache.Lock, | |
}, | |
/// There will be a corresponding ErrorMsg in Compilation.failed_c_objects. | |
failure, | |
/// A transient failure happened when trying to compile the C Object; it may | |
/// succeed if we try again. There may be a corresponding ErrorMsg in | |
/// Compilation.failed_c_objects. If there is not, the failure is out of memory. | |
failure_retryable, | |
}, | |
pub const Diag = struct { | |
level: u32 = 0, | |
category: u32 = 0, | |
msg: []const u8 = &.{}, | |
src_loc: SrcLoc = .{}, | |
src_ranges: []const SrcRange = &.{}, | |
sub_diags: []const Diag = &.{}, | |
pub const SrcLoc = struct { | |
file: u32 = 0, | |
line: u32 = 0, | |
column: u32 = 0, | |
offset: u32 = 0, | |
}; | |
pub const SrcRange = struct { | |
start: SrcLoc = .{}, | |
end: SrcLoc = .{}, | |
}; | |
pub fn deinit(diag: *Diag, gpa: Allocator) void { | |
gpa.free(diag.msg); | |
gpa.free(diag.src_ranges); | |
for (diag.sub_diags) |sub_diag| { | |
var sub_diag_mut = sub_diag; | |
sub_diag_mut.deinit(gpa); | |
} | |
gpa.free(diag.sub_diags); | |
diag.* = undefined; | |
} | |
pub fn count(diag: Diag) u32 { | |
var total: u32 = 1; | |
for (diag.sub_diags) |sub_diag| total += sub_diag.count(); | |
return total; | |
} | |
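// For example (illustrative), a diagnostic with two sub-diagnostics, one of
// which itself carries one nested sub-diagnostic, has a count of 4: itself
// plus three descendants. `addToErrorBundle` below relies on this to size
// the reserved notes (`count() - 1`).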
pub fn addToErrorBundle(diag: Diag, eb: *ErrorBundle.Wip, bundle: Bundle, note: *u32) !void { | |
const err_msg = try eb.addErrorMessage(try diag.toErrorMessage(eb, bundle, 0)); | |
eb.extra.items[note.*] = @intFromEnum(err_msg); | |
note.* += 1; | |
for (diag.sub_diags) |sub_diag| try sub_diag.addToErrorBundle(eb, bundle, note); | |
} | |
pub fn toErrorMessage( | |
diag: Diag, | |
eb: *ErrorBundle.Wip, | |
bundle: Bundle, | |
notes_len: u32, | |
) !ErrorBundle.ErrorMessage { | |
var start = diag.src_loc.offset; | |
var end = diag.src_loc.offset; | |
for (diag.src_ranges) |src_range| { | |
if (src_range.start.file == diag.src_loc.file and | |
src_range.start.line == diag.src_loc.line) | |
{ | |
start = @min(src_range.start.offset, start); | |
} | |
if (src_range.end.file == diag.src_loc.file and | |
src_range.end.line == diag.src_loc.line) | |
{ | |
end = @max(src_range.end.offset, end); | |
} | |
} | |
const file_name = bundle.file_names.get(diag.src_loc.file) orelse ""; | |
const source_line = source_line: { | |
if (diag.src_loc.offset == 0 or diag.src_loc.column == 0) break :source_line 0; | |
const file = std.fs.cwd().openFile(file_name, .{}) catch break :source_line 0; | |
defer file.close(); | |
file.seekTo(diag.src_loc.offset + 1 - diag.src_loc.column) catch break :source_line 0; | |
var line = std.ArrayList(u8).init(eb.gpa); | |
defer line.deinit(); | |
file.reader().readUntilDelimiterArrayList(&line, '\n', 1 << 10) catch break :source_line 0; | |
break :source_line try eb.addString(line.items); | |
}; | |
return .{ | |
.msg = try eb.addString(diag.msg), | |
.src_loc = try eb.addSourceLocation(.{ | |
.src_path = try eb.addString(file_name), | |
.line = diag.src_loc.line -| 1, | |
.column = diag.src_loc.column -| 1, | |
.span_start = start, | |
.span_main = diag.src_loc.offset, | |
.span_end = end + 1, | |
.source_line = source_line, | |
}), | |
.notes_len = notes_len, | |
}; | |
} | |
pub const Bundle = struct { | |
file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{}, | |
category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{}, | |
diags: []Diag = &.{}, | |
pub fn destroy(bundle: *Bundle, gpa: Allocator) void { | |
for (bundle.file_names.values()) |file_name| gpa.free(file_name); | |
for (bundle.category_names.values()) |category_name| gpa.free(category_name); | |
for (bundle.diags) |*diag| diag.deinit(gpa); | |
gpa.free(bundle.diags); | |
gpa.destroy(bundle); | |
} | |
pub fn parse(gpa: Allocator, path: []const u8) !*Bundle { | |
const BitcodeReader = @import("codegen/llvm/BitcodeReader.zig"); | |
const BlockId = enum(u32) { | |
Meta = 8, | |
Diag, | |
_, | |
}; | |
const RecordId = enum(u32) { | |
Version = 1, | |
DiagInfo, | |
SrcRange, | |
DiagFlag, | |
CatName, | |
FileName, | |
FixIt, | |
_, | |
}; | |
const WipDiag = struct { | |
level: u32 = 0, | |
category: u32 = 0, | |
msg: []const u8 = &.{}, | |
src_loc: SrcLoc = .{}, | |
src_ranges: std.ArrayListUnmanaged(SrcRange) = .{}, | |
sub_diags: std.ArrayListUnmanaged(Diag) = .{}, | |
fn deinit(wip_diag: *@This(), allocator: Allocator) void { | |
allocator.free(wip_diag.msg); | |
wip_diag.src_ranges.deinit(allocator); | |
for (wip_diag.sub_diags.items) |*sub_diag| sub_diag.deinit(allocator); | |
wip_diag.sub_diags.deinit(allocator); | |
wip_diag.* = undefined; | |
} | |
}; | |
const file = try std.fs.cwd().openFile(path, .{}); | |
defer file.close(); | |
var br = std.io.bufferedReader(file.reader()); | |
const reader = br.reader(); | |
var bc = BitcodeReader.init(gpa, .{ .reader = reader.any() }); | |
defer bc.deinit(); | |
var file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{}; | |
errdefer { | |
for (file_names.values()) |file_name| gpa.free(file_name); | |
file_names.deinit(gpa); | |
} | |
var category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{}; | |
errdefer { | |
for (category_names.values()) |category_name| gpa.free(category_name); | |
category_names.deinit(gpa); | |
} | |
var stack: std.ArrayListUnmanaged(WipDiag) = .{}; | |
defer { | |
for (stack.items) |*wip_diag| wip_diag.deinit(gpa); | |
stack.deinit(gpa); | |
} | |
try stack.append(gpa, .{}); | |
try bc.checkMagic("DIAG"); | |
while (try bc.next()) |item| switch (item) { | |
.start_block => |block| switch (@as(BlockId, @enumFromInt(block.id))) { | |
.Meta => if (stack.items.len > 0) try bc.skipBlock(block), | |
.Diag => try stack.append(gpa, .{}), | |
_ => try bc.skipBlock(block), | |
}, | |
.record => |record| switch (@as(RecordId, @enumFromInt(record.id))) { | |
.Version => if (record.operands[0] != 2) return error.InvalidVersion, | |
.DiagInfo => { | |
const top = &stack.items[stack.items.len - 1]; | |
top.level = @intCast(record.operands[0]); | |
top.src_loc = .{ | |
.file = @intCast(record.operands[1]), | |
.line = @intCast(record.operands[2]), | |
.column = @intCast(record.operands[3]), | |
.offset = @intCast(record.operands[4]), | |
}; | |
top.category = @intCast(record.operands[5]); | |
top.msg = try gpa.dupe(u8, record.blob); | |
}, | |
.SrcRange => try stack.items[stack.items.len - 1].src_ranges.append(gpa, .{ | |
.start = .{ | |
.file = @intCast(record.operands[0]), | |
.line = @intCast(record.operands[1]), | |
.column = @intCast(record.operands[2]), | |
.offset = @intCast(record.operands[3]), | |
}, | |
.end = .{ | |
.file = @intCast(record.operands[4]), | |
.line = @intCast(record.operands[5]), | |
.column = @intCast(record.operands[6]), | |
.offset = @intCast(record.operands[7]), | |
}, | |
}), | |
.DiagFlag => {}, | |
.CatName => { | |
try category_names.ensureUnusedCapacity(gpa, 1); | |
category_names.putAssumeCapacity( | |
@intCast(record.operands[0]), | |
try gpa.dupe(u8, record.blob), | |
); | |
}, | |
.FileName => { | |
try file_names.ensureUnusedCapacity(gpa, 1); | |
file_names.putAssumeCapacity( | |
@intCast(record.operands[0]), | |
try gpa.dupe(u8, record.blob), | |
); | |
}, | |
.FixIt => {}, | |
_ => {}, | |
}, | |
.end_block => |block| switch (@as(BlockId, @enumFromInt(block.id))) { | |
.Meta => {}, | |
.Diag => { | |
var wip_diag = stack.pop(); | |
errdefer wip_diag.deinit(gpa); | |
const src_ranges = try wip_diag.src_ranges.toOwnedSlice(gpa); | |
errdefer gpa.free(src_ranges); | |
const sub_diags = try wip_diag.sub_diags.toOwnedSlice(gpa); | |
errdefer { | |
for (sub_diags) |*sub_diag| sub_diag.deinit(gpa); | |
gpa.free(sub_diags); | |
} | |
try stack.items[stack.items.len - 1].sub_diags.append(gpa, .{ | |
.level = wip_diag.level, | |
.category = wip_diag.category, | |
.msg = wip_diag.msg, | |
.src_loc = wip_diag.src_loc, | |
.src_ranges = src_ranges, | |
.sub_diags = sub_diags, | |
}); | |
}, | |
_ => {}, | |
}, | |
}; | |
const bundle = try gpa.create(Bundle); | |
assert(stack.items.len == 1); | |
bundle.* = .{ | |
.file_names = file_names, | |
.category_names = category_names, | |
.diags = try stack.items[0].sub_diags.toOwnedSlice(gpa), | |
}; | |
return bundle; | |
} | |
pub fn addToErrorBundle(bundle: Bundle, eb: *ErrorBundle.Wip) !void { | |
for (bundle.diags) |diag| { | |
const notes_len = diag.count() - 1; | |
try eb.addRootErrorMessage(try diag.toErrorMessage(eb, bundle, notes_len)); | |
if (notes_len > 0) { | |
var note = try eb.reserveNotes(notes_len); | |
for (diag.sub_diags) |sub_diag| | |
try sub_diag.addToErrorBundle(eb, bundle, ¬e); | |
} | |
} | |
} | |
}; | |
}; | |
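// Usage sketch (hypothetical path; not part of the original source): this is
// how clang's serialized-diagnostics output (`--serialize-diagnostics`) can
// be surfaced as Zig compile errors using the API above:
//
//   var bundle = try CObject.Diag.Bundle.parse(gpa, "zig-cache/tmp/foo.dia");
//   defer bundle.destroy(gpa);
//   try bundle.addToErrorBundle(&wip_error_bundle);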
/// Returns true if there was failure.
pub fn clearStatus(self: *CObject, gpa: Allocator) bool { | |
switch (self.status) { | |
.new => return false, | |
.failure, .failure_retryable => { | |
self.status = .new; | |
return true; | |
}, | |
.success => |*success| { | |
gpa.free(success.object_path); | |
success.lock.release(); | |
self.status = .new; | |
return false; | |
}, | |
} | |
} | |
pub fn destroy(self: *CObject, gpa: Allocator) void { | |
_ = self.clearStatus(gpa); | |
gpa.destroy(self); | |
} | |
}; | |
pub const Win32Resource = struct { | |
/// Relative to cwd. Owned by arena. | |
src: union(enum) { | |
rc: RcSourceFile, | |
manifest: []const u8, | |
}, | |
status: union(enum) { | |
new, | |
success: struct { | |
/// The outputted result. Owned by gpa. | |
res_path: []u8, | |
/// This is a file system lock on the cache hash manifest representing this | |
/// object. It prevents other invocations of the Zig compiler from interfering | |
/// with this object until released. | |
lock: Cache.Lock, | |
}, | |
/// There will be a corresponding ErrorMsg in Compilation.failed_win32_resources. | |
failure, | |
/// A transient failure happened when trying to compile the resource file; it may | |
/// succeed if we try again. There may be a corresponding ErrorMsg in | |
/// Compilation.failed_win32_resources. If there is not, the failure is out of memory. | |
failure_retryable, | |
}, | |
/// Returns true if there was failure. | |
pub fn clearStatus(self: *Win32Resource, gpa: Allocator) bool { | |
switch (self.status) { | |
.new => return false, | |
.failure, .failure_retryable => { | |
self.status = .new; | |
return true; | |
}, | |
.success => |*success| { | |
gpa.free(success.res_path); | |
success.lock.release(); | |
self.status = .new; | |
return false; | |
}, | |
} | |
} | |
pub fn destroy(self: *Win32Resource, gpa: Allocator) void { | |
_ = self.clearStatus(gpa); | |
gpa.destroy(self); | |
} | |
}; | |
pub const MiscTask = enum { | |
write_builtin_zig, | |
rename_results, | |
check_whole_cache, | |
glibc_crt_file, | |
glibc_shared_objects, | |
musl_crt_file, | |
mingw_crt_file, | |
windows_import_lib, | |
libunwind, | |
libcxx, | |
libcxxabi, | |
libtsan, | |
wasi_libc_crt_file, | |
compiler_rt, | |
zig_libc, | |
analyze_mod, | |
@"musl crti.o", | |
@"musl crtn.o", | |
@"musl crt1.o", | |
@"musl rcrt1.o", | |
@"musl Scrt1.o", | |
@"musl libc.a", | |
@"musl libc.so", | |
@"wasi crt1-reactor.o", | |
@"wasi crt1-command.o", | |
@"wasi libc.a", | |
@"libwasi-emulated-process-clocks.a", | |
@"libwasi-emulated-getpid.a", | |
@"libwasi-emulated-mman.a", | |
@"libwasi-emulated-signal.a", | |
@"glibc crti.o", | |
@"glibc crtn.o", | |
@"glibc Scrt1.o", | |
@"glibc libc_nonshared.a", | |
@"glibc shared object", | |
@"mingw-w64 crt2.o", | |
@"mingw-w64 dllcrt2.o", | |
@"mingw-w64 mingwex.lib", | |
}; | |
pub const MiscError = struct { | |
/// Allocated with gpa. | |
msg: []u8, | |
children: ?ErrorBundle = null, | |
pub fn deinit(misc_err: *MiscError, gpa: Allocator) void { | |
gpa.free(misc_err.msg); | |
if (misc_err.children) |*children| { | |
children.deinit(gpa); | |
} | |
misc_err.* = undefined; | |
} | |
}; | |
pub const LldError = struct { | |
/// Allocated with gpa. | |
msg: []const u8, | |
context_lines: []const []const u8 = &.{}, | |
pub fn deinit(self: *LldError, gpa: Allocator) void { | |
for (self.context_lines) |line| { | |
gpa.free(line); | |
} | |
gpa.free(self.context_lines); | |
gpa.free(self.msg); | |
} | |
}; | |
pub const Directory = Cache.Directory; | |
pub const EmitLoc = struct { | |
/// If this is `null` it means the file will be output to the cache directory. | |
/// When provided, both the open file handle and the path name must outlive the `Compilation`. | |
directory: ?Compilation.Directory, | |
/// This may not have sub-directories in it. | |
basename: []const u8, | |
}; | |
pub const cache_helpers = struct { | |
pub fn addModule(hh: *Cache.HashHelper, mod: *const Package.Module) void { | |
addResolvedTarget(hh, mod.resolved_target); | |
hh.add(mod.optimize_mode); | |
hh.add(mod.code_model); | |
hh.add(mod.single_threaded); | |
hh.add(mod.error_tracing); | |
hh.add(mod.valgrind); | |
hh.add(mod.pic); | |
hh.add(mod.strip); | |
hh.add(mod.omit_frame_pointer); | |
hh.add(mod.stack_check); | |
hh.add(mod.red_zone); | |
hh.add(mod.sanitize_c); | |
hh.add(mod.sanitize_thread); | |
hh.add(mod.unwind_tables); | |
hh.add(mod.structured_cfg); | |
hh.addListOfBytes(mod.cc_argv); | |
} | |
pub fn addResolvedTarget( | |
hh: *Cache.HashHelper, | |
resolved_target: Package.Module.ResolvedTarget, | |
) void { | |
const target = resolved_target.result; | |
hh.add(target.cpu.arch); | |
hh.addBytes(target.cpu.model.name); | |
hh.add(target.cpu.features.ints); | |
hh.add(target.os.tag); | |
hh.add(target.os.getVersionRange()); | |
hh.add(target.abi); | |
hh.add(target.ofmt); | |
hh.add(resolved_target.is_native_os); | |
hh.add(resolved_target.is_native_abi); | |
} | |
pub fn addEmitLoc(hh: *Cache.HashHelper, emit_loc: EmitLoc) void { | |
hh.addBytes(emit_loc.basename); | |
} | |
pub fn addOptionalEmitLoc(hh: *Cache.HashHelper, optional_emit_loc: ?EmitLoc) void { | |
hh.add(optional_emit_loc != null); | |
addEmitLoc(hh, optional_emit_loc orelse return); | |
} | |
pub fn addOptionalDebugFormat(hh: *Cache.HashHelper, x: ?Config.DebugFormat) void { | |
hh.add(x != null); | |
addDebugFormat(hh, x orelse return); | |
} | |
pub fn addDebugFormat(hh: *Cache.HashHelper, x: Config.DebugFormat) void { | |
const tag: @typeInfo(Config.DebugFormat).Union.tag_type.? = x; | |
hh.add(tag); | |
switch (x) { | |
.strip, .code_view => {}, | |
.dwarf => |f| hh.add(f), | |
} | |
} | |
pub fn hashCSource(self: *Cache.Manifest, c_source: CSourceFile) !void { | |
_ = try self.addFile(c_source.src_path, null); | |
// Hash the extra flags, with special care to call addFile for file parameters. | |
// TODO this logic can likely be improved by utilizing clang_options_data.zig. | |
const file_args = [_][]const u8{"-include"}; | |
var arg_i: usize = 0; | |
while (arg_i < c_source.extra_flags.len) : (arg_i += 1) { | |
const arg = c_source.extra_flags[arg_i]; | |
self.hash.addBytes(arg); | |
for (file_args) |file_arg| { | |
if (mem.eql(u8, file_arg, arg) and arg_i + 1 < c_source.extra_flags.len) { | |
arg_i += 1; | |
_ = try self.addFile(c_source.extra_flags[arg_i], null); | |
} | |
} | |
} | |
} | |
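// Illustration (hypothetical flags): given extra_flags of
// .{ "-DFOO=1", "-include", "config.h" }, the loop above hashes the first two
// strings as bytes and then calls `addFile` for "config.h", so editing
// config.h's contents invalidates the cache entry even though the flag list
// itself is unchanged.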
}; | |
pub const ClangPreprocessorMode = enum { | |
no, | |
/// This means we are doing `zig cc -E -o <path>`. | |
yes, | |
/// This means we are doing `zig cc -E`. | |
stdout, | |
/// precompiled C header | |
pch, | |
}; | |
pub const Framework = link.File.MachO.Framework; | |
pub const SystemLib = link.SystemLib; | |
pub const CacheMode = enum { incremental, whole }; | |
const CacheUse = union(CacheMode) { | |
incremental: *Incremental, | |
whole: *Whole, | |
const Whole = struct { | |
/// This is a pointer to a local variable inside `update()`. | |
cache_manifest: ?*Cache.Manifest = null, | |
cache_manifest_mutex: std.Thread.Mutex = .{}, | |
/// null means -fno-emit-bin. | |
/// This is mutable memory allocated into the Compilation-lifetime arena (`arena`) | |
/// of exactly the correct size for "o/[digest]/[basename]". | |
/// The basename is that of the output binary file, kept in case we don't know the directory yet.
bin_sub_path: ?[]u8, | |
/// Same as `bin_sub_path` but for implibs. | |
implib_sub_path: ?[]u8, | |
docs_sub_path: ?[]u8, | |
lf_open_opts: link.File.OpenOptions, | |
tmp_artifact_directory: ?Cache.Directory, | |
/// Prevents other processes from clobbering files in the output directory. | |
lock: ?Cache.Lock, | |
fn releaseLock(whole: *Whole) void { | |
if (whole.lock) |*lock| { | |
lock.release(); | |
whole.lock = null; | |
} | |
} | |
fn moveLock(whole: *Whole) Cache.Lock { | |
const result = whole.lock.?; | |
whole.lock = null; | |
return result; | |
} | |
}; | |
const Incremental = struct { | |
/// Where build artifacts and incremental compilation metadata serialization go. | |
artifact_directory: Compilation.Directory, | |
}; | |
fn deinit(cu: CacheUse) void { | |
switch (cu) { | |
.incremental => |incremental| { | |
incremental.artifact_directory.handle.close(); | |
}, | |
.whole => |whole| { | |
whole.releaseLock(); | |
}, | |
} | |
} | |
}; | |
pub const LinkObject = struct { | |
path: []const u8, | |
must_link: bool = false, | |
// When the library is passed via a positional argument, it will be | |
// added as a full path. If it's `-l<lib>`, then just the basename. | |
// | |
// Consistent with `withLOption` variable name in lld ELF driver. | |
loption: bool = false, | |
}; | |
pub const CreateOptions = struct { | |
zig_lib_directory: Directory, | |
local_cache_directory: Directory, | |
global_cache_directory: Directory, | |
thread_pool: *ThreadPool, | |
self_exe_path: ?[]const u8 = null, | |
/// Options that have been resolved by calling `resolveDefaults`. | |
config: Compilation.Config, | |
root_mod: *Package.Module, | |
/// Normally, `main_mod` and `root_mod` are the same. The exception is `zig | |
/// test`, in which `root_mod` is the test runner, and `main_mod` is the | |
/// user's source file which has the tests. | |
main_mod: ?*Package.Module = null, | |
/// This is provided so that the API user has a chance to tweak the | |
/// per-module settings of the standard library. | |
/// When this is null, a default configuration of the std lib is created | |
/// based on the settings of root_mod. | |
std_mod: ?*Package.Module = null, | |
root_name: []const u8, | |
sysroot: ?[]const u8 = null, | |
/// `null` means to not emit a binary file. | |
emit_bin: ?EmitLoc, | |
/// `null` means to not emit a C header file. | |
emit_h: ?EmitLoc = null, | |
/// `null` means to not emit assembly. | |
emit_asm: ?EmitLoc = null, | |
/// `null` means to not emit LLVM IR. | |
emit_llvm_ir: ?EmitLoc = null, | |
/// `null` means to not emit LLVM module bitcode. | |
emit_llvm_bc: ?EmitLoc = null, | |
/// `null` means to not emit docs. | |
emit_docs: ?EmitLoc = null, | |
/// `null` means to not emit an import lib. | |
emit_implib: ?EmitLoc = null, | |
/// Normally when using LLD to link, Zig uses a file named "lld.id" in the | |
/// same directory as the output binary which contains the hash of the link | |
/// operation, allowing Zig to skip linking when the hash would be unchanged. | |
/// When the output binary is emitted into a directory that is externally
/// modified - essentially anything other than zig-cache - this flag should be
/// set to disable this machinery and avoid false positives.
disable_lld_caching: bool = false, | |
cache_mode: CacheMode = .incremental, | |
lib_dirs: []const []const u8 = &[0][]const u8{}, | |
rpath_list: []const []const u8 = &[0][]const u8{}, | |
symbol_wrap_set: std.StringArrayHashMapUnmanaged(void) = .{}, | |
c_source_files: []const CSourceFile = &.{}, | |
rc_source_files: []const RcSourceFile = &.{}, | |
manifest_file: ?[]const u8 = null, | |
rc_includes: RcIncludes = .any, | |
link_objects: []LinkObject = &[0]LinkObject{}, | |
framework_dirs: []const []const u8 = &[0][]const u8{}, | |
frameworks: []const Framework = &.{}, | |
system_lib_names: []const []const u8 = &.{}, | |
system_lib_infos: []const SystemLib = &.{}, | |
/// These correspond to the WASI libc emulated subcomponents including: | |
/// * process clocks | |
/// * getpid | |
/// * mman | |
/// * signal | |
wasi_emulated_libs: []const wasi_libc.CRTFile = &.{}, | |
/// Whether to include compiler-rt. `null` means to use the default, which is
/// resolved from the output mode and other settings (see `include_compiler_rt`
/// on `Compilation`).
want_compiler_rt: ?bool = null, | |
want_lto: ?bool = null, | |
formatted_panics: ?bool = null, | |
function_sections: bool = false, | |
data_sections: bool = false, | |
no_builtin: bool = false, | |
time_report: bool = false, | |
stack_report: bool = false, | |
link_eh_frame_hdr: bool = false, | |
link_emit_relocs: bool = false, | |
linker_script: ?[]const u8 = null, | |
version_script: ?[]const u8 = null, | |
linker_allow_undefined_version: bool = false, | |
soname: ?[]const u8 = null, | |
linker_gc_sections: ?bool = null, | |
linker_allow_shlib_undefined: ?bool = null, | |
linker_bind_global_refs_locally: ?bool = null, | |
linker_import_symbols: bool = false, | |
linker_import_table: bool = false, | |
linker_export_table: bool = false, | |
linker_initial_memory: ?u64 = null, | |
linker_max_memory: ?u64 = null, | |
linker_global_base: ?u64 = null, | |
linker_export_symbol_names: []const []const u8 = &.{}, | |
linker_print_gc_sections: bool = false, | |
linker_print_icf_sections: bool = false, | |
linker_print_map: bool = false, | |
llvm_opt_bisect_limit: i32 = -1, | |
build_id: ?std.zig.BuildId = null, | |
disable_c_depfile: bool = false, | |
linker_z_nodelete: bool = false, | |
linker_z_notext: bool = false, | |
linker_z_defs: bool = false, | |
linker_z_origin: bool = false, | |
linker_z_now: bool = true, | |
linker_z_relro: bool = true, | |
linker_z_nocopyreloc: bool = false, | |
linker_z_common_page_size: ?u64 = null, | |
linker_z_max_page_size: ?u64 = null, | |
linker_tsaware: bool = false, | |
linker_nxcompat: bool = false, | |
linker_dynamicbase: bool = true, | |
linker_compress_debug_sections: ?link.File.Elf.CompressDebugSections = null, | |
linker_module_definition_file: ?[]const u8 = null, | |
linker_sort_section: ?link.File.Elf.SortSection = null, | |
major_subsystem_version: ?u16 = null, | |
minor_subsystem_version: ?u16 = null, | |
clang_passthrough_mode: bool = false, | |
verbose_cc: bool = false, | |
verbose_link: bool = false, | |
verbose_air: bool = false, | |
verbose_intern_pool: bool = false, | |
verbose_generic_instances: bool = false, | |
verbose_llvm_ir: ?[]const u8 = null, | |
verbose_llvm_bc: ?[]const u8 = null, | |
verbose_cimport: bool = false, | |
verbose_llvm_cpu_features: bool = false, | |
debug_compiler_runtime_libs: bool = false, | |
debug_compile_errors: bool = false, | |
debug_incremental: bool = false, | |
/// Normally when you create a `Compilation`, Zig will automatically build | |
/// and link in required dependencies, such as compiler-rt and libc. When | |
/// building such dependencies themselves, this flag must be set to avoid | |
/// infinite recursion. | |
skip_linker_dependencies: bool = false, | |
hash_style: link.File.Elf.HashStyle = .both, | |
entry: Entry = .default, | |
force_undefined_symbols: std.StringArrayHashMapUnmanaged(void) = .{}, | |
stack_size: ?u64 = null, | |
image_base: ?u64 = null, | |
version: ?std.SemanticVersion = null, | |
compatibility_version: ?std.SemanticVersion = null, | |
libc_installation: ?*const LibCInstallation = null, | |
native_system_include_paths: []const []const u8 = &.{}, | |
clang_preprocessor_mode: ClangPreprocessorMode = .no, | |
reference_trace: ?u32 = null, | |
test_filters: []const []const u8 = &.{}, | |
test_name_prefix: ?[]const u8 = null, | |
test_runner_path: ?[]const u8 = null, | |
subsystem: ?std.Target.SubSystem = null, | |
/// (Zig compiler development) Enable dumping linker's state as JSON. | |
enable_link_snapshots: bool = false, | |
/// (Darwin) Install name of the dylib | |
install_name: ?[]const u8 = null, | |
/// (Darwin) Path to entitlements file | |
entitlements: ?[]const u8 = null, | |
/// (Darwin) size of the __PAGEZERO segment | |
pagezero_size: ?u64 = null, | |
/// (Darwin) set minimum space for future expansion of the load commands | |
headerpad_size: ?u32 = null, | |
/// (Darwin) set enough space as if all paths were MAXPATHLEN
headerpad_max_install_names: bool = false, | |
/// (Darwin) remove dylibs that are unreachable by the entry point or exported symbols | |
dead_strip_dylibs: bool = false, | |
/// (Darwin) Force load all members of static archives that implement an Objective-C class or category | |
force_load_objc: bool = false, | |
libcxx_abi_version: libcxx.AbiVersion = libcxx.AbiVersion.default, | |
/// (Windows) PDB source path prefix to instruct the linker how to resolve relative | |
/// paths when consolidating CodeView streams into a single PDB file. | |
pdb_source_path: ?[]const u8 = null, | |
/// (Windows) PDB output path | |
pdb_out_path: ?[]const u8 = null, | |
error_limit: ?Compilation.Module.ErrorInt = null, | |
global_cc_argv: []const []const u8 = &.{}, | |
pub const Entry = link.File.OpenOptions.Entry; | |
}; | |
fn addModuleTableToCacheHash( | |
gpa: Allocator, | |
arena: Allocator, | |
hash: *Cache.HashHelper, | |
root_mod: *Package.Module, | |
main_mod: *Package.Module, | |
hash_type: union(enum) { path_bytes, files: *Cache.Manifest }, | |
) (error{OutOfMemory} || std.os.GetCwdError)!void { | |
var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .{}; | |
defer seen_table.deinit(gpa); | |
// root_mod and main_mod may be the same pointer. In fact they usually are. | |
// However in the case of `zig test` or `zig build` they will be different, | |
// and it's possible for one to not reference the other via the import table. | |
try seen_table.put(gpa, root_mod, {}); | |
try seen_table.put(gpa, main_mod, {}); | |
const SortByName = struct { | |
has_builtin: bool, | |
names: []const []const u8, | |
pub fn lessThan(ctx: @This(), lhs: usize, rhs: usize) bool { | |
return if (ctx.has_builtin and (lhs == 0 or rhs == 0)) | |
lhs < rhs | |
else | |
mem.lessThan(u8, ctx.names[lhs], ctx.names[rhs]); | |
} | |
}; | |
var i: usize = 0; | |
while (i < seen_table.count()) : (i += 1) { | |
const mod = seen_table.keys()[i]; | |
if (mod.isBuiltin()) { | |
// Skip builtin.zig; it is useless as an input, and we don't want to | |
// have to write it before checking for a cache hit. | |
continue; | |
} | |
cache_helpers.addModule(hash, mod); | |
switch (hash_type) { | |
.path_bytes => { | |
hash.addBytes(mod.root_src_path); | |
hash.addOptionalBytes(mod.root.root_dir.path); | |
hash.addBytes(mod.root.sub_path); | |
}, | |
.files => |man| if (mod.root_src_path.len != 0) { | |
const pkg_zig_file = try mod.root.joinString(arena, mod.root_src_path); | |
_ = try man.addFile(pkg_zig_file, null); | |
}, | |
} | |
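// Note: the sort below keeps the "builtin" dependency pinned at index 0 | |
// (when present) and orders the remaining dependencies by name, so that | |
// `hash.addListOfBytes(mod.deps.keys())` below sees a deterministic order | |
// regardless of insertion order. | |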
mod.deps.sortUnstable(SortByName{ | |
.has_builtin = mod.deps.count() >= 1 and | |
mod.deps.values()[0].isBuiltin(), | |
.names = mod.deps.keys(), | |
}); | |
hash.addListOfBytes(mod.deps.keys()); | |
const deps = mod.deps.values(); | |
try seen_table.ensureUnusedCapacity(gpa, deps.len); | |
for (deps) |dep| seen_table.putAssumeCapacity(dep, {}); | |
} | |
} | |
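// The loop above is a worklist-style traversal: the array hash map doubles | |
// as both the visited set and the queue, since appending preserves insertion | |
// order and a repeated `put` of an already-seen key is a no-op. A minimal | |
// sketch of the same idiom, with a hypothetical `Node`/`node.edges` graph | |
// standing in for modules and their deps: | |
// | |
//     var seen: std.AutoArrayHashMapUnmanaged(*Node, void) = .{}; | |
//     defer seen.deinit(gpa); | |
//     try seen.put(gpa, root, {}); | |
//     var i: usize = 0; | |
//     while (i < seen.count()) : (i += 1) { | |
//         const node = seen.keys()[i]; | |
//         for (node.edges) |edge| try seen.put(gpa, edge, {}); // dedups | |
//     } | |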
pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compilation { | |
const output_mode = options.config.output_mode; | |
const is_dyn_lib = switch (output_mode) { | |
.Obj, .Exe => false, | |
.Lib => options.config.link_mode == .Dynamic, | |
}; | |
const is_exe_or_dyn_lib = switch (output_mode) { | |
.Obj => false, | |
.Lib => is_dyn_lib, | |
.Exe => true, | |
}; | |
if (options.linker_export_table and options.linker_import_table) { | |
return error.ExportTableAndImportTableConflict; | |
} | |
const have_zcu = options.config.have_zcu; | |
const comp: *Compilation = comp: { | |
// We put the `Compilation` itself in the arena, so freeing the arena frees | |
// the `Compilation` (along with the `Module`, which is also allocated there). | |
// It's initialized later, after we prepare the initialization options. | |
const root_name = try arena.dupeZ(u8, options.root_name); | |
const use_llvm = options.config.use_llvm; | |
// The "any" values provided by resolved config only account for | |
// explicitly-provided settings. We now make them additionally account | |
// for default setting resolution. | |
const any_unwind_tables = options.config.any_unwind_tables or options.root_mod.unwind_tables; | |
const any_non_single_threaded = options.config.any_non_single_threaded or !options.root_mod.single_threaded; | |
const any_sanitize_thread = options.config.any_sanitize_thread or options.root_mod.sanitize_thread; | |
const link_eh_frame_hdr = options.link_eh_frame_hdr or any_unwind_tables; | |
const build_id = options.build_id orelse .none; | |
const link_libc = options.config.link_libc; | |
const libc_dirs = try std.zig.LibCDirs.detect( | |
arena, | |
options.zig_lib_directory.path.?, | |
options.root_mod.resolved_target.result, | |
options.root_mod.resolved_target.is_native_abi, | |
link_libc, | |
options.libc_installation, | |
); | |
// The include directories used when preprocessing .rc files are separate from the | |
// target. Which include directories are used is determined by `options.rc_includes`. | |
// | |
// Note: It should be okay that the include directories used when compiling .rc | |
// files differ from the include directories used when compiling the main | |
// binary, since the .res format is not dependent on anything ABI-related. The | |
// only relevant differences would be things like `#define` constants being | |
// different in the MinGW headers vs the MSVC headers, but any such | |
// differences would likely be a MinGW bug. | |
const rc_dirs: std.zig.LibCDirs = b: { | |
// Set the includes to .none here when there are no rc files to compile | |
var includes = if (options.rc_source_files.len > 0) options.rc_includes else .none; | |
const target = options.root_mod.resolved_target.result; | |
if (!options.root_mod.resolved_target.is_native_os or target.os.tag != .windows) { | |
switch (includes) { | |
// MSVC can't be found when the host isn't Windows, so short-circuit. | |
.msvc => return error.WindowsSdkNotFound, | |
// Skip straight to gnu since we won't be able to detect | |
// MSVC on non-Windows hosts. | |
.any => includes = .gnu, | |
.none, .gnu => {}, | |
} | |
} | |
while (true) switch (includes) { | |
.any, .msvc => break :b std.zig.LibCDirs.detect( | |
arena, | |
options.zig_lib_directory.path.?, | |
.{ | |
.cpu = target.cpu, | |
.os = target.os, | |
.abi = .msvc, | |
.ofmt = target.ofmt, | |
}, | |
options.root_mod.resolved_target.is_native_abi, | |
// The .rc preprocessor will need to know the libc include dirs even if we | |
// are not linking libc, so force 'link_libc' to true | |
true, | |
options.libc_installation, | |
) catch |err| { | |
if (includes == .any) { | |
// fall back to mingw | |
includes = .gnu; | |
continue; | |
} | |
return err; | |
}, | |
.gnu => break :b try std.zig.LibCDirs.detectFromBuilding(arena, options.zig_lib_directory.path.?, .{ | |
.cpu = target.cpu, | |
.os = target.os, | |
.abi = .gnu, | |
.ofmt = target.ofmt, | |
}), | |
.none => break :b .{ | |
.libc_include_dir_list = &[0][]u8{}, | |
.libc_installation = null, | |
.libc_framework_dir_list = &.{}, | |
.sysroot = null, | |
.darwin_sdk_layout = null, | |
}, | |
}; | |
}; | |
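// An informal summary of the resolution above (hypothetical command lines, | |
// not an exhaustive list of cases): compiling a .rc file with | |
// `-rcincludes=any` on a non-Windows host skips MSVC detection and uses the | |
// bundled MinGW headers (.gnu); on a Windows host it tries the installed | |
// MSVC/Windows SDK headers first and falls back to MinGW only if detection | |
// fails; `-rcincludes=none` uses no system include directories at all. | |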
const sysroot = options.sysroot orelse libc_dirs.sysroot; | |
const include_compiler_rt = options.want_compiler_rt orelse | |
(!options.skip_linker_dependencies and is_exe_or_dyn_lib); | |
if (include_compiler_rt and output_mode == .Obj) { | |
// For objects, this mechanism relies on what is essentially | |
// `_ = @import("compiler_rt");` being injected into the object. | |
const compiler_rt_mod = try Package.Module.create(arena, .{ | |
.global_cache_directory = options.global_cache_directory, | |
.paths = .{ | |
.root = .{ | |
.root_dir = options.zig_lib_directory, | |
}, | |
.root_src_path = "compiler_rt.zig", | |
}, | |
.fully_qualified_name = "compiler_rt", | |
.cc_argv = &.{}, | |
.inherited = .{}, | |
.global = options.config, | |
.parent = options.root_mod, | |
.builtin_mod = options.root_mod.getBuiltinDependency(), | |
}); | |
try options.root_mod.deps.putNoClobber(arena, "compiler_rt", compiler_rt_mod); | |
} | |
if (options.verbose_llvm_cpu_features) { | |
if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: { | |
const target = options.root_mod.resolved_target.result; | |
std.debug.getStderrMutex().lock(); | |
defer std.debug.getStderrMutex().unlock(); | |
const stderr = std.io.getStdErr().writer(); | |
nosuspend { | |
stderr.print("compilation: {s}\n", .{options.root_name}) catch break :print; | |
stderr.print(" target: {s}\n", .{try target.zigTriple(arena)}) catch break :print; | |
stderr.print(" cpu: {s}\n", .{target.cpu.model.name}) catch break :print; | |
stderr.print(" features: {s}\n", .{cf}) catch {}; | |
} | |
} | |
} | |
// TODO: https://github.com/ziglang/zig/issues/17969 | |
const formatted_panics = options.formatted_panics orelse (options.root_mod.optimize_mode == .Debug); | |
const error_limit = options.error_limit orelse (std.math.maxInt(u16) - 1); | |
// We put everything into the cache hash that *cannot be modified | |
// during an incremental update*. For example, one cannot change the | |
// target between updates, but one can change source files, so the | |
// target goes into the cache hash, but source files do not. This is so | |
// that we can find the same binary and incrementally update it even if | |
// there are modified source files. We do this even if outputting to | |
// the current directory because we need somewhere to store incremental | |
// compilation metadata. | |
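// For example: settings such as the target and optimization mode participate | |
// in this hash, so a `-target x86_64-linux -O ReleaseFast` build selects a | |
// different artifact directory than a Debug build of the same project, while | |
// editing main.zig between updates keeps the same directory so incremental | |
// state can be reused. | |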
const cache = try arena.create(Cache); | |
cache.* = .{ | |
.gpa = gpa, | |
.manifest_dir = try options.local_cache_directory.handle.makeOpenPath("h", .{}), | |
}; | |
cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); | |
cache.addPrefix(options.zig_lib_directory); | |
cache.addPrefix(options.local_cache_directory); | |
errdefer cache.manifest_dir.close(); | |
// This is shared hasher state common to zig source and all C source files. | |
cache.hash.addBytes(build_options.version); | |
cache.hash.add(builtin.zig_backend); | |
cache.hash.add(options.config.pie); | |
cache.hash.add(options.config.lto); | |
cache.hash.add(options.config.link_mode); | |
cache.hash.add(options.function_sections); | |
cache.hash.add(options.data_sections); | |
cache.hash.add(options.no_builtin); | |
cache.hash.add(link_libc); | |
cache.hash.add(options.config.link_libcpp); | |
cache.hash.add(options.config.link_libunwind); | |
cache.hash.add(output_mode); | |
cache_helpers.addDebugFormat(&cache.hash, options.config.debug_format); | |
cache_helpers.addOptionalEmitLoc(&cache.hash, options.emit_bin); | |
cache_helpers.addOptionalEmitLoc(&cache.hash, options.emit_implib); | |
cache_helpers.addOptionalEmitLoc(&cache.hash, options.emit_docs); | |
cache.hash.addBytes(options.root_name); | |
cache.hash.add(options.config.wasi_exec_model); | |
// TODO audit this and make sure everything is in it | |
const main_mod = options.main_mod orelse options.root_mod; | |
const comp = try arena.create(Compilation); | |
const opt_zcu: ?*Module = if (have_zcu) blk: { | |
// Pre-open the directory handles for cached ZIR code so that it does not need | |
// to redundantly happen for each AstGen operation. | |
const zir_sub_dir = "z"; | |
var local_zir_dir = try options.local_cache_directory.handle.makeOpenPath(zir_sub_dir, .{}); | |
errdefer local_zir_dir.close(); | |
const local_zir_cache: Directory = .{ | |
.handle = local_zir_dir, | |
.path = try options.local_cache_directory.join(arena, &[_][]const u8{zir_sub_dir}), | |
}; | |
var global_zir_dir = try options.global_cache_directory.handle.makeOpenPath(zir_sub_dir, .{}); | |
errdefer global_zir_dir.close(); | |
const global_zir_cache: Directory = .{ | |
.handle = global_zir_dir, | |
.path = try options.global_cache_directory.join(arena, &[_][]const u8{zir_sub_dir}), | |
}; | |
const emit_h: ?*Module.GlobalEmitH = if (options.emit_h) |loc| eh: { | |
const eh = try arena.create(Module.GlobalEmitH); | |
eh.* = .{ .loc = loc }; | |
break :eh eh; | |
} else null; | |
const std_mod = options.std_mod orelse try Package.Module.create(arena, .{ | |
.global_cache_directory = options.global_cache_directory, | |
.paths = .{ | |
.root = .{ | |
.root_dir = options.zig_lib_directory, | |
.sub_path = "std", | |
}, | |
.root_src_path = "std.zig", | |
}, | |
.fully_qualified_name = "std", | |
.cc_argv = &.{}, | |
.inherited = .{}, | |
.global = options.config, | |
.parent = options.root_mod, | |
.builtin_mod = options.root_mod.getBuiltinDependency(), | |
}); | |
const zcu = try arena.create(Module); | |
zcu.* = .{ | |
.gpa = gpa, | |
.comp = comp, | |
.main_mod = main_mod, | |
.root_mod = options.root_mod, | |
.std_mod = std_mod, | |
.global_zir_cache = global_zir_cache, | |
.local_zir_cache = local_zir_cache, | |
.emit_h = emit_h, | |
.tmp_hack_arena = std.heap.ArenaAllocator.init(gpa), | |
.error_limit = error_limit, | |
.llvm_object = null, | |
}; | |
try zcu.init(); | |
break :blk zcu; | |
} else blk: { | |
if (options.emit_h != null) return error.NoZigModuleForCHeader; | |
break :blk null; | |
}; | |
errdefer if (opt_zcu) |zcu| zcu.deinit(); | |
var system_libs = try std.StringArrayHashMapUnmanaged(SystemLib).init( | |
gpa, | |
options.system_lib_names, | |
options.system_lib_infos, | |
); | |
errdefer system_libs.deinit(gpa); | |
comp.* = .{ | |
.gpa = gpa, | |
.arena = arena, | |
.module = opt_zcu, | |
.cache_use = undefined, // populated below | |
.bin_file = null, // populated below | |
.implib_emit = null, // handled below | |
.docs_emit = null, // handled below | |
.root_mod = options.root_mod, | |
.config = options.config, | |
.zig_lib_directory = options.zig_lib_directory, | |
.local_cache_directory = options.local_cache_directory, | |
.global_cache_directory = options.global_cache_directory, | |
.emit_asm = options.emit_asm, | |
.emit_llvm_ir = options.emit_llvm_ir, | |
.emit_llvm_bc = options.emit_llvm_bc, | |
.work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa), | |
.anon_work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa), | |
.c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa), | |
.win32_resource_work_queue = if (build_options.only_core_functionality) {} else std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa), | |
.astgen_work_queue = std.fifo.LinearFifo(*Module.File, .Dynamic).init(gpa), | |
.embed_file_work_queue = std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic).init(gpa), | |
.c_source_files = options.c_source_files, | |
.rc_source_files = options.rc_source_files, | |
.cache_parent = cache, | |
.self_exe_path = options.self_exe_path, | |
.libc_include_dir_list = libc_dirs.libc_include_dir_list, | |
.libc_framework_dir_list = libc_dirs.libc_framework_dir_list, | |
.rc_include_dir_list = rc_dirs.libc_include_dir_list, | |
.thread_pool = options.thread_pool, | |
.clang_passthrough_mode = options.clang_passthrough_mode, | |
.clang_preprocessor_mode = options.clang_preprocessor_mode, | |
.verbose_cc = options.verbose_cc, | |
.verbose_air = options.verbose_air, | |
.verbose_intern_pool = options.verbose_intern_pool, | |
.verbose_generic_instances = options.verbose_generic_instances, | |
.verbose_llvm_ir = options.verbose_llvm_ir, | |
.verbose_llvm_bc = options.verbose_llvm_bc, | |
.verbose_cimport = options.verbose_cimport, | |
.verbose_llvm_cpu_features = options.verbose_llvm_cpu_features, | |
.verbose_link = options.verbose_link, | |
.disable_c_depfile = options.disable_c_depfile, | |
.reference_trace = options.reference_trace, | |
.formatted_panics = formatted_panics, | |
.time_report = options.time_report, | |
.stack_report = options.stack_report, | |
.test_filters = options.test_filters, | |
.test_name_prefix = options.test_name_prefix, | |
.debug_compiler_runtime_libs = options.debug_compiler_runtime_libs, | |
.debug_compile_errors = options.debug_compile_errors, | |
.debug_incremental = options.debug_incremental, | |
.libcxx_abi_version = options.libcxx_abi_version, | |
.root_name = root_name, | |
.sysroot = sysroot, | |
.system_libs = system_libs, | |
.version = options.version, | |
.libc_installation = libc_dirs.libc_installation, | |
.include_compiler_rt = include_compiler_rt, | |
.objects = options.link_objects, | |
.framework_dirs = options.framework_dirs, | |
.llvm_opt_bisect_limit = options.llvm_opt_bisect_limit, | |
.skip_linker_dependencies = options.skip_linker_dependencies, | |
.no_builtin = options.no_builtin, | |
.job_queued_update_builtin_zig = have_zcu, | |
.function_sections = options.function_sections, | |
.data_sections = options.data_sections, | |
.native_system_include_paths = options.native_system_include_paths, | |
.wasi_emulated_libs = options.wasi_emulated_libs, | |
.force_undefined_symbols = options.force_undefined_symbols, | |
.link_eh_frame_hdr = link_eh_frame_hdr, | |
.global_cc_argv = options.global_cc_argv, | |
}; | |
// Prevent some footguns by making the "any" fields of config reflect | |
// the default Module settings. | |
comp.config.any_unwind_tables = any_unwind_tables; | |
comp.config.any_non_single_threaded = any_non_single_threaded; | |
comp.config.any_sanitize_thread = any_sanitize_thread; | |
const lf_open_opts: link.File.OpenOptions = .{ | |
.linker_script = options.linker_script, | |
.z_nodelete = options.linker_z_nodelete, | |
.z_notext = options.linker_z_notext, | |
.z_defs = options.linker_z_defs, | |
.z_origin = options.linker_z_origin, | |
.z_nocopyreloc = options.linker_z_nocopyreloc, | |
.z_now = options.linker_z_now, | |
.z_relro = options.linker_z_relro, | |
.z_common_page_size = options.linker_z_common_page_size, | |
.z_max_page_size = options.linker_z_max_page_size, | |
.darwin_sdk_layout = libc_dirs.darwin_sdk_layout, | |
.frameworks = options.frameworks, | |
.lib_dirs = options.lib_dirs, | |
.framework_dirs = options.framework_dirs, | |
.rpath_list = options.rpath_list, | |
.symbol_wrap_set = options.symbol_wrap_set, | |
.allow_shlib_undefined = options.linker_allow_shlib_undefined, | |
.bind_global_refs_locally = options.linker_bind_global_refs_locally orelse false, | |
.compress_debug_sections = options.linker_compress_debug_sections orelse .none, | |
.module_definition_file = options.linker_module_definition_file, | |
.sort_section = options.linker_sort_section, | |
.import_symbols = options.linker_import_symbols, | |
.import_table = options.linker_import_table, | |
.export_table = options.linker_export_table, | |
.initial_memory = options.linker_initial_memory, | |
.max_memory = options.linker_max_memory, | |
.global_base = options.linker_global_base, | |
.export_symbol_names = options.linker_export_symbol_names, | |
.print_gc_sections = options.linker_print_gc_sections, | |
.print_icf_sections = options.linker_print_icf_sections, | |
.print_map = options.linker_print_map, | |
.tsaware = options.linker_tsaware, | |
.nxcompat = options.linker_nxcompat, | |
.dynamicbase = options.linker_dynamicbase, | |
.major_subsystem_version = options.major_subsystem_version, | |
.minor_subsystem_version = options.minor_subsystem_version, | |
.entry = options.entry, | |
.stack_size = options.stack_size, | |
.image_base = options.image_base, | |
.version_script = options.version_script, | |
.allow_undefined_version = options.linker_allow_undefined_version, | |
.gc_sections = options.linker_gc_sections, | |
.emit_relocs = options.link_emit_relocs, | |
.soname = options.soname, | |
.compatibility_version = options.compatibility_version, | |
.build_id = build_id, | |
.disable_lld_caching = options.disable_lld_caching or options.cache_mode == .whole, | |
.subsystem = options.subsystem, | |
.hash_style = options.hash_style, | |
.enable_link_snapshots = options.enable_link_snapshots, | |
.install_name = options.install_name, | |
.entitlements = options.entitlements, | |
.pagezero_size = options.pagezero_size, | |
.headerpad_size = options.headerpad_size, | |
.headerpad_max_install_names = options.headerpad_max_install_names, | |
.dead_strip_dylibs = options.dead_strip_dylibs, | |
.force_load_objc = options.force_load_objc, | |
.pdb_source_path = options.pdb_source_path, | |
.pdb_out_path = options.pdb_out_path, | |
.entry_addr = null, // CLI does not expose this option (yet?) | |
}; | |
switch (options.cache_mode) { | |
.incremental => { | |
// Options that are specific to zig source files, that cannot be | |
// modified between incremental updates. | |
var hash = cache.hash; | |
// Synchronize with other matching comments: ZigOnlyHashStuff | |
hash.add(use_llvm); | |
hash.add(options.config.use_lib_llvm); | |
hash.add(options.config.dll_export_fns); | |
hash.add(options.config.is_test); | |
hash.addListOfBytes(options.test_filters); | |
hash.addOptionalBytes(options.test_name_prefix); | |
hash.add(options.skip_linker_dependencies); | |
hash.add(formatted_panics); | |
hash.add(options.emit_h != null); | |
hash.add(error_limit); | |
// Here we put the root source file path name, but *not* with addFile. | |
// We want the hash to be the same regardless of the contents of the | |
// source file, because incremental compilation will handle it, but we | |
// do want to namespace different source file names because they are | |
// likely different compilations; without this namespacing we would be | |
// likely to get false cache hits. | |
try addModuleTableToCacheHash(gpa, arena, &hash, options.root_mod, main_mod, .path_bytes); | |
// In the case of incremental cache mode, this `artifact_directory` | |
// is computed based on a hash of non-linker inputs, and it is where all | |
// build artifacts are stored (even while in-progress). | |
const digest = hash.final(); | |
const artifact_sub_dir = "o" ++ std.fs.path.sep_str ++ digest; | |
var artifact_dir = try options.local_cache_directory.handle.makeOpenPath(artifact_sub_dir, .{}); | |
errdefer artifact_dir.close(); | |
const artifact_directory: Directory = .{ | |
.handle = artifact_dir, | |
.path = try options.local_cache_directory.join(arena, &[_][]const u8{artifact_sub_dir}), | |
}; | |
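// As a concrete (hypothetical) illustration, `artifact_directory` ends up | |
// looking like `zig-cache/o/1a2b3c.../` with `Cache.hex_digest_len` hex | |
// characters in the digest; both output binaries and incremental | |
// compilation metadata live under it. | |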
const incremental = try arena.create(CacheUse.Incremental); | |
incremental.* = .{ | |
.artifact_directory = artifact_directory, | |
}; | |
comp.cache_use = .{ .incremental = incremental }; | |
if (options.emit_bin) |emit_bin| { | |
const emit: Emit = .{ | |
.directory = emit_bin.directory orelse artifact_directory, | |
.sub_path = emit_bin.basename, | |
}; | |
comp.bin_file = try link.File.open(arena, comp, emit, lf_open_opts); | |
} | |
if (options.emit_implib) |emit_implib| { | |
comp.implib_emit = .{ | |
.directory = emit_implib.directory orelse artifact_directory, | |
.sub_path = emit_implib.basename, | |
}; | |
} | |
if (options.emit_docs) |emit_docs| { | |
comp.docs_emit = .{ | |
.directory = emit_docs.directory orelse artifact_directory, | |
.sub_path = emit_docs.basename, | |
}; | |
} | |
}, | |
.whole => { | |
// For whole cache mode, we don't know where to put outputs from | |
// the linker until the final cache hash, which is available after | |
// the compilation is complete. | |
// | |
// Therefore, bin_file is left null until the beginning of update(), | |
// where it may find a cache hit, or use a temporary directory to | |
// hold output artifacts. | |
const whole = try arena.create(CacheUse.Whole); | |
whole.* = .{ | |
// This is kept here so that link.File.open can be called later. | |
.lf_open_opts = lf_open_opts, | |
// This is so that when doing `CacheMode.whole`, the mechanism in update() | |
// can use it for communicating the result directory via `bin_file.emit`. | |
// This is used to distinguish between -fno-emit-bin and -femit-bin | |
// for `CacheMode.whole`. | |
// This memory will be overwritten with the real digest in update() but | |
// the basename will be preserved. | |
.bin_sub_path = try prepareWholeEmitSubPath(arena, options.emit_bin), | |
.implib_sub_path = try prepareWholeEmitSubPath(arena, options.emit_implib), | |
.docs_sub_path = try prepareWholeEmitSubPath(arena, options.emit_docs), | |
.tmp_artifact_directory = null, | |
.lock = null, | |
}; | |
comp.cache_use = .{ .whole = whole }; | |
}, | |
} | |
// Handle the case of e.g. -fno-emit-bin -femit-llvm-ir. | |
if (options.emit_bin == null and (comp.verbose_llvm_ir != null or | |
comp.verbose_llvm_bc != null or | |
(use_llvm and comp.emit_asm != null) or | |
comp.emit_llvm_ir != null or | |
comp.emit_llvm_bc != null)) | |
{ | |
if (build_options.only_c) unreachable; | |
if (opt_zcu) |zcu| zcu.llvm_object = try LlvmObject.create(arena, comp); | |
} | |
break :comp comp; | |
}; | |
errdefer comp.destroy(); | |
const target = comp.root_mod.resolved_target.result; | |
const capable_of_building_compiler_rt = canBuildLibCompilerRt(target, comp.config.use_llvm); | |
const capable_of_building_zig_libc = canBuildZigLibC(target, comp.config.use_llvm); | |
// Add a `CObject` for each `c_source_files`. | |
try comp.c_object_table.ensureTotalCapacity(gpa, options.c_source_files.len); | |
for (options.c_source_files) |c_source_file| { | |
const c_object = try gpa.create(CObject); | |
errdefer gpa.destroy(c_object); | |
c_object.* = .{ | |
.status = .{ .new = {} }, | |
.src = c_source_file, | |
}; | |
comp.c_object_table.putAssumeCapacityNoClobber(c_object, {}); | |
} | |
// Add a `Win32Resource` for each `rc_source_files` and one for `manifest_file`. | |
if (!build_options.only_core_functionality) { | |
try comp.win32_resource_table.ensureTotalCapacity(gpa, options.rc_source_files.len + @intFromBool(options.manifest_file != null)); | |
for (options.rc_source_files) |rc_source_file| { | |
const win32_resource = try gpa.create(Win32Resource); | |
errdefer gpa.destroy(win32_resource); | |
win32_resource.* = .{ | |
.status = .{ .new = {} }, | |
.src = .{ .rc = rc_source_file }, | |
}; | |
comp.win32_resource_table.putAssumeCapacityNoClobber(win32_resource, {}); | |
} | |
if (options.manifest_file) |manifest_path| { | |
const win32_resource = try gpa.create(Win32Resource); | |
errdefer gpa.destroy(win32_resource); | |
win32_resource.* = .{ | |
.status = .{ .new = {} }, | |
.src = .{ .manifest = manifest_path }, | |
}; | |
comp.win32_resource_table.putAssumeCapacityNoClobber(win32_resource, {}); | |
} | |
} | |
const have_bin_emit = switch (comp.cache_use) { | |
.whole => |whole| whole.bin_sub_path != null, | |
.incremental => comp.bin_file != null, | |
}; | |
if (have_bin_emit and !comp.skip_linker_dependencies and target.ofmt != .c) { | |
if (target.isDarwin()) { | |
switch (target.abi) { | |
.none, | |
.simulator, | |
.macabi, | |
=> {}, | |
else => return error.LibCUnavailable, | |
} | |
} | |
// If we need to build glibc for the target, add work items for it. | |
// We go through the work queue so that building can be done in parallel. | |
if (comp.wantBuildGLibCFromSource()) { | |
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; | |
if (glibc.needsCrtiCrtn(target)) { | |
try comp.work_queue.write(&[_]Job{ | |
.{ .glibc_crt_file = .crti_o }, | |
.{ .glibc_crt_file = .crtn_o }, | |
}); | |
} | |
try comp.work_queue.write(&[_]Job{ | |
.{ .glibc_crt_file = .scrt1_o }, | |
.{ .glibc_crt_file = .libc_nonshared_a }, | |
.{ .glibc_shared_objects = {} }, | |
}); | |
} | |
if (comp.wantBuildMuslFromSource()) { | |
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; | |
try comp.work_queue.ensureUnusedCapacity(6); | |
if (musl.needsCrtiCrtn(target)) { | |
comp.work_queue.writeAssumeCapacity(&[_]Job{ | |
.{ .musl_crt_file = .crti_o }, | |
.{ .musl_crt_file = .crtn_o }, | |
}); | |
} | |
comp.work_queue.writeAssumeCapacity(&[_]Job{ | |
.{ .musl_crt_file = .crt1_o }, | |
.{ .musl_crt_file = .scrt1_o }, | |
.{ .musl_crt_file = .rcrt1_o }, | |
switch (comp.config.link_mode) { | |
.Static => .{ .musl_crt_file = .libc_a }, | |
.Dynamic => .{ .musl_crt_file = .libc_so }, | |
}, | |
}); | |
} | |
if (comp.wantBuildWasiLibcFromSource()) { | |
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; | |
// worst-case we need all components | |
try comp.work_queue.ensureUnusedCapacity(comp.wasi_emulated_libs.len + 2); | |
for (comp.wasi_emulated_libs) |crt_file| { | |
comp.work_queue.writeItemAssumeCapacity(.{ | |
.wasi_libc_crt_file = crt_file, | |
}); | |
} | |
comp.work_queue.writeAssumeCapacity(&[_]Job{ | |
.{ .wasi_libc_crt_file = wasi_libc.execModelCrtFile(comp.config.wasi_exec_model) }, | |
.{ .wasi_libc_crt_file = .libc_a }, | |
}); | |
} | |
if (comp.wantBuildMinGWFromSource()) { | |
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; | |
const crt_job: Job = .{ .mingw_crt_file = if (is_dyn_lib) .dllcrt2_o else .crt2_o }; | |
try comp.work_queue.ensureUnusedCapacity(2); | |
comp.work_queue.writeItemAssumeCapacity(.{ .mingw_crt_file = .mingwex_lib }); | |
comp.work_queue.writeItemAssumeCapacity(crt_job); | |
// When linking mingw-w64 there are some import libs we always need. | |
for (mingw.always_link_libs) |name| { | |
try comp.system_libs.put(comp.gpa, name, .{ | |
.needed = false, | |
.weak = false, | |
.path = null, | |
}); | |
} | |
} | |
// Generate Windows import libs. | |
if (target.os.tag == .windows) { | |
const count = comp.system_libs.count(); | |
try comp.work_queue.ensureUnusedCapacity(count); | |
for (0..count) |i| { | |
comp.work_queue.writeItemAssumeCapacity(.{ .windows_import_lib = i }); | |
} | |
} | |
if (comp.wantBuildLibUnwindFromSource()) { | |
try comp.work_queue.writeItem(.{ .libunwind = {} }); | |
} | |
if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.link_libcpp) { | |
try comp.work_queue.writeItem(.libcxx); | |
try comp.work_queue.writeItem(.libcxxabi); | |
} | |
if (build_options.have_llvm and comp.config.any_sanitize_thread) { | |
try comp.work_queue.writeItem(.libtsan); | |
} | |
if (target.isMinGW() and comp.config.any_non_single_threaded) { | |
// LLD might drop some symbols as unused during LTO and section GC, so we | |
// force-mark them for resolution here. | |
const tls_index_sym = switch (target.cpu.arch) { | |
.x86 => "__tls_index", | |
else => "_tls_index", | |
}; | |
try comp.force_undefined_symbols.put(comp.gpa, tls_index_sym, {}); | |
} | |
if (comp.include_compiler_rt and capable_of_building_compiler_rt) { | |
if (is_exe_or_dyn_lib) { | |
log.debug("queuing a job to build compiler_rt_lib", .{}); | |
comp.job_queued_compiler_rt_lib = true; | |
} else if (output_mode != .Obj) { | |
log.debug("queuing a job to build compiler_rt_obj", .{}); | |
// In this case we are making a static library, so we ask | |
// for a compiler-rt object to put in it. | |
comp.job_queued_compiler_rt_obj = true; | |
} | |
} | |
if (!comp.skip_linker_dependencies and is_exe_or_dyn_lib and | |
!comp.config.link_libc and capable_of_building_zig_libc) | |
{ | |
try comp.work_queue.writeItem(.{ .zig_libc = {} }); | |
} | |
} | |
return comp; | |
} | |
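// A minimal sketch of the lifecycle this function anchors, assuming a | |
// hypothetical fully-populated `opts: CreateOptions` and suitable | |
// allocators: | |
// | |
//     const comp = try Compilation.create(gpa, arena, opts); | |
//     defer comp.destroy(); | |
//     var progress = std.Progress{}; | |
//     const node = progress.start("compile", 0); | |
//     try comp.update(node); | |
//     // then inspect comp.totalErrorCount() and the emitted artifacts | |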
pub fn destroy(comp: *Compilation) void { | |
if (comp.bin_file) |lf| lf.destroy(); | |
if (comp.module) |zcu| zcu.deinit(); | |
comp.cache_use.deinit(); | |
comp.work_queue.deinit(); | |
comp.anon_work_queue.deinit(); | |
comp.c_object_work_queue.deinit(); | |
if (!build_options.only_core_functionality) { | |
comp.win32_resource_work_queue.deinit(); | |
} | |
comp.astgen_work_queue.deinit(); | |
comp.embed_file_work_queue.deinit(); | |
const gpa = comp.gpa; | |
comp.system_libs.deinit(gpa); | |
{ | |
var it = comp.crt_files.iterator(); | |
while (it.next()) |entry| { | |
gpa.free(entry.key_ptr.*); | |
entry.value_ptr.deinit(gpa); | |
} | |
comp.crt_files.deinit(gpa); | |
} | |
if (comp.libunwind_static_lib) |*crt_file| { | |
crt_file.deinit(gpa); | |
} | |
if (comp.libcxx_static_lib) |*crt_file| { | |
crt_file.deinit(gpa); | |
} | |
if (comp.libcxxabi_static_lib) |*crt_file| { | |
crt_file.deinit(gpa); | |
} | |
if (comp.compiler_rt_lib) |*crt_file| { | |
crt_file.deinit(gpa); | |
} | |
if (comp.compiler_rt_obj) |*crt_file| { | |
crt_file.deinit(gpa); | |
} | |
if (comp.libc_static_lib) |*crt_file| { | |
crt_file.deinit(gpa); | |
} | |
if (comp.glibc_so_files) |*glibc_file| { | |
glibc_file.deinit(gpa); | |
} | |
for (comp.c_object_table.keys()) |key| { | |
key.destroy(gpa); | |
} | |
comp.c_object_table.deinit(gpa); | |
for (comp.failed_c_objects.values()) |bundle| { | |
bundle.destroy(gpa); | |
} | |
comp.failed_c_objects.deinit(gpa); | |
if (!build_options.only_core_functionality) { | |
for (comp.win32_resource_table.keys()) |key| { | |
key.destroy(gpa); | |
} | |
comp.win32_resource_table.deinit(gpa); | |
for (comp.failed_win32_resources.values()) |*value| { | |
value.deinit(gpa); | |
} | |
comp.failed_win32_resources.deinit(gpa); | |
} | |
for (comp.link_errors.items) |*item| item.deinit(gpa); | |
comp.link_errors.deinit(gpa); | |
for (comp.lld_errors.items) |*lld_error| { | |
lld_error.deinit(gpa); | |
} | |
comp.lld_errors.deinit(gpa); | |
comp.clearMiscFailures(); | |
comp.cache_parent.manifest_dir.close(); | |
} | |
pub fn clearMiscFailures(comp: *Compilation) void { | |
comp.alloc_failure_occurred = false; | |
for (comp.misc_failures.values()) |*value| { | |
value.deinit(comp.gpa); | |
} | |
comp.misc_failures.deinit(comp.gpa); | |
comp.misc_failures = .{}; | |
} | |
pub fn getTarget(self: Compilation) Target { | |
return self.root_mod.resolved_target.result; | |
} | |
/// Only legal to call when cache mode is incremental and a link file is present. | |
pub fn hotCodeSwap( | |
comp: *Compilation, | |
prog_node: *std.Progress.Node, | |
pid: std.ChildProcess.Id, | |
) !void { | |
const lf = comp.bin_file.?; | |
lf.child_pid = pid; | |
try lf.makeWritable(); | |
try comp.update(prog_node); | |
try lf.makeExecutable(); | |
} | |
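// A sketch of how a caller might drive hot code swapping, assuming a | |
// hypothetical `spawnChild` helper and the preconditions in the doc comment | |
// above (incremental cache mode, open link file): | |
// | |
//     const pid = try spawnChild(comp); // run the freshly built binary | |
//     // ... source files change on disk ... | |
//     try comp.hotCodeSwap(prog_node, pid); | |
//     // the child keeps running and now executes the patched binary | |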
fn cleanupAfterUpdate(comp: *Compilation) void { | |
switch (comp.cache_use) { | |
.incremental => return, | |
.whole => |whole| { | |
if (whole.cache_manifest) |man| { | |
man.deinit(); | |
whole.cache_manifest = null; | |
} | |
if (comp.bin_file) |lf| { | |
lf.destroy(); | |
comp.bin_file = null; | |
} | |
if (whole.tmp_artifact_directory) |*directory| { | |
directory.handle.close(); | |
if (directory.path) |p| comp.gpa.free(p); | |
whole.tmp_artifact_directory = null; | |
} | |
}, | |
} | |
} | |
/// Detect changes to source files, perform semantic analysis, and update the output files. | |
pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void { | |
const tracy_trace = trace(@src()); | |
defer tracy_trace.end(); | |
// This arena is scoped to this one update. | |
const gpa = comp.gpa; | |
var arena_allocator = std.heap.ArenaAllocator.init(gpa); | |
defer arena_allocator.deinit(); | |
const arena = arena_allocator.allocator(); | |
comp.clearMiscFailures(); | |
comp.last_update_was_cache_hit = false; | |
var man: Cache.Manifest = undefined; | |
defer cleanupAfterUpdate(comp); | |
var tmp_dir_rand_int: u64 = undefined; | |
// If using the whole caching strategy, we check for *everything* up front, including | |
// C source files. | |
switch (comp.cache_use) { | |
.whole => |whole| { | |
assert(comp.bin_file == null); | |
// We are about to obtain this lock, so here we give other processes a chance first. | |
whole.releaseLock(); | |
man = comp.cache_parent.obtain(); | |
whole.cache_manifest = &man; | |
try addNonIncrementalStuffToCacheManifest(comp, arena, &man); | |
const is_hit = man.hit() catch |err| { | |
const i = man.failed_file_index orelse return err; | |
const pp = man.files.items[i].prefixed_path orelse return err; | |
const prefix = man.cache.prefixes()[pp.prefix]; | |
return comp.setMiscFailure( | |
.check_whole_cache, | |
"unable to check cache: stat file '{}{s}' failed: {s}", | |
.{ prefix, pp.sub_path, @errorName(err) }, | |
); | |
}; | |
if (is_hit) { | |
comp.last_update_was_cache_hit = true; | |
log.debug("CacheMode.whole cache hit for {s}", .{comp.root_name}); | |
const digest = man.final(); | |
comp.wholeCacheModeSetBinFilePath(whole, &digest); | |
assert(whole.lock == null); | |
whole.lock = man.toOwnedLock(); | |
return; | |
} | |
log.debug("CacheMode.whole cache miss for {s}", .{comp.root_name}); | |
// Compile the artifacts to a temporary directory. | |
const tmp_artifact_directory = d: { | |
const s = std.fs.path.sep_str; | |
tmp_dir_rand_int = std.crypto.random.int(u64); | |
const tmp_dir_sub_path = "tmp" ++ s ++ Package.Manifest.hex64(tmp_dir_rand_int); | |
const path = try comp.local_cache_directory.join(gpa, &.{tmp_dir_sub_path}); | |
errdefer gpa.free(path); | |
const handle = try comp.local_cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{}); | |
errdefer handle.close(); | |
break :d .{ | |
.path = path, | |
.handle = handle, | |
}; | |
}; | |
whole.tmp_artifact_directory = tmp_artifact_directory; | |
// Now that the directory is known, it is time to create the Emit | |
// objects and call link.File.open. | |
if (whole.implib_sub_path) |sub_path| { | |
comp.implib_emit = .{ | |
.directory = tmp_artifact_directory, | |
.sub_path = std.fs.path.basename(sub_path), | |
}; | |
} | |
if (whole.docs_sub_path) |sub_path| { | |
comp.docs_emit = .{ | |
.directory = tmp_artifact_directory, | |
.sub_path = std.fs.path.basename(sub_path), | |
}; | |
} | |
if (whole.bin_sub_path) |sub_path| { | |
const emit: Emit = .{ | |
.directory = tmp_artifact_directory, | |
.sub_path = std.fs.path.basename(sub_path), | |
}; | |
comp.bin_file = try link.File.createEmpty(arena, comp, emit, whole.lf_open_opts); | |
} | |
}, | |
.incremental => {}, | |
} | |
// For compiling C objects, we rely on the cache hash system to avoid duplicating work. | |
// Add a Job for each C object. | |
try comp.c_object_work_queue.ensureUnusedCapacity(comp.c_object_table.count()); | |
for (comp.c_object_table.keys()) |key| { | |
comp.c_object_work_queue.writeItemAssumeCapacity(key); | |
} | |
// For compiling Win32 resources, we rely on the cache hash system to avoid duplicating work. | |
// Add a Job for each Win32 resource file. | |
if (!build_options.only_core_functionality) { | |
try comp.win32_resource_work_queue.ensureUnusedCapacity(comp.win32_resource_table.count()); | |
for (comp.win32_resource_table.keys()) |key| { | |
comp.win32_resource_work_queue.writeItemAssumeCapacity(key); | |
} | |
} | |
if (comp.module) |module| { | |
module.compile_log_text.shrinkAndFree(gpa, 0); | |
// Make sure std.zig is inside the import_table. We unconditionally need | |
// it for start.zig. | |
const std_mod = module.std_mod; | |
_ = try module.importPkg(std_mod); | |
// Normally we rely on importing std, which in turn imports the root | |
// source file in the start code, but when using the stage1 backend that | |
// won't happen, so in order to run AstGen on the root source file we | |
// put it into the import_table here. | |
// Likewise, in the case of `zig test`, the test runner is the root | |
// source file, and so nothing else would import the main file. | |
if (comp.config.is_test) { | |
_ = try module.importPkg(module.main_mod); | |
} | |
if (module.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| { | |
_ = try module.importPkg(compiler_rt_mod); | |
} | |
// Put a work item in for every known source file to detect if | |
// it changed, and, if so, re-compute ZIR and then queue the job | |
// to update it. | |
try comp.astgen_work_queue.ensureUnusedCapacity(module.import_table.count()); | |
for (module.import_table.values()) |file| { | |
if (file.mod.isBuiltin()) continue; | |
comp.astgen_work_queue.writeItemAssumeCapacity(file); | |
} | |
// Put a work item in for checking if any files used with `@embedFile` changed. | |
try comp.embed_file_work_queue.ensureUnusedCapacity(module.embed_table.count()); | |
for (module.embed_table.values()) |embed_file| { | |
comp.embed_file_work_queue.writeItemAssumeCapacity(embed_file); | |
} | |
try comp.work_queue.writeItem(.{ .analyze_mod = std_mod }); | |
if (comp.config.is_test) { | |
try comp.work_queue.writeItem(.{ .analyze_mod = module.main_mod }); | |
} | |
if (module.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| { | |
try comp.work_queue.writeItem(.{ .analyze_mod = compiler_rt_mod }); | |
} | |
} | |
try comp.performAllTheWork(main_progress_node); | |
if (comp.module) |module| { | |
if (build_options.enable_debug_extensions and comp.verbose_intern_pool) { | |
std.debug.print("intern pool stats for '{s}':\n", .{ | |
comp.root_name, | |
}); | |
module.intern_pool.dump(); | |
} | |
if (build_options.enable_debug_extensions and comp.verbose_generic_instances) { | |
std.debug.print("generic instances for '{s}:0x{x}':\n", .{ | |
comp.root_name, | |
@as(usize, @intFromPtr(module)), | |
}); | |
module.intern_pool.dumpGenericInstances(gpa); | |
} | |
if (comp.config.is_test and comp.totalErrorCount() == 0) { | |
// The `test_functions` decl has been intentionally postponed until now, | |
// at which point we must populate it with the list of test functions that | |
// have been discovered and not filtered out. | |
try module.populateTestFunctions(main_progress_node); | |
} | |
try module.processExports(); | |
} | |
if (comp.totalErrorCount() != 0) { | |
// Skip flushing and keep source files loaded for error reporting. | |
comp.link_error_flags = .{}; | |
return; | |
} | |
// Flush below handles -femit-bin but there is still -femit-llvm-ir, | |
// -femit-llvm-bc, and -femit-asm, in the case of C objects. | |
comp.emitOthers(); | |
switch (comp.cache_use) { | |
.whole => |whole| { | |
const digest = man.final(); | |
// Rename the temporary directory into place. | |
// Close tmp dir and link.File to avoid open handle during rename. | |
if (whole.tmp_artifact_directory) |*tmp_directory| { | |
tmp_directory.handle.close(); | |
if (tmp_directory.path) |p| gpa.free(p); | |
whole.tmp_artifact_directory = null; | |
} else unreachable; | |
const s = std.fs.path.sep_str; | |
const tmp_dir_sub_path = "tmp" ++ s ++ Package.Manifest.hex64(tmp_dir_rand_int); | |
const o_sub_path = "o" ++ s ++ digest; | |
// Work around Windows `AccessDenied` if any files within this | |
// directory are open by closing and reopening the file handles. | |
const need_writable_dance = w: { | |
if (builtin.os.tag == .windows) { | |
if (comp.bin_file) |lf| { | |
// We cannot just call `makeExecutable` as it makes a false | |
// assumption that we have a file handle open only when linking | |
// an executable file. This used to be true when our linkers | |
// were incapable of emitting relocatables and static archives. | |
// Now that they are capable, we need to unconditionally close | |
// the file handle and re-open it in the follow up call to | |
// `makeWritable`. | |
if (lf.file) |f| { | |
f.close(); | |
lf.file = null; | |
break :w true; | |
} | |
} | |
} | |
break :w false; | |
}; | |
renameTmpIntoCache(comp.local_cache_directory, tmp_dir_sub_path, o_sub_path) catch |err| { | |
return comp.setMiscFailure( | |
.rename_results, | |
"failed to rename compilation results ('{}{s}') into local cache ('{}{s}'): {s}", | |
.{ | |
comp.local_cache_directory, tmp_dir_sub_path, | |
comp.local_cache_directory, o_sub_path, | |
@errorName(err), | |
}, | |
); | |
}; | |
comp.wholeCacheModeSetBinFilePath(whole, &digest); | |
// The linker flush functions need to know the final output path | |
// for debug info purposes, because executable debug info contains | |
// references to object file paths. | |
if (comp.bin_file) |lf| { | |
lf.emit = .{ | |
.directory = comp.local_cache_directory, | |
.sub_path = whole.bin_sub_path.?, | |
}; | |
// Has to be after the `wholeCacheModeSetBinFilePath` above. | |
if (need_writable_dance) { | |
try lf.makeWritable(); | |
} | |
} | |
try flush(comp, arena, main_progress_node); | |
if (comp.totalErrorCount() != 0) return; | |
// Failure here only means an unnecessary cache miss. | |
man.writeManifest() catch |err| { | |
log.warn("failed to write cache manifest: {s}", .{@errorName(err)}); | |
}; | |
if (comp.bin_file) |lf| { | |
lf.destroy(); | |
comp.bin_file = null; | |
} | |
assert(whole.lock == null); | |
whole.lock = man.toOwnedLock(); | |
}, | |
.incremental => { | |
try flush(comp, arena, main_progress_node); | |
if (comp.totalErrorCount() != 0) return; | |
}, | |
} | |
} | |
fn flush(comp: *Compilation, arena: Allocator, prog_node: *std.Progress.Node) !void { | |
if (comp.bin_file) |lf| { | |
// This is needed before reading the error flags. | |
lf.flush(arena, prog_node) catch |err| switch (err) { | |
error.FlushFailure => {}, // error reported through link_error_flags | |
error.LLDReportedFailure => {}, // error reported via lockAndParseLldStderr | |
else => |e| return e, | |
}; | |
} | |
if (comp.module) |zcu| { | |
try link.File.C.flushEmitH(zcu); | |
if (zcu.llvm_object) |llvm_object| { | |
if (build_options.only_c) unreachable; | |
const default_emit = switch (comp.cache_use) { | |
.whole => |whole| .{ | |
.directory = whole.tmp_artifact_directory.?, | |
.sub_path = "dummy", | |
}, | |
.incremental => |incremental| .{ | |
.directory = incremental.artifact_directory, | |
.sub_path = "dummy", | |
}, | |
}; | |
try emitLlvmObject(comp, arena, default_emit, null, llvm_object, prog_node); | |
} | |
} | |
if (comp.totalErrorCount() == 0) { | |
try maybeGenerateAutodocs(comp, prog_node); | |
} | |
} | |
/// This function is called by the frontend before flush(). It renames the | |
/// `[zig-cache]/tmp/[random]` directory holding the in-progress compilation | |
/// results into its final `[zig-cache]/o/[digest]` location using a plain | |
/// file system rename, retrying around Windows quirks and already-existing | |
/// destinations. Some linker backends care about the file paths of the | |
/// objects they are linking, so after this call the frontend updates | |
/// `bin_file.emit` to observe the new directory path. | |
/// This function is only called when CacheMode is `whole`. | |
fn renameTmpIntoCache( | |
cache_directory: Compilation.Directory, | |
tmp_dir_sub_path: []const u8, | |
o_sub_path: []const u8, | |
) !void { | |
var seen_eaccess = false; | |
while (true) { | |
std.fs.rename( | |
cache_directory.handle, | |
tmp_dir_sub_path, | |
cache_directory.handle, | |
o_sub_path, | |
) catch |err| switch (err) { | |
// On Windows, rename fails with `AccessDenied` rather than `PathAlreadyExists`. | |
// See https://github.com/ziglang/zig/issues/8362 | |
error.AccessDenied => switch (builtin.os.tag) { | |
.windows => { | |
if (seen_eaccess) return error.AccessDenied; | |
seen_eaccess = true; | |
try cache_directory.handle.deleteTree(o_sub_path); | |
continue; | |
}, | |
else => return error.AccessDenied, | |
}, | |
error.PathAlreadyExists => { | |
try cache_directory.handle.deleteTree(o_sub_path); | |
continue; | |
}, | |
error.FileNotFound => { | |
try cache_directory.handle.makePath("o"); | |
continue; | |
}, | |
else => |e| return e, | |
}; | |
break; | |
} | |
} | |
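// The retry protocol above makes racing compilations that produce the same | |
// digest benign: the loser of the rename sees `PathAlreadyExists` (or | |
// `AccessDenied` on Windows), deletes the existing destination tree, and | |
// retries, while `FileNotFound` indicates a fresh cache and is handled by | |
// creating the `o` parent directory first. | |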
fn maybeGenerateAutodocs(comp: *Compilation, prog_node: *std.Progress.Node) !void { | |
const mod = comp.module orelse return; | |
// TODO: do this in a separate job during performAllTheWork(). The | |
// file copies at the end of generate() can also be extracted to | |
// separate jobs | |
if (!build_options.only_c and !build_options.only_core_functionality) { | |
if (comp.docs_emit) |emit| { | |
var dir = try emit.directory.handle.makeOpenPath(emit.sub_path, .{}); | |
defer dir.close(); | |
var sub_prog_node = prog_node.start("Generating documentation", 0); | |
sub_prog_node.activate(); | |
sub_prog_node.context.refresh(); | |
defer sub_prog_node.end(); | |
try Autodoc.generate(mod, dir); | |
} | |
} | |
} | |
/// Communicate the output binary location to parent Compilations. | |
fn wholeCacheModeSetBinFilePath( | |
comp: *Compilation, | |
whole: *CacheUse.Whole, | |
digest: *const [Cache.hex_digest_len]u8, | |
) void { | |
const digest_start = 2; // "o/[digest]/[basename]" | |
if (whole.bin_sub_path) |sub_path| { | |
@memcpy(sub_path[digest_start..][0..digest.len], digest); | |
} | |
if (whole.implib_sub_path) |sub_path| { | |
@memcpy(sub_path[digest_start..][0..digest.len], digest); | |
comp.implib_emit = .{ | |
.directory = comp.local_cache_directory, | |
.sub_path = sub_path, | |
}; | |
} | |
if (whole.docs_sub_path) |sub_path| { | |
@memcpy(sub_path[digest_start..][0..digest.len], digest); | |
comp.docs_emit = .{ | |
.directory = comp.local_cache_directory, | |
.sub_path = sub_path, | |
}; | |
} | |
} | |
fn prepareWholeEmitSubPath(arena: Allocator, opt_emit: ?EmitLoc) error{OutOfMemory}!?[]u8 { | |
const emit = opt_emit orelse return null; | |
if (emit.directory != null) return null; | |
const s = std.fs.path.sep_str; | |
const format = "o" ++ s ++ ("x" ** Cache.hex_digest_len) ++ s ++ "{s}"; | |
return try std.fmt.allocPrint(arena, format, .{emit.basename}); | |
} | |
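// Together with `wholeCacheModeSetBinFilePath` above, this implements a | |
// patch-in-place scheme: the sub path is pre-rendered with a placeholder | |
// digest so that only the digest bytes need rewriting once the real hash is | |
// known. Illustration (hypothetical basename, shortened digest): | |
// | |
//     prepareWholeEmitSubPath  => "o/xxxxxxxxxxxx.../foo.exe" | |
//     @memcpy(sub_path[2..][0..digest.len], digest) | |
//                              => "o/1a2b3c4d5e6f.../foo.exe" | |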
/// This is only observed at compile-time and used to emit a compile error | |
/// to remind the programmer to update multiple related pieces of code that | |
/// are in different locations. Bump this number when adding or deleting | |
/// anything from the link cache manifest. | |
pub const link_hash_implementation_version = 12; | |
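// The companion check lives at the top of | |
// `addNonIncrementalStuffToCacheManifest` below: | |
// | |
//     comptime assert(link_hash_implementation_version == 12); | |
// | |
// Bumping the constant without reviewing the manifest code (or vice versa) | |
// fails to compile, forcing both sites to change together. | |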
fn addNonIncrementalStuffToCacheManifest( | |
comp: *Compilation, | |
arena: Allocator, | |
man: *Cache.Manifest, | |
) !void { | |
const gpa = comp.gpa; | |
comptime assert(link_hash_implementation_version == 12); | |
if (comp.module) |mod| { | |
try addModuleTableToCacheHash(gpa, arena, &man.hash, mod.root_mod, mod.main_mod, .{ .files = man }); | |
// Synchronize with other matching comments: ZigOnlyHashStuff | |
man.hash.addListOfBytes(comp.test_filters); | |
man.hash.addOptionalBytes(comp.test_name_prefix); | |
man.hash.add(comp.skip_linker_dependencies); | |
man.hash.add(comp.formatted_panics); | |
man.hash.add(mod.emit_h != null); | |
man.hash.add(mod.error_limit); | |
} else { | |
cache_helpers.addModule(&man.hash, comp.root_mod); | |
} | |
for (comp.objects) |obj| { | |
_ = try man.addFile(obj.path, null); | |
man.hash.add(obj.must_link); | |
man.hash.add(obj.loption); | |
} | |
for (comp.c_object_table.keys()) |key| { | |
_ = try man.addFile(key.src.src_path, null); | |
man.hash.addOptional(key.src.ext); | |
man.hash.addListOfBytes(key.src.extra_flags); | |
} | |
if (!build_options.only_core_functionality) { | |
for (comp.win32_resource_table.keys()) |key| { | |
switch (key.src) { | |
.rc => |rc_src| { | |
_ = try man.addFile(rc_src.src_path, null); | |
man.hash.addListOfBytes(rc_src.extra_flags); | |
}, | |
.manifest => |manifest_path| { | |
_ = try man.addFile(manifest_path, null); | |
}, | |
} | |
} | |
} | |
man.hash.add(comp.config.use_llvm); | |
man.hash.add(comp.config.use_lib_llvm); | |
man.hash.add(comp.config.is_test); | |
man.hash.add(comp.config.import_memory); | |
man.hash.add(comp.config.export_memory); | |
man.hash.add(comp.config.shared_memory); | |
man.hash.add(comp.config.dll_export_fns); | |
man.hash.add(comp.config.rdynamic); | |
man.hash.addOptionalBytes(comp.sysroot); | |
man.hash.addOptional(comp.version); | |
man.hash.add(comp.link_eh_frame_hdr); | |
man.hash.add(comp.skip_linker_dependencies); | |
man.hash.add(comp.include_compiler_rt); | |
man.hash.addListOfBytes(comp.rc_include_dir_list); | |
man.hash.addListOfBytes(comp.force_undefined_symbols.keys()); | |
man.hash.addListOfBytes(comp.framework_dirs); | |
try link.hashAddSystemLibs(man, comp.system_libs); | |
cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_asm); | |
cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_ir); | |
cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_bc); | |
man.hash.addListOfBytes(comp.global_cc_argv); | |
const opts = comp.cache_use.whole.lf_open_opts; | |
try man.addOptionalFile(opts.linker_script); | |
try man.addOptionalFile(opts.version_script); | |
man.hash.add(opts.allow_undefined_version); | |
man.hash.addOptional(opts.stack_size); | |
man.hash.addOptional(opts.image_base); | |
man.hash.addOptional(opts.gc_sections); | |
man.hash.add(opts.emit_relocs); | |
man.hash.addListOfBytes(opts.lib_dirs); | |
man.hash.addListOfBytes(opts.rpath_list); | |
man.hash.addListOfBytes(opts.symbol_wrap_set.keys()); | |
if (comp.config.link_libc) { | |
man.hash.add(comp.libc_installation != null); | |
const target = comp.root_mod.resolved_target.result; | |
if (comp.libc_installation) |libc_installation| { | |
man.hash.addOptionalBytes(libc_installation.crt_dir); | |
if (target.abi == .msvc) { | |
man.hash.addOptionalBytes(libc_installation.msvc_lib_dir); | |
man.hash.addOptionalBytes(libc_installation.kernel32_lib_dir); | |
} | |
} | |
man.hash.addOptionalBytes(target.dynamic_linker.get()); | |
} | |
man.hash.addOptional(opts.allow_shlib_undefined); | |
man.hash.add(opts.bind_global_refs_locally); | |
// ELF specific stuff | |
man.hash.add(opts.z_nodelete); | |
man.hash.add(opts.z_notext); | |
man.hash.add(opts.z_defs); | |
man.hash.add(opts.z_origin); | |
man.hash.add(opts.z_nocopyreloc); | |
man.hash.add(opts.z_now); | |
man.hash.add(opts.z_relro); | |
man.hash.add(opts.z_common_page_size orelse 0); | |
man.hash.add(opts.z_max_page_size orelse 0); | |
man.hash.add(opts.hash_style); | |
man.hash.add(opts.compress_debug_sections); | |
man.hash.addOptional(opts.sort_section); | |
man.hash.addOptionalBytes(opts.soname); | |
man.hash.add(opts.build_id); | |
// WASM specific stuff | |
man.hash.addOptional(opts.initial_memory); | |
man.hash.addOptional(opts.max_memory); | |
man.hash.addOptional(opts.global_base); | |
man.hash.addListOfBytes(opts.export_symbol_names); | |
// Mach-O specific stuff | |
try link.File.MachO.hashAddFrameworks(man, opts.frameworks); | |
try man.addOptionalFile(opts.entitlements); | |
man.hash.addOptional(opts.pagezero_size); | |
man.hash.addOptional(opts.headerpad_size); | |
man.hash.add(opts.headerpad_max_install_names); | |
man.hash.add(opts.dead_strip_dylibs); | |
man.hash.add(opts.force_load_objc); | |
// COFF specific stuff | |
man.hash.addOptional(opts.subsystem); | |
man.hash.add(opts.tsaware); | |
man.hash.add(opts.nxcompat); | |
man.hash.add(opts.dynamicbase); | |
man.hash.addOptional(opts.major_subsystem_version); | |
man.hash.addOptional(opts.minor_subsystem_version); | |
} | |
fn emitOthers(comp: *Compilation) void { | |
if (comp.config.output_mode != .Obj or comp.module != null or | |
comp.c_object_table.count() == 0) | |
{ | |
return; | |
} | |
const obj_path = comp.c_object_table.keys()[0].status.success.object_path; | |
const cwd = std.fs.cwd(); | |
const ext = std.fs.path.extension(obj_path); | |
const basename = obj_path[0 .. obj_path.len - ext.len]; | |
// This obj path always ends with the object file extension, but if we change the | |
// extension to .ll, .bc, or .s, then it will be the path to those things. | |
const outs = [_]struct { | |
emit: ?EmitLoc, | |
ext: []const u8, | |
}{ | |
.{ .emit = comp.emit_asm, .ext = ".s" }, | |
.{ .emit = comp.emit_llvm_ir, .ext = ".ll" }, | |
.{ .emit = comp.emit_llvm_bc, .ext = ".bc" }, | |
}; | |
for (outs) |out| { | |
if (out.emit) |loc| { | |
if (loc.directory) |directory| { | |
const src_path = std.fmt.allocPrint(comp.gpa, "{s}{s}", .{ | |
basename, out.ext, | |
}) catch |err| { | |
log.err("unable to copy {s}{s}: {s}", .{ basename, out.ext, @errorName(err) }); | |
continue; | |
}; | |
defer comp.gpa.free(src_path); | |
cwd.copyFile(src_path, directory.handle, loc.basename, .{}) catch |err| { | |
log.err("unable to copy {s}: {s}", .{ src_path, @errorName(err) }); | |
}; | |
} | |
} | |
} | |
} | |
pub fn emitLlvmObject( | |
comp: *Compilation, | |
arena: Allocator, | |
default_emit: Emit, | |
bin_emit_loc: ?EmitLoc, | |
llvm_object: *LlvmObject, | |
prog_node: *std.Progress.Node, | |
) !void { | |
if (build_options.only_c) @compileError("unreachable"); | |
var sub_prog_node = prog_node.start("LLVM Emit Object", 0); | |
sub_prog_node.activate(); | |
sub_prog_node.context.refresh(); | |
defer sub_prog_node.end(); | |
try llvm_object.emit(.{ | |
.pre_ir_path = comp.verbose_llvm_ir, | |
.pre_bc_path = comp.verbose_llvm_bc, | |
.bin_path = try resolveEmitLoc(arena, default_emit, bin_emit_loc), | |
.asm_path = try resolveEmitLoc(arena, default_emit, comp.emit_asm), | |
.post_ir_path = try resolveEmitLoc(arena, default_emit, comp.emit_llvm_ir), | |
.post_bc_path = try resolveEmitLoc(arena, default_emit, comp.emit_llvm_bc), | |
.is_debug = comp.root_mod.optimize_mode == .Debug, | |
.is_small = comp.root_mod.optimize_mode == .ReleaseSmall, | |
.time_report = comp.time_report, | |
.sanitize_thread = comp.config.any_sanitize_thread, | |
.lto = comp.config.lto, | |
}); | |
} | |
fn resolveEmitLoc( | |
arena: Allocator, | |
default_emit: Emit, | |
opt_loc: ?EmitLoc, | |
) Allocator.Error!?[*:0]const u8 { | |
const loc = opt_loc orelse return null; | |
const slice = if (loc.directory) |directory| | |
try directory.joinZ(arena, &.{loc.basename}) | |
else | |
try default_emit.basenamePath(arena, loc.basename); | |
return slice.ptr; | |
} | |
fn reportMultiModuleErrors(mod: *Module) !void { | |
// Some cases can produce a large number of multi-module errors, and | |
// printing them all is not helpful, so we cap how many we emit. | |
var num_errors: u32 = 0; | |
const max_errors = 5; | |
// Attach the "some omitted" note to the final error message | |
var last_err: ?*Module.ErrorMsg = null; | |
for (mod.import_table.values()) |file| { | |
if (!file.multi_pkg) continue; | |
num_errors += 1; | |
if (num_errors > max_errors) continue; | |
const err = err_blk: { | |
// Like with errors, let's cap the number of notes to prevent a huge error spew. | |
const max_notes = 5; | |
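// `-|` is saturating subtraction: `omitted` bottoms out at 0 when there | |
// are fewer references than `max_notes`. | |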
const omitted = file.references.items.len -| max_notes; | |
const num_notes = file.references.items.len - omitted; | |
const notes = try mod.gpa.alloc(Module.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes); | |
errdefer mod.gpa.free(notes); | |
for (notes[0..num_notes], file.references.items[0..num_notes], 0..) |*note, ref, i| { | |
errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa); | |
note.* = switch (ref) { | |
.import => |loc| blk: { | |
break :blk try Module.ErrorMsg.init( | |
mod.gpa, | |
loc, | |
"imported from module {s}", | |
.{loc.file_scope.mod.fully_qualified_name}, | |
); | |
}, | |
.root => |pkg| blk: { | |
break :blk try Module.ErrorMsg.init( | |
mod.gpa, | |
.{ .file_scope = file, .parent_decl_node = 0, .lazy = .entire_file }, | |
"root of module {s}", | |
.{pkg.fully_qualified_name}, | |
); | |
}, | |
}; | |
} | |
errdefer for (notes[0..num_notes]) |*n| n.deinit(mod.gpa); | |
if (omitted > 0) { | |
notes[num_notes] = try Module.ErrorMsg.init( | |
mod.gpa, | |
.{ .file_scope = file, .parent_decl_node = 0, .lazy = .entire_file }, | |
"{} more references omitted", | |
.{omitted}, | |
); | |
} | |
errdefer if (omitted > 0) notes[num_notes].deinit(mod.gpa); | |
const err = try Module.ErrorMsg.create( | |
mod.gpa, | |
.{ .file_scope = file, .parent_decl_node = 0, .lazy = .entire_file }, | |
"file exists in multiple modules", | |
.{}, | |
); | |
err.notes = notes; | |
break :err_blk err; | |
}; | |
errdefer err.destroy(mod.gpa); | |
try mod.failed_files.putNoClobber(mod.gpa, file, err); | |
last_err = err; | |
} | |
// If we omitted any errors, add a note saying that | |
if (num_errors > max_errors) { | |
const err = last_err.?; | |
// There isn't really any meaningful place to put this note, so just attach it to the | |
// last failed file | |
var note = try Module.ErrorMsg.init( | |
mod.gpa, | |
err.src_loc, | |
"{} more errors omitted", | |
.{num_errors - max_errors}, | |
); | |
errdefer note.deinit(mod.gpa); | |
const i = err.notes.len; | |
err.notes = try mod.gpa.realloc(err.notes, i + 1); | |
err.notes[i] = note; | |
} | |
// Now that we've reported the errors, we need to deal with | |
// dependencies. Any file referenced by a multi_pkg file should also be | |
// marked multi_pkg and have its status set to astgen_failure, since it is | |
// ambiguous which module such a file should be analyzed as part of. We | |
// must set this flag only after reporting the errors, however; otherwise | |
// we'd get an error for every single downstream file, which wouldn't be | |
// very useful. | |
for (mod.import_table.values()) |file| { | |
if (file.multi_pkg) file.recursiveMarkMultiPkg(mod); | |
} | |
} | |
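// The `-|` saturating subtraction above is what makes the note cap safe: when | |
// a file has fewer references than `max_notes`, the subtraction clamps to | |
// zero instead of causing integer underflow. A minimal sketch: | |
test "saturating subtraction clamps at zero" { | |
    const max_notes: usize = 5; | |
    try std.testing.expectEqual(@as(usize, 2), 7 -| max_notes); | |
    try std.testing.expectEqual(@as(usize, 0), 3 -| max_notes); | |
} | |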
/// Having the file open for writing is problematic as far as executing the | |
/// binary is concerned. This will remove the write flag, or close the file, | |
/// or whatever is needed so that it can be executed. | |
/// After this, one must call `makeBinFileWritable` before calling `update`. | |
pub fn makeBinFileExecutable(comp: *Compilation) !void { | |
const lf = comp.bin_file orelse return; | |
return lf.makeExecutable(); | |
} | |
pub fn makeBinFileWritable(comp: *Compilation) !void { | |
const lf = comp.bin_file orelse return; | |
return lf.makeWritable(); | |
} | |
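// Illustrative call sequence (a sketch; the surrounding orchestration is | |
// hypothetical): flip the output to executable before running it, then make | |
// it writable again before the next incremental update. | |
fn executeThenUpdateExample(comp: *Compilation) !void { | |
    try comp.makeBinFileExecutable(); | |
    // ... spawn and wait on the emitted binary here ... | |
    try comp.makeBinFileWritable(); | |
    // Now it is safe to call `update` again. | |
} | |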
const Header = extern struct { | |
intern_pool: extern struct { | |
items_len: u32, | |
extra_len: u32, | |
limbs_len: u32, | |
string_bytes_len: u32, | |
tracked_insts_len: u32, | |
src_hash_deps_len: u32, | |
decl_val_deps_len: u32, | |
namespace_deps_len: u32, | |
namespace_name_deps_len: u32, | |
first_dependency_len: u32, | |
dep_entries_len: u32, | |
free_dep_entries_len: u32, | |
}, | |
}; | |
/// Note that all state that is included in the cache hash namespace is *not* | |
/// saved, such as the target and most CLI flags. A cache hit will only occur | |
/// when subsequent compiler invocations use the same set of flags. | |
pub fn saveState(comp: *Compilation) !void { | |
var bufs_list: [19]std.os.iovec_const = undefined; | |
var bufs_len: usize = 0; | |
const lf = comp.bin_file orelse return; | |
if (comp.module) |zcu| { | |
const ip = &zcu.intern_pool; | |
const header: Header = .{ | |
.intern_pool = .{ | |
.items_len = @intCast(ip.items.len), | |
.extra_len = @intCast(ip.extra.items.len), | |
.limbs_len = @intCast(ip.limbs.items.len), | |
.string_bytes_len = @intCast(ip.string_bytes.items.len), | |
.tracked_insts_len = @intCast(ip.tracked_insts.count()), | |
.src_hash_deps_len = @intCast(ip.src_hash_deps.count()), | |
.decl_val_deps_len = @intCast(ip.decl_val_deps.count()), | |
.namespace_deps_len = @intCast(ip.namespace_deps.count()), | |
.namespace_name_deps_len = @intCast(ip.namespace_name_deps.count()), | |
.first_dependency_len = @intCast(ip.first_dependency.count()), | |
.dep_entries_len = @intCast(ip.dep_entries.items.len), | |
.free_dep_entries_len = @intCast(ip.free_dep_entries.items.len), | |
}, | |
}; | |
addBuf(&bufs_list, &bufs_len, mem.asBytes(&header)); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items)); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items)); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); | |
addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.keys())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.values())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.decl_val_deps.keys())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.decl_val_deps.values())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_deps.keys())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_deps.values())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_name_deps.keys())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_name_deps.values())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.first_dependency.keys())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.first_dependency.values())); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.dep_entries.items)); | |
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.free_dep_entries.items)); | |
// TODO: compilation errors | |
// TODO: files | |
// TODO: namespaces | |
// TODO: decls | |
// TODO: linker state | |
} | |
var basename_buf: [255]u8 = undefined; | |
const basename = std.fmt.bufPrint(&basename_buf, "{s}.zcs", .{ | |
comp.root_name, | |
}) catch o: { | |
basename_buf[basename_buf.len - 4 ..].* = ".zcs".*; | |
break :o &basename_buf; | |
}; | |
// Using an atomic file prevents a crash or power failure from corrupting | |
// the previous incremental compilation state. | |
var af = try lf.emit.directory.handle.atomicFile(basename, .{}); | |
defer af.deinit(); | |
try af.file.pwritevAll(bufs_list[0..bufs_len], 0); | |
try af.finish(); | |
} | |
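// The basename formatting in saveState falls back to stamping ".zcs" over the | |
// end of the buffer when the root name does not fit. A minimal sketch of the | |
// same bufPrint-with-fallback pattern: | |
test "bufPrint falls back when the buffer is too small" { | |
    var buf: [8]u8 = undefined; | |
    const name = std.fmt.bufPrint(&buf, "{s}.zcs", .{"averylongrootname"}) catch o: { | |
        buf[buf.len - 4 ..].* = ".zcs".*; | |
        break :o &buf; | |
    }; | |
    try std.testing.expectEqualStrings(".zcs", name[name.len - 4 ..]); | |
} | |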
fn addBuf(bufs_list: []std.os.iovec_const, bufs_len: *usize, buf: []const u8) void { | |
const i = bufs_len.*; | |
bufs_len.* = i + 1; | |
bufs_list[i] = .{ | |
.iov_base = buf.ptr, | |
.iov_len = buf.len, | |
}; | |
} | |
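// saveState and addBuf together implement a gather write: each slice becomes | |
// one iovec entry, and a single pwritevAll flushes them in order. A minimal | |
// sketch of the same pattern, assuming this vintage's std.os.iovec_const API: | |
test "gather write with addBuf and pwritevAll" { | |
    var tmp = std.testing.tmpDir(.{}); | |
    defer tmp.cleanup(); | |
    var bufs_list: [2]std.os.iovec_const = undefined; | |
    var bufs_len: usize = 0; | |
    addBuf(&bufs_list, &bufs_len, "hdr"); | |
    addBuf(&bufs_list, &bufs_len, "body"); | |
    var file = try tmp.dir.createFile("state.bin", .{}); | |
    defer file.close(); | |
    try file.pwritevAll(bufs_list[0..bufs_len], 0); | |
    const contents = try tmp.dir.readFileAlloc(std.testing.allocator, "state.bin", 64); | |
    defer std.testing.allocator.free(contents); | |
    try std.testing.expectEqualStrings("hdrbody", contents); | |
} | |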
/// This function is temporally single-threaded: only one thread may be executing it at a time. | |
pub fn totalErrorCount(comp: *Compilation) u32 { | |
var total: usize = | |
comp.misc_failures.count() + | |
@intFromBool(comp.alloc_failure_occurred) + | |
comp.lld_errors.items.len; | |
for (comp.failed_c_objects.values()) |bundle| { | |
total += bundle.diags.len; | |
} | |
if (!build_options.only_core_functionality) { | |
for (comp.failed_win32_resources.values()) |errs| { | |
total += errs.errorMessageCount(); | |
} | |
} | |
if (comp.module) |module| { | |
total += module.failed_exports.count(); | |
total += module.failed_embed_files.count(); | |
for (module.failed_files.keys(), module.failed_files.values()) |file, error_msg| { | |
if (error_msg) |_| { | |
total += 1; | |
} else { | |
assert(file.zir_loaded); | |
const payload_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.compile_errors)]; | |
assert(payload_index != 0); | |
const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index); | |
total += header.data.items_len; | |
} | |
} | |
// Skip errors for Decls within files that failed parsing. | |
// When a parse error is introduced, we keep all the semantic analysis for | |
// the previous parse success, including compile errors, but we cannot | |
// emit them until the file succeeds parsing. | |
for (module.failed_decls.keys()) |key| { | |
if (module.declFileScope(key).okToReportErrors()) { | |
total += 1; | |
if (module.cimport_errors.get(key)) |errors| { | |
total += errors.errorMessageCount(); | |
} | |
} | |
} | |
if (module.emit_h) |emit_h| { | |
for (emit_h.failed_decls.keys()) |key| { | |
if (module.declFileScope(key).okToReportErrors()) { | |
total += 1; | |
} | |
} | |
} | |
if (module.global_error_set.entries.len - 1 > module.error_limit) { | |
total += 1; | |
} | |
} | |
// The "no entry point found" error only counts if there are no semantic analysis errors. | |
if (total == 0) { | |
total += @intFromBool(comp.link_error_flags.no_entry_point_found); | |
} | |
total += @intFromBool(comp.link_error_flags.missing_libc); | |
total += comp.link_errors.items.len; | |
// Compile log errors only count if there are no other errors. | |
if (total == 0) { | |
if (comp.module) |module| { | |
total += @intFromBool(module.compile_log_decls.count() != 0); | |
} | |
} | |
return @intCast(total); | |
} | |
/// This function is temporally single-threaded: only one thread may be executing it at a time. | |
pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { | |
const gpa = comp.gpa; | |
var bundle: ErrorBundle.Wip = undefined; | |
try bundle.init(gpa); | |
defer bundle.deinit(); | |
for (comp.failed_c_objects.values()) |diag_bundle| { | |
try diag_bundle.addToErrorBundle(&bundle); | |
} | |
if (!build_options.only_core_functionality) { | |
for (comp.failed_win32_resources.values()) |error_bundle| { | |
try bundle.addBundleAsRoots(error_bundle); | |
} | |
} | |
for (comp.lld_errors.items) |lld_error| { | |
const notes_len = @as(u32, @intCast(lld_error.context_lines.len)); | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.addString(lld_error.msg), | |
.notes_len = notes_len, | |
}); | |
const notes_start = try bundle.reserveNotes(notes_len); | |
for (notes_start.., lld_error.context_lines) |note, context_line| { | |
bundle.extra.items[note] = @intFromEnum(bundle.addErrorMessageAssumeCapacity(.{ | |
.msg = try bundle.addString(context_line), | |
})); | |
} | |
} | |
for (comp.misc_failures.values()) |*value| { | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.addString(value.msg), | |
.notes_len = if (value.children) |b| b.errorMessageCount() else 0, | |
}); | |
if (value.children) |b| try bundle.addBundleAsNotes(b); | |
} | |
if (comp.alloc_failure_occurred) { | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.addString("memory allocation failure"), | |
}); | |
} | |
if (comp.module) |module| { | |
for (module.failed_files.keys(), module.failed_files.values()) |file, error_msg| { | |
if (error_msg) |msg| { | |
try addModuleErrorMsg(module, &bundle, msg.*); | |
} else { | |
// Must be ZIR errors. Note that this may include AST errors. | |
// addZirErrorMessages asserts that the tree is loaded. | |
_ = try file.getTree(gpa); | |
try addZirErrorMessages(&bundle, file); | |
} | |
} | |
for (module.failed_embed_files.values()) |error_msg| { | |
try addModuleErrorMsg(module, &bundle, error_msg.*); | |
} | |
for (module.failed_decls.keys(), module.failed_decls.values()) |decl_index, error_msg| { | |
// Skip errors for Decls within files that had a parse failure. | |
// We'll try again once parsing succeeds. | |
if (module.declFileScope(decl_index).okToReportErrors()) { | |
try addModuleErrorMsg(module, &bundle, error_msg.*); | |
if (module.cimport_errors.get(decl_index)) |errors| { | |
for (errors.getMessages()) |err_msg_index| { | |
const err_msg = errors.getErrorMessage(err_msg_index); | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.addString(errors.nullTerminatedString(err_msg.msg)), | |
.src_loc = if (err_msg.src_loc != .none) blk: { | |
const src_loc = errors.getSourceLocation(err_msg.src_loc); | |
break :blk try bundle.addSourceLocation(.{ | |
.src_path = try bundle.addString(errors.nullTerminatedString(src_loc.src_path)), | |
.span_start = src_loc.span_start, | |
.span_main = src_loc.span_main, | |
.span_end = src_loc.span_end, | |
.line = src_loc.line, | |
.column = src_loc.column, | |
.source_line = if (src_loc.source_line != 0) try bundle.addString(errors.nullTerminatedString(src_loc.source_line)) else 0, | |
}); | |
} else .none, | |
}); | |
} | |
} | |
} | |
} | |
if (module.emit_h) |emit_h| { | |
for (emit_h.failed_decls.keys(), emit_h.failed_decls.values()) |decl_index, error_msg| { | |
// Skip errors for Decls within files that had a parse failure. | |
// We'll try again once parsing succeeds. | |
if (module.declFileScope(decl_index).okToReportErrors()) { | |
try addModuleErrorMsg(module, &bundle, error_msg.*); | |
} | |
} | |
} | |
for (module.failed_exports.values()) |value| { | |
try addModuleErrorMsg(module, &bundle, value.*); | |
} | |
const actual_error_count = module.global_error_set.entries.len - 1; | |
if (actual_error_count > module.error_limit) { | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.printString("module used more errors than possible: used {d}, max {d}", .{ | |
actual_error_count, module.error_limit, | |
}), | |
.notes_len = 1, | |
}); | |
const notes_start = try bundle.reserveNotes(1); | |
bundle.extra.items[notes_start] = @intFromEnum(try bundle.addErrorMessage(.{ | |
.msg = try bundle.printString("use '--error-limit {d}' to increase limit", .{ | |
actual_error_count, | |
}), | |
})); | |
} | |
} | |
if (bundle.root_list.items.len == 0) { | |
if (comp.link_error_flags.no_entry_point_found) { | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.addString("no entry point found"), | |
}); | |
} | |
} | |
if (comp.link_error_flags.missing_libc) { | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.addString("libc not available"), | |
.notes_len = 2, | |
}); | |
const notes_start = try bundle.reserveNotes(2); | |
bundle.extra.items[notes_start + 0] = @intFromEnum(try bundle.addErrorMessage(.{ | |
.msg = try bundle.addString("run 'zig libc -h' to learn about libc installations"), | |
})); | |
bundle.extra.items[notes_start + 1] = @intFromEnum(try bundle.addErrorMessage(.{ | |
.msg = try bundle.addString("run 'zig targets' to see the targets for which zig can always provide libc"), | |
})); | |
} | |
for (comp.link_errors.items) |link_err| { | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.addString(link_err.msg), | |
.notes_len = @intCast(link_err.notes.len), | |
}); | |
const notes_start = try bundle.reserveNotes(@intCast(link_err.notes.len)); | |
for (link_err.notes, 0..) |note, i| { | |
bundle.extra.items[notes_start + i] = @intFromEnum(try bundle.addErrorMessage(.{ | |
.msg = try bundle.addString(note.msg), | |
})); | |
} | |
} | |
if (comp.module) |module| { | |
if (bundle.root_list.items.len == 0 and module.compile_log_decls.count() != 0) { | |
const keys = module.compile_log_decls.keys(); | |
const values = module.compile_log_decls.values(); | |
// First one will be the error; subsequent ones will be notes. | |
const err_decl = module.declPtr(keys[0]); | |
const src_loc = err_decl.nodeOffsetSrcLoc(values[0], module); | |
const err_msg = Module.ErrorMsg{ | |
.src_loc = src_loc, | |
.msg = "found compile log statement", | |
.notes = try gpa.alloc(Module.ErrorMsg, module.compile_log_decls.count() - 1), | |
}; | |
defer gpa.free(err_msg.notes); | |
for (keys[1..], 0..) |key, i| { | |
const note_decl = module.declPtr(key); | |
err_msg.notes[i] = .{ | |
.src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1], module), | |
.msg = "also here", | |
}; | |
} | |
try addModuleErrorMsg(module, &bundle, err_msg); | |
} | |
} | |
assert(comp.totalErrorCount() == bundle.root_list.items.len); | |
const compile_log_text = if (comp.module) |m| m.compile_log_text.items else ""; | |
return bundle.toOwnedBundle(compile_log_text); | |
} | |
pub const ErrorNoteHashContext = struct { | |
eb: *const ErrorBundle.Wip, | |
pub fn hash(ctx: ErrorNoteHashContext, key: ErrorBundle.ErrorMessage) u32 { | |
var hasher = std.hash.Wyhash.init(0); | |
const eb = ctx.eb.tmpBundle(); | |
hasher.update(eb.nullTerminatedString(key.msg)); | |
if (key.src_loc != .none) { | |
const src = eb.getSourceLocation(key.src_loc); | |
hasher.update(eb.nullTerminatedString(src.src_path)); | |
std.hash.autoHash(&hasher, src.line); | |
std.hash.autoHash(&hasher, src.column); | |
std.hash.autoHash(&hasher, src.span_main); | |
} | |
return @as(u32, @truncate(hasher.final())); | |
} | |
pub fn eql( | |
ctx: ErrorNoteHashContext, | |
a: ErrorBundle.ErrorMessage, | |
b: ErrorBundle.ErrorMessage, | |
b_index: usize, | |
) bool { | |
_ = b_index; | |
const eb = ctx.eb.tmpBundle(); | |
const msg_a = eb.nullTerminatedString(a.msg); | |
const msg_b = eb.nullTerminatedString(b.msg); | |
if (!mem.eql(u8, msg_a, msg_b)) return false; | |
if (a.src_loc == .none and b.src_loc == .none) return true; | |
if (a.src_loc == .none or b.src_loc == .none) return false; | |
const src_a = eb.getSourceLocation(a.src_loc); | |
const src_b = eb.getSourceLocation(b.src_loc); | |
const src_path_a = eb.nullTerminatedString(src_a.src_path); | |
const src_path_b = eb.nullTerminatedString(src_b.src_path); | |
return mem.eql(u8, src_path_a, src_path_b) and | |
src_a.line == src_b.line and | |
src_a.column == src_b.column and | |
src_a.span_main == src_b.span_main; | |
} | |
}; | |
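// ErrorNoteHashContext makes the note map hash and compare by content (message | |
// text plus source location) instead of by the interned indices themselves. | |
// A minimal sketch of the same ArrayHashMap custom-context pattern, here | |
// deduplicating plain strings by content (the types are illustrative, not | |
// the compiler's): | |
const ContentHashContext = struct { | |
    pub fn hash(ctx: ContentHashContext, key: []const u8) u32 { | |
        _ = ctx; | |
        return @truncate(std.hash.Wyhash.hash(0, key)); | |
    } | |
    pub fn eql(ctx: ContentHashContext, a: []const u8, b: []const u8, b_index: usize) bool { | |
        _ = ctx; | |
        _ = b_index; | |
        return mem.eql(u8, a, b); | |
    } | |
}; | |
test "array hash map with a content-based context" { | |
    const gpa = std.testing.allocator; | |
    var seen: std.ArrayHashMapUnmanaged([]const u8, void, ContentHashContext, true) = .{}; | |
    defer seen.deinit(gpa); | |
    const first = try seen.getOrPutContext(gpa, "note: called from here", .{}); | |
    const second = try seen.getOrPutContext(gpa, "note: called from here", .{}); | |
    try std.testing.expect(!first.found_existing); | |
    try std.testing.expect(second.found_existing); | |
} | |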
pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void { | |
const gpa = eb.gpa; | |
const ip = &mod.intern_pool; | |
const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| { | |
const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa); | |
defer gpa.free(file_path); | |
try eb.addRootErrorMessage(.{ | |
.msg = try eb.printString("unable to load '{s}': {s}", .{ | |
file_path, @errorName(err), | |
}), | |
}); | |
return; | |
}; | |
const err_span = try module_err_msg.src_loc.span(gpa); | |
const err_loc = std.zig.findLineColumn(err_source.bytes, err_span.main); | |
const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa); | |
defer gpa.free(file_path); | |
var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .{}; | |
defer ref_traces.deinit(gpa); | |
const remaining_references: ?u32 = remaining: { | |
if (mod.comp.reference_trace) |_| { | |
if (module_err_msg.hidden_references > 0) break :remaining module_err_msg.hidden_references; | |
} else { | |
if (module_err_msg.reference_trace.len > 0) break :remaining 0; | |
} | |
break :remaining null; | |
}; | |
try ref_traces.ensureTotalCapacityPrecise(gpa, module_err_msg.reference_trace.len + | |
@intFromBool(remaining_references != null)); | |
for (module_err_msg.reference_trace) |module_reference| { | |
const source = try module_reference.src_loc.file_scope.getSource(gpa); | |
const span = try module_reference.src_loc.span(gpa); | |
const loc = std.zig.findLineColumn(source.bytes, span.main); | |
const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa); | |
defer gpa.free(rt_file_path); | |
ref_traces.appendAssumeCapacity(.{ | |
.decl_name = try eb.addString(ip.stringToSlice(module_reference.decl)), | |
.src_loc = try eb.addSourceLocation(.{ | |
.src_path = try eb.addString(rt_file_path), | |
.span_start = span.start, | |
.span_main = span.main, | |
.span_end = span.end, | |
.line = @intCast(loc.line), | |
.column = @intCast(loc.column), | |
.source_line = 0, | |
}), | |
}); | |
} | |
if (remaining_references) |remaining| ref_traces.appendAssumeCapacity( | |
.{ .decl_name = remaining, .src_loc = .none }, | |
); | |
const src_loc = try eb.addSourceLocation(.{ | |
.src_path = try eb.addString(file_path), | |
.span_start = err_span.start, | |
.span_main = err_span.main, | |
.span_end = err_span.end, | |
.line = @intCast(err_loc.line), | |
.column = @intCast(err_loc.column), | |
.source_line = if (module_err_msg.src_loc.lazy == .entire_file) | |
0 | |
else | |
try eb.addString(err_loc.source_line), | |
.reference_trace_len = @intCast(ref_traces.items.len), | |
}); | |
for (ref_traces.items) |rt| { | |
try eb.addReferenceTrace(rt); | |
} | |
// De-duplicate error notes. The main use case in mind for this is | |
// too many "note: called from here" notes when eval branch quota is reached. | |
var notes: std.ArrayHashMapUnmanaged(ErrorBundle.ErrorMessage, void, ErrorNoteHashContext, true) = .{}; | |
defer notes.deinit(gpa); | |
for (module_err_msg.notes) |module_note| { | |
const source = try module_note.src_loc.file_scope.getSource(gpa); | |
const span = try module_note.src_loc.span(gpa); | |
const loc = std.zig.findLineColumn(source.bytes, span.main); | |
const note_file_path = try module_note.src_loc.file_scope.fullPath(gpa); | |
defer gpa.free(note_file_path); | |
const gop = try notes.getOrPutContext(gpa, .{ | |
.msg = try eb.addString(module_note.msg), | |
.src_loc = try eb.addSourceLocation(.{ | |
.src_path = try eb.addString(note_file_path), | |
.span_start = span.start, | |
.span_main = span.main, | |
.span_end = span.end, | |
.line = @intCast(loc.line), | |
.column = @intCast(loc.column), | |
.source_line = if (err_loc.eql(loc)) 0 else try eb.addString(loc.source_line), | |
}), | |
}, .{ .eb = eb }); | |
if (gop.found_existing) { | |
gop.key_ptr.count += 1; | |
} | |
} | |
const notes_len: u32 = @intCast(notes.entries.len); | |
try eb.addRootErrorMessage(.{ | |
.msg = try eb.addString(module_err_msg.msg), | |
.src_loc = src_loc, | |
.notes_len = notes_len, | |
}); | |
const notes_start = try eb.reserveNotes(notes_len); | |
for (notes_start.., notes.keys()) |i, note| { | |
eb.extra.items[i] = @intFromEnum(try eb.addErrorMessage(note)); | |
} | |
} | |
pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { | |
assert(file.zir_loaded); | |
assert(file.tree_loaded); | |
assert(file.source_loaded); | |
const gpa = eb.gpa; | |
const src_path = try file.fullPath(gpa); | |
defer gpa.free(src_path); | |
return eb.addZirErrorMessages(file.zir, file.tree, file.source, src_path); | |
} | |
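// The ErrorBundle.Wip flow used throughout this file, distilled: initialize a | |
// work-in-progress bundle, intern strings into it, add root messages, then | |
// convert it into an owned bundle. A minimal sketch: | |
test "building an error bundle" { | |
    const gpa = std.testing.allocator; | |
    var wip: std.zig.ErrorBundle.Wip = undefined; | |
    try wip.init(gpa); | |
    defer wip.deinit(); | |
    try wip.addRootErrorMessage(.{ | |
        .msg = try wip.addString("something went wrong"), | |
    }); | |
    var owned = try wip.toOwnedBundle(""); | |
    defer owned.deinit(gpa); | |
    try std.testing.expectEqual(@as(u32, 1), owned.errorMessageCount()); | |
} | |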
pub fn performAllTheWork( | |
comp: *Compilation, | |
main_progress_node: *std.Progress.Node, | |
) error{ TimerUnsupported, OutOfMemory }!void { | |
// Here we queue up all the AstGen tasks first, followed by C object compilation. | |
// We wait until the AstGen tasks are all completed before proceeding to the | |
// (at least for now) single-threaded main work queue. However, C object compilation | |
// only needs to be finished by the end of this function. | |
var zir_prog_node = main_progress_node.start("AST Lowering", 0); | |
defer zir_prog_node.end(); | |
var c_obj_prog_node = main_progress_node.start("Compile C Objects", comp.c_source_files.len); | |
defer c_obj_prog_node.end(); | |
var win32_resource_prog_node = main_progress_node.start("Compile Win32 Resources", comp.rc_source_files.len); | |
defer win32_resource_prog_node.end(); | |
comp.work_queue_wait_group.reset(); | |
defer comp.work_queue_wait_group.wait(); | |
{ | |
const astgen_frame = tracy.namedFrame("astgen"); | |
defer astgen_frame.end(); | |
comp.astgen_wait_group.reset(); | |
defer comp.astgen_wait_group.wait(); | |
// builtin.zig is handled specially for two reasons: | |
// 1. to avoid a race condition where zig processes truncate each other's builtin.zig files | |
// 2. optimization; in the hot path it only incurs a stat() syscall, which happens | |
// in the `astgen_wait_group`. | |
if (comp.job_queued_update_builtin_zig) b: { | |
comp.job_queued_update_builtin_zig = false; | |
const zcu = comp.module orelse break :b; | |
_ = zcu; | |
// TODO put all the modules in a flat array to make them easy to iterate. | |
var seen: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .{}; | |
defer seen.deinit(comp.gpa); | |
try seen.put(comp.gpa, comp.root_mod, {}); | |
var i: usize = 0; | |
while (i < seen.count()) : (i += 1) { | |
const mod = seen.keys()[i]; | |
for (mod.deps.values()) |dep| | |
try seen.put(comp.gpa, dep, {}); | |
const file = mod.builtin_file orelse continue; | |
comp.astgen_wait_group.start(); | |
try comp.thread_pool.spawn(workerUpdateBuiltinZigFile, .{ | |
comp, mod, file, &comp.astgen_wait_group, | |
}); | |
} | |
} | |
while (comp.astgen_work_queue.readItem()) |file| { | |
comp.astgen_wait_group.start(); | |
try comp.thread_pool.spawn(workerAstGenFile, .{ | |
comp, file, &zir_prog_node, &comp.astgen_wait_group, .root, | |
}); | |
} | |
while (comp.embed_file_work_queue.readItem()) |embed_file| { | |
comp.astgen_wait_group.start(); | |
try comp.thread_pool.spawn(workerCheckEmbedFile, .{ | |
comp, embed_file, &comp.astgen_wait_group, | |
}); | |
} | |
while (comp.c_object_work_queue.readItem()) |c_object| { | |
comp.work_queue_wait_group.start(); | |
try comp.thread_pool.spawn(workerUpdateCObject, .{ | |
comp, c_object, &c_obj_prog_node, &comp.work_queue_wait_group, | |
}); | |
} | |
if (!build_options.only_core_functionality) { | |
while (comp.win32_resource_work_queue.readItem()) |win32_resource| { | |
comp.work_queue_wait_group.start(); | |
try comp.thread_pool.spawn(workerUpdateWin32Resource, .{ | |
comp, win32_resource, &win32_resource_prog_node, &comp.work_queue_wait_group, | |
}); | |
} | |
} | |
} | |
if (comp.module) |mod| { | |
try reportMultiModuleErrors(mod); | |
try mod.flushRetryableFailures(); | |
mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); | |
mod.sema_prog_node.activate(); | |
} | |
defer if (comp.module) |mod| { | |
mod.sema_prog_node.end(); | |
mod.sema_prog_node = undefined; | |
}; | |
// In this main loop we give priority to non-anonymous Decls in the work queue, so | |
// that they can establish references to anonymous Decls, setting alive=true in the | |
// backend and preventing anonymous Decls from being prematurely destroyed. | |
while (true) { | |
if (comp.work_queue.readItem()) |work_item| { | |
try processOneJob(comp, work_item, main_progress_node); | |
continue; | |
} | |
if (comp.anon_work_queue.readItem()) |work_item| { | |
try processOneJob(comp, work_item, main_progress_node); | |
continue; | |
} | |
if (comp.module) |zcu| { | |
// If there's no work queued, check if there's anything outdated | |
// which we need to work on, and queue it if so. | |
if (try zcu.findOutdatedToAnalyze()) |outdated| { | |
switch (outdated.unwrap()) { | |
.decl => |decl| try comp.work_queue.writeItem(.{ .analyze_decl = decl }), | |
.func => |func| try comp.work_queue.writeItem(.{ .codegen_func = func }), | |
} | |
continue; | |
} | |
} | |
break; | |
} | |
if (comp.job_queued_compiler_rt_lib) { | |
comp.job_queued_compiler_rt_lib = false; | |
buildCompilerRtOneShot(comp, .Lib, &comp.compiler_rt_lib, main_progress_node); | |
} | |
if (comp.job_queued_compiler_rt_obj) { | |
comp.job_queued_compiler_rt_obj = false; | |
buildCompilerRtOneShot(comp, .Obj, &comp.compiler_rt_obj, main_progress_node); | |
} | |
} | |
fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !void { | |
switch (job) { | |
.codegen_decl => |decl_index| { | |
const module = comp.module.?; | |
const decl = module.declPtr(decl_index); | |
switch (decl.analysis) { | |
.unreferenced => unreachable, | |
.in_progress => unreachable, | |
.file_failure, | |
.sema_failure, | |
.codegen_failure, | |
.dependency_failure, | |
=> return, | |
.complete => { | |
const named_frame = tracy.namedFrame("codegen_decl"); | |
defer named_frame.end(); | |
assert(decl.has_tv); | |
if (decl.alive) { | |
try module.linkerUpdateDecl(decl_index); | |
return; | |
} | |
// Instead of sending this decl to the linker, we delete it, | |
// because we found out that it was in fact never referenced. | |
module.deleteUnusedDecl(decl_index); | |
return; | |
}, | |
} | |
}, | |
.codegen_func => |func| { | |
const named_frame = tracy.namedFrame("codegen_func"); | |
defer named_frame.end(); | |
const module = comp.module.?; | |
module.ensureFuncBodyAnalyzed(func) catch |err| switch (err) { | |
error.OutOfMemory => return error.OutOfMemory, | |
error.AnalysisFail => return, | |
}; | |
}, | |
.emit_h_decl => |decl_index| { | |
const module = comp.module.?; | |
const decl = module.declPtr(decl_index); | |
switch (decl.analysis) { | |
.unreferenced => unreachable, | |
.in_progress => unreachable, | |
.file_failure, | |
.sema_failure, | |
.dependency_failure, | |
=> return, | |
// emit-h only requires semantic analysis of the Decl to be complete; | |
// it does not depend on machine code generation succeeding. | |
.codegen_failure, .complete => { | |
const named_frame = tracy.namedFrame("emit_h_decl"); | |
defer named_frame.end(); | |
const gpa = comp.gpa; | |
const emit_h = module.emit_h.?; | |
_ = try emit_h.decl_table.getOrPut(gpa, decl_index); | |
const decl_emit_h = emit_h.declPtr(decl_index); | |
const fwd_decl = &decl_emit_h.fwd_decl; | |
fwd_decl.shrinkRetainingCapacity(0); | |
var ctypes_arena = std.heap.ArenaAllocator.init(gpa); | |
defer ctypes_arena.deinit(); | |
var dg: c_codegen.DeclGen = .{ | |
.gpa = gpa, | |
.module = module, | |
.error_msg = null, | |
.pass = .{ .decl = decl_index }, | |
.is_naked_fn = false, | |
.fwd_decl = fwd_decl.toManaged(gpa), | |
.ctypes = .{}, | |
.anon_decl_deps = .{}, | |
.aligned_anon_decls = .{}, | |
}; | |
defer { | |
dg.ctypes.deinit(gpa); | |
dg.fwd_decl.deinit(); | |
} | |
c_codegen.genHeader(&dg) catch |err| switch (err) { | |
error.AnalysisFail => { | |
try emit_h.failed_decls.put(gpa, decl_index, dg.error_msg.?); | |
return; | |
}, | |
else => |e| return e, | |
}; | |
fwd_decl.* = dg.fwd_decl.moveToUnmanaged(); | |
fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); | |
}, | |
} | |
}, | |
.analyze_decl => |decl_index| { | |
const module = comp.module.?; | |
module.ensureDeclAnalyzed(decl_index) catch |err| switch (err) { | |
error.OutOfMemory => return error.OutOfMemory, | |
error.AnalysisFail => return, | |
}; | |
const decl = module.declPtr(decl_index); | |
if (decl.kind == .@"test" and comp.config.is_test) { | |
// Tests are always emitted in test binaries. The decl_refs are created by | |
// Module.populateTestFunctions, but this will not queue body analysis, so do | |
// that now. | |
try module.ensureFuncBodyAnalysisQueued(decl.val.toIntern()); | |
} | |
}, | |
.update_line_number => |decl_index| { | |
const named_frame = tracy.namedFrame("update_line_number"); | |
defer named_frame.end(); | |
const gpa = comp.gpa; | |
const module = comp.module.?; | |
const decl = module.declPtr(decl_index); | |
const lf = comp.bin_file.?; | |
lf.updateDeclLineNumber(module, decl_index) catch |err| { | |
try module.failed_decls.ensureUnusedCapacity(gpa, 1); | |
module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( | |
gpa, | |
decl.srcLoc(module), | |
"unable to update line number: {s}", | |
.{@errorName(err)}, | |
)); | |
decl.analysis = .codegen_failure; | |
try module.retryable_failures.append(gpa, InternPool.Depender.wrap(.{ .decl = decl_index })); | |
}; | |
}, | |
.analyze_mod => |pkg| { | |
const named_frame = tracy.namedFrame("analyze_mod"); | |
defer named_frame.end(); | |
const module = comp.module.?; | |
module.semaPkg(pkg) catch |err| switch (err) { | |
error.OutOfMemory => return error.OutOfMemory, | |
error.AnalysisFail => return, | |
}; | |
}, | |
.glibc_crt_file => |crt_file| { | |
const named_frame = tracy.namedFrame("glibc_crt_file"); | |
defer named_frame.end(); | |
glibc.buildCRTFile(comp, crt_file, prog_node) catch |err| { | |
// TODO Surface more error details. | |
comp.lockAndSetMiscFailure(.glibc_crt_file, "unable to build glibc CRT file: {s}", .{ | |
@errorName(err), | |
}); | |
}; | |
}, | |
.glibc_shared_objects => { | |
const named_frame = tracy.namedFrame("glibc_shared_objects"); | |
defer named_frame.end(); | |
glibc.buildSharedObjects(comp, prog_node) catch |err| { | |
// TODO Surface more error details. | |
comp.lockAndSetMiscFailure( | |
.glibc_shared_objects, | |
"unable to build glibc shared objects: {s}", | |
.{@errorName(err)}, | |
); | |
}; | |
}, | |
.musl_crt_file => |crt_file| { | |
const named_frame = tracy.namedFrame("musl_crt_file"); | |
defer named_frame.end(); | |
musl.buildCRTFile(comp, crt_file, prog_node) catch |err| { | |
// TODO Surface more error details. | |
comp.lockAndSetMiscFailure( | |
.musl_crt_file, | |
"unable to build musl CRT file: {s}", | |
.{@errorName(err)}, | |
); | |
}; | |
}, | |
.mingw_crt_file => |crt_file| { | |
const named_frame = tracy.namedFrame("mingw_crt_file"); | |
defer named_frame.end(); | |
mingw.buildCRTFile(comp, crt_file, prog_node) catch |err| { | |
// TODO Surface more error details. | |
comp.lockAndSetMiscFailure( | |
.mingw_crt_file, | |
"unable to build mingw-w64 CRT file {s}: {s}", | |
.{ @tagName(crt_file), @errorName(err) }, | |
); | |
}; | |
}, | |
.windows_import_lib => |index| { | |
if (build_options.only_c) | |
@panic("building import libs not included in core functionality"); | |
const named_frame = tracy.namedFrame("windows_import_lib"); | |
defer named_frame.end(); | |
const link_lib = comp.system_libs.keys()[index]; | |
mingw.buildImportLib(comp, link_lib) catch |err| { | |
// TODO Surface more error details. | |
comp.lockAndSetMiscFailure( | |
.windows_import_lib, | |
"unable to generate DLL import .lib file for {s}: {s}", | |
.{ link_lib, @errorName(err) }, | |
); | |
}; | |
}, | |
.libunwind => { | |
const named_frame = tracy.namedFrame("libunwind"); | |
defer named_frame.end(); | |
libunwind.buildStaticLib(comp, prog_node) catch |err| { | |
// TODO Surface more error details. | |
comp.lockAndSetMiscFailure( | |
.libunwind, | |
"unable to build libunwind: {s}", | |
.{@errorName(err)}, | |
); | |
}; | |
}, | |
.libcxx => { | |
const named_frame = tracy.namedFrame("libcxx"); | |
defer named_frame.end(); | |
libcxx.buildLibCXX(comp, prog_node) catch |err| { | |
// TODO Surface more error details. | |
comp.lockAndSetMiscFailure( | |
.libcxx, | |
"unable to build libcxx: {s}", | |
.{@errorName(err)}, | |
); | |
}; | |
}, | |
.libcxxabi => { | |
const named_frame = tracy.namedFrame("libcxxabi"); | |
defer named_frame.end(); | |
libcxx.buildLibCXXABI(comp, prog_node) catch |err| { | |
// TODO Surface more error details. | |
comp.lockAndSetMiscFailure( | |
.libcxxabi, | |
"unable to build libcxxabi: {s}", | |
.{@errorName(err)}, | |
); | |
}; | |
}, | |
.libtsan => { | |
const named_frame = tracy.namedFrame("libtsan"); | |
defer named_frame.end(); | |
libtsan.buildTsan(comp, prog_node) catch |err| switch (err) { | |
error.OutOfMemory => return error.OutOfMemory, | |
error.SubCompilationFailed => return, // error reported already | |
else => comp.lockAndSetMiscFailure( | |
.libtsan, | |
"unable to build TSAN library: {s}", | |
.{@errorName(err)}, | |
), | |
}; | |
}, | |
.wasi_libc_crt_file => |crt_file| { | |
const named_frame = tracy.namedFrame("wasi_libc_crt_file"); | |
defer named_frame.end(); | |
wasi_libc.buildCRTFile(comp, crt_file, prog_node) catch |err| { | |
// TODO Surface more error details. | |
comp.lockAndSetMiscFailure( | |
.wasi_libc_crt_file, | |
"unable to build WASI libc CRT file: {s}", | |
.{@errorName(err)}, | |
); | |
}; | |
}, | |
.zig_libc => { | |
const named_frame = tracy.namedFrame("zig_libc"); | |
defer named_frame.end(); | |
comp.buildOutputFromZig( | |
"c.zig", | |
.Lib, | |
&comp.libc_static_lib, | |
.zig_libc, | |
prog_node, | |
) catch |err| switch (err) { | |
error.OutOfMemory => return error.OutOfMemory, | |
error.SubCompilationFailed => return, // error reported already | |
else => comp.lockAndSetMiscFailure( | |
.zig_libc, | |
"unable to build zig's multitarget libc: {s}", | |
.{@errorName(err)}, | |
), | |
}; | |
}, | |
} | |
} | |
const AstGenSrc = union(enum) { | |
root, | |
import: struct { | |
importing_file: *Module.File, | |
import_tok: std.zig.Ast.TokenIndex, | |
}, | |
}; | |
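// AstGenSrc is a tagged union: a file is either a root or was reached via an | |
// `@import` token. A minimal sketch of constructing and dispatching on such a | |
// union, mirroring the switch in reportRetryableAstGenError below: | |
test "tagged union dispatch" { | |
    const Src = union(enum) { | |
        root, | |
        import: struct { token: u32 }, | |
    }; | |
    const srcs = [_]Src{ .root, .{ .import = .{ .token = 42 } } }; | |
    var token_sum: u32 = 0; | |
    for (srcs) |src| switch (src) { | |
        .root => {}, | |
        .import => |info| token_sum += info.token, | |
    }; | |
    try std.testing.expectEqual(@as(u32, 42), token_sum); | |
} | |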
fn workerAstGenFile( | |
comp: *Compilation, | |
file: *Module.File, | |
prog_node: *std.Progress.Node, | |
wg: *WaitGroup, | |
src: AstGenSrc, | |
) void { | |
defer wg.finish(); | |
var child_prog_node = prog_node.start(file.sub_file_path, 0); | |
child_prog_node.activate(); | |
defer child_prog_node.end(); | |
const mod = comp.module.?; | |
mod.astGenFile(file) catch |err| switch (err) { | |
error.AnalysisFail => return, | |
else => { | |
file.status = .retryable_failure; | |
comp.reportRetryableAstGenError(src, file, err) catch |oom| switch (oom) { | |
// Swallowing this error is OK because it's implied to be OOM when | |
// there is a missing `failed_files` error message. | |
error.OutOfMemory => {}, | |
}; | |
return; | |
}, | |
}; | |
// Pre-emptively look for `@import` paths and queue them up. | |
// If we experience an error while pre-emptively fetching a | |
// file, just ignore it and let it happen again later during Sema. | |
assert(file.zir_loaded); | |
const imports_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.imports)]; | |
if (imports_index != 0) { | |
const extra = file.zir.extraData(Zir.Inst.Imports, imports_index); | |
var import_i: u32 = 0; | |
var extra_index = extra.end; | |
while (import_i < extra.data.imports_len) : (import_i += 1) { | |
const item = file.zir.extraData(Zir.Inst.Imports.Item, extra_index); | |
extra_index = item.end; | |
const import_path = file.zir.nullTerminatedString(item.data.name); | |
// `@import("builtin")` is handled specially. | |
if (mem.eql(u8, import_path, "builtin")) continue; | |
const import_result = blk: { | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
const res = mod.importFile(file, import_path) catch continue; | |
if (!res.is_pkg) { | |
res.file.addReference(mod.*, .{ .import = .{ | |
.file_scope = file, | |
.parent_decl_node = 0, | |
.lazy = .{ .token_abs = item.data.token }, | |
} }) catch continue; | |
} | |
break :blk res; | |
}; | |
if (import_result.is_new) { | |
log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{ | |
file.sub_file_path, import_path, import_result.file.sub_file_path, | |
}); | |
const sub_src: AstGenSrc = .{ .import = .{ | |
.importing_file = file, | |
.import_tok = item.data.token, | |
} }; | |
wg.start(); | |
comp.thread_pool.spawn(workerAstGenFile, .{ | |
comp, import_result.file, prog_node, wg, sub_src, | |
}) catch { | |
wg.finish(); | |
continue; | |
}; | |
} | |
} | |
} | |
} | |
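// Every worker in this file follows the same shape: the queuing side calls | |
// wg.start() before spawning, and the worker's first act is `defer | |
// wg.finish()` so the count stays balanced even on early return. A minimal | |
// sketch (the worker and its payload are hypothetical): | |
fn exampleWaitGroupWorker(slot: *u8, wg: *WaitGroup) void { | |
    defer wg.finish(); | |
    slot.* = 1; | |
} | |
test "wait group balances spawned workers" { | |
    var pool: std.Thread.Pool = undefined; | |
    try pool.init(.{ .allocator = std.testing.allocator }); | |
    defer pool.deinit(); | |
    var wg: WaitGroup = .{}; | |
    var slots = [_]u8{ 0, 0, 0 }; | |
    for (&slots) |*slot| { | |
        wg.start(); | |
        try pool.spawn(exampleWaitGroupWorker, .{ slot, &wg }); | |
    } | |
    wg.wait(); | |
    try std.testing.expectEqual([_]u8{ 1, 1, 1 }, slots); | |
} | |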
fn workerUpdateBuiltinZigFile( | |
comp: *Compilation, | |
mod: *Package.Module, | |
file: *Module.File, | |
wg: *WaitGroup, | |
) void { | |
defer wg.finish(); | |
Builtin.populateFile(comp, mod, file) catch |err| { | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
comp.setMiscFailure(.write_builtin_zig, "unable to write '{}{s}': {s}", .{ | |
mod.root, mod.root_src_path, @errorName(err), | |
}); | |
}; | |
} | |
fn workerCheckEmbedFile( | |
comp: *Compilation, | |
embed_file: *Module.EmbedFile, | |
wg: *WaitGroup, | |
) void { | |
defer wg.finish(); | |
comp.detectEmbedFileUpdate(embed_file) catch |err| { | |
comp.reportRetryableEmbedFileError(embed_file, err) catch |oom| switch (oom) { | |
// Swallowing this error is OK because it's implied to be OOM when | |
// there is a missing `failed_embed_files` error message. | |
error.OutOfMemory => {}, | |
}; | |
return; | |
}; | |
} | |
fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Module.EmbedFile) !void { | |
const mod = comp.module.?; | |
const ip = &mod.intern_pool; | |
const sub_file_path = ip.stringToSlice(embed_file.sub_file_path); | |
var file = try embed_file.owner.root.openFile(sub_file_path, .{}); | |
defer file.close(); | |
const stat = try file.stat(); | |
const unchanged_metadata = | |
stat.size == embed_file.stat.size and | |
stat.mtime == embed_file.stat.mtime and | |
stat.inode == embed_file.stat.inode; | |
if (unchanged_metadata) return; | |
@panic("TODO: handle embed file incremental update"); | |
} | |
pub fn obtainCObjectCacheManifest( | |
comp: *const Compilation, | |
owner_mod: *Package.Module, | |
) Cache.Manifest { | |
var man = comp.cache_parent.obtain(); | |
// Hash only the things that need to be added on top of the base hash, and | |
// only things that apply both to @cImport and to compiling C objects. No | |
// linking state here, and nothing that applies only to compiling .zig code. | |
cache_helpers.addModule(&man.hash, owner_mod); | |
man.hash.addListOfBytes(comp.global_cc_argv); | |
man.hash.add(comp.config.link_libcpp); | |
// When libc_installation is null it means that Zig generated this dir list | |
// based on the zig library directory alone. Whether the zig lib directory | |
// path participates in the cache hash is a deliberate decision made | |
// elsewhere, and it should not be overridden here. | |
if (comp.libc_installation != null) { | |
man.hash.addListOfBytes(comp.libc_include_dir_list); | |
} | |
return man; | |
} | |
pub fn obtainWin32ResourceCacheManifest(comp: *const Compilation) Cache.Manifest { | |
var man = comp.cache_parent.obtain(); | |
man.hash.addListOfBytes(comp.rc_include_dir_list); | |
return man; | |
} | |
pub const CImportResult = struct { | |
out_zig_path: []u8, | |
cache_hit: bool, | |
errors: std.zig.ErrorBundle, | |
pub fn deinit(result: *CImportResult, gpa: mem.Allocator) void { | |
result.errors.deinit(gpa); | |
} | |
}; | |
/// Caller owns returned memory. | |
/// This API is currently coupled pretty tightly to stage1's needs; it will need to be reworked | |
/// a bit when we want to start using it from self-hosted. | |
pub fn cImport(comp: *Compilation, c_src: []const u8, owner_mod: *Package.Module) !CImportResult { | |
if (build_options.only_core_functionality) @panic("@cImport is not available in a zig2.c build"); | |
const tracy_trace = trace(@src()); | |
defer tracy_trace.end(); | |
const cimport_zig_basename = "cimport.zig"; | |
var man = comp.obtainCObjectCacheManifest(owner_mod); | |
defer man.deinit(); | |
man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects | |
man.hash.addBytes(c_src); | |
man.hash.add(comp.config.c_frontend); | |
// If the previous invocation resulted in clang errors, we will see a hit | |
// here with 0 files in the manifest, in which case it is actually a miss. | |
// We need to "unhit" in this case, to keep the digests matching. | |
const prev_hash_state = man.hash.peekBin(); | |
const actual_hit = hit: { | |
_ = try man.hit(); | |
if (man.files.items.len == 0) { | |
man.unhit(prev_hash_state, 0); | |
break :hit false; | |
} | |
break :hit true; | |
}; | |
const digest = if (!actual_hit) digest: { | |
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); | |
defer arena_allocator.deinit(); | |
const arena = arena_allocator.allocator(); | |
const tmp_digest = man.hash.peek(); | |
const tmp_dir_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &tmp_digest }); | |
var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{}); | |
defer zig_cache_tmp_dir.close(); | |
const cimport_basename = "cimport.h"; | |
const out_h_path = try comp.local_cache_directory.join(arena, &[_][]const u8{ | |
tmp_dir_sub_path, cimport_basename, | |
}); | |
const out_dep_path = try std.fmt.allocPrint(arena, "{s}.d", .{out_h_path}); | |
try zig_cache_tmp_dir.writeFile(cimport_basename, c_src); | |
if (comp.verbose_cimport) { | |
log.info("C import source: {s}", .{out_h_path}); | |
} | |
var argv = std.ArrayList([]const u8).init(comp.gpa); | |
defer argv.deinit(); | |
try argv.append(@tagName(comp.config.c_frontend)); // argv[0] is program name, actual args start at [1] | |
try comp.addTranslateCCArgs(arena, &argv, .c, out_dep_path, owner_mod); | |
try argv.append(out_h_path); | |
if (comp.verbose_cc) { | |
dump_argv(argv.items); | |
} | |
var tree = switch (comp.config.c_frontend) { | |
.aro => tree: { | |
if (true) @panic("TODO"); | |
break :tree undefined; | |
}, | |
.clang => tree: { | |
if (!build_options.have_llvm) unreachable; | |
const translate_c = @import("translate_c.zig"); | |
// Convert to null-terminated args. | |
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1); | |
new_argv_with_sentinel[argv.items.len] = null; | |
const new_argv = new_argv_with_sentinel[0..argv.items.len :null]; | |
for (argv.items, 0..) |arg, i| { | |
new_argv[i] = try arena.dupeZ(u8, arg); | |
} | |
const c_headers_dir_path_z = try comp.zig_lib_directory.joinZ(arena, &[_][]const u8{"include"}); | |
var errors = std.zig.ErrorBundle.empty; | |
errdefer errors.deinit(comp.gpa); | |
break :tree translate_c.translate( | |
comp.gpa, | |
new_argv.ptr, | |
new_argv.ptr + new_argv.len, | |
&errors, | |
c_headers_dir_path_z, | |
) catch |err| switch (err) { | |
error.OutOfMemory => return error.OutOfMemory, | |
error.SemanticAnalyzeFail => { | |
return CImportResult{ | |
.out_zig_path = "", | |
.cache_hit = actual_hit, | |
.errors = errors, | |
}; | |
}, | |
}; | |
}, | |
}; | |
defer tree.deinit(comp.gpa); | |
if (comp.verbose_cimport) { | |
log.info("C import .d file: {s}", .{out_dep_path}); | |
} | |
const dep_basename = std.fs.path.basename(out_dep_path); | |
try man.addDepFilePost(zig_cache_tmp_dir, dep_basename); | |
switch (comp.cache_use) { | |
.whole => |whole| if (whole.cache_manifest) |whole_cache_manifest| { | |
whole.cache_manifest_mutex.lock(); | |
defer whole.cache_manifest_mutex.unlock(); | |
try whole_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename); | |
}, | |
.incremental => {}, | |
} | |
const digest = man.final(); | |
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest }); | |
var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{}); | |
defer o_dir.close(); | |
var out_zig_file = try o_dir.createFile(cimport_zig_basename, .{}); | |
defer out_zig_file.close(); | |
const formatted = try tree.render(comp.gpa); | |
defer comp.gpa.free(formatted); | |
try out_zig_file.writeAll(formatted); | |
break :digest digest; | |
} else man.final(); | |
if (man.have_exclusive_lock) { | |
// Write the updated manifest. This is a no-op if the manifest is not dirty. Note that it | |
// is possible to get a hit while the manifest is dirty: for example, if the file mtime | |
// changed but the contents stayed the same, we hit the cache, yet the manifest must be | |
// updated to prevent doing a full file content comparison the next time around. | |
man.writeManifest() catch |err| { | |
log.warn("failed to write cache manifest for C import: {s}", .{@errorName(err)}); | |
}; | |
} | |
const out_zig_path = try comp.local_cache_directory.join(comp.arena, &.{ | |
"o", &digest, cimport_zig_basename, | |
}); | |
if (comp.verbose_cimport) { | |
log.info("C import output: {s}", .{out_zig_path}); | |
} | |
return CImportResult{ | |
.out_zig_path = out_zig_path, | |
.cache_hit = actual_hit, | |
.errors = std.zig.ErrorBundle.empty, | |
}; | |
} | |
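// The hit/unhit dance above, distilled: a manifest hit with zero files is a | |
// degenerate hit left behind by a failed previous invocation and must be | |
// treated as a miss, while keeping the hash stream consistent so final() | |
// still produces the right digest. A sketch using the same Cache.Manifest | |
// calls (the helper itself is hypothetical): | |
fn checkCacheTreatingEmptyAsMiss(comp: *Compilation, key_bytes: []const u8) !bool { | |
    var man = comp.cache_parent.obtain(); | |
    defer man.deinit(); | |
    man.hash.addBytes(key_bytes); | |
    const prev_hash_state = man.hash.peekBin(); | |
    _ = try man.hit(); | |
    if (man.files.items.len == 0) { | |
        // Roll the hasher back so a later final() matches a true miss. | |
        man.unhit(prev_hash_state, 0); | |
        return false; | |
    } | |
    return true; | |
} | |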
fn workerUpdateCObject( | |
comp: *Compilation, | |
c_object: *CObject, | |
progress_node: *std.Progress.Node, | |
wg: *WaitGroup, | |
) void { | |
defer wg.finish(); | |
comp.updateCObject(c_object, progress_node) catch |err| switch (err) { | |
error.AnalysisFail => return, | |
else => { | |
comp.reportRetryableCObjectError(c_object, err) catch |oom| switch (oom) { | |
// Swallowing this error is OK because it's implied to be OOM when | |
// there is a missing failed_c_objects error message. | |
error.OutOfMemory => {}, | |
}; | |
}, | |
}; | |
} | |
fn workerUpdateWin32Resource( | |
comp: *Compilation, | |
win32_resource: *Win32Resource, | |
progress_node: *std.Progress.Node, | |
wg: *WaitGroup, | |
) void { | |
defer wg.finish(); | |
comp.updateWin32Resource(win32_resource, progress_node) catch |err| switch (err) { | |
error.AnalysisFail => return, | |
else => { | |
comp.reportRetryableWin32ResourceError(win32_resource, err) catch |oom| switch (oom) { | |
// Swallowing this error is OK because it's implied to be OOM when | |
// there is a missing failed_win32_resources error message. | |
error.OutOfMemory => {}, | |
}; | |
}, | |
}; | |
} | |
fn buildCompilerRtOneShot( | |
comp: *Compilation, | |
output_mode: std.builtin.OutputMode, | |
out: *?CRTFile, | |
prog_node: *std.Progress.Node, | |
) void { | |
comp.buildOutputFromZig( | |
"compiler_rt.zig", | |
output_mode, | |
out, | |
.compiler_rt, | |
prog_node, | |
) catch |err| switch (err) { | |
error.SubCompilationFailed => return, // error reported already | |
else => comp.lockAndSetMiscFailure( | |
.compiler_rt, | |
"unable to build compiler_rt: {s}", | |
.{@errorName(err)}, | |
), | |
}; | |
} | |
fn reportRetryableCObjectError( | |
comp: *Compilation, | |
c_object: *CObject, | |
err: anyerror, | |
) error{OutOfMemory}!void { | |
c_object.status = .failure_retryable; | |
switch (comp.failCObj(c_object, "{s}", .{@errorName(err)})) { | |
error.AnalysisFail => return, | |
else => |e| return e, | |
} | |
} | |
fn reportRetryableWin32ResourceError( | |
comp: *Compilation, | |
win32_resource: *Win32Resource, | |
err: anyerror, | |
) error{OutOfMemory}!void { | |
win32_resource.status = .failure_retryable; | |
var bundle: ErrorBundle.Wip = undefined; | |
try bundle.init(comp.gpa); | |
errdefer bundle.deinit(); | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.printString("{s}", .{@errorName(err)}), | |
.src_loc = try bundle.addSourceLocation(.{ | |
.src_path = try bundle.addString(switch (win32_resource.src) { | |
.rc => |rc_src| rc_src.src_path, | |
.manifest => |manifest_src| manifest_src, | |
}), | |
.line = 0, | |
.column = 0, | |
.span_start = 0, | |
.span_main = 0, | |
.span_end = 0, | |
}), | |
}); | |
const finished_bundle = try bundle.toOwnedBundle(""); | |
{ | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
try comp.failed_win32_resources.putNoClobber(comp.gpa, win32_resource, finished_bundle); | |
} | |
} | |
fn reportRetryableAstGenError( | |
comp: *Compilation, | |
src: AstGenSrc, | |
file: *Module.File, | |
err: anyerror, | |
) error{OutOfMemory}!void { | |
const mod = comp.module.?; | |
const gpa = mod.gpa; | |
file.status = .retryable_failure; | |
const src_loc: Module.SrcLoc = switch (src) { | |
.root => .{ | |
.file_scope = file, | |
.parent_decl_node = 0, | |
.lazy = .entire_file, | |
}, | |
.import => |info| blk: { | |
const importing_file = info.importing_file; | |
break :blk .{ | |
.file_scope = importing_file, | |
.parent_decl_node = 0, | |
.lazy = .{ .token_abs = info.import_tok }, | |
}; | |
}, | |
}; | |
const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ | |
file.mod.root, file.sub_file_path, @errorName(err), | |
}); | |
errdefer err_msg.destroy(gpa); | |
{ | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
try mod.failed_files.putNoClobber(gpa, file, err_msg); | |
} | |
} | |
fn reportRetryableEmbedFileError( | |
comp: *Compilation, | |
embed_file: *Module.EmbedFile, | |
err: anyerror, | |
) error{OutOfMemory}!void { | |
const mod = comp.module.?; | |
const gpa = mod.gpa; | |
const src_loc = embed_file.src_loc; | |
const ip = &mod.intern_pool; | |
const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ | |
embed_file.owner.root, | |
ip.stringToSlice(embed_file.sub_file_path), | |
@errorName(err), | |
}); | |
errdefer err_msg.destroy(gpa); | |
{ | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
try mod.failed_embed_files.putNoClobber(gpa, embed_file, err_msg); | |
} | |
} | |
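// Each retryable-error reporter above ends the same way: take the compilation | |
// mutex, then putNoClobber into the relevant failure map. A minimal sketch of | |
// that mutex-guarded insertion (the map type here is a stand-in): | |
fn recordFailureExample( | |
    gpa: Allocator, | |
    mutex: *std.Thread.Mutex, | |
    map: *std.AutoHashMapUnmanaged(u32, u32), | |
    key: u32, | |
    value: u32, | |
) !void { | |
    mutex.lock(); | |
    defer mutex.unlock(); | |
    // Asserts the key is not already present, same as the callers above. | |
    try map.putNoClobber(gpa, key, value); | |
} | |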
fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.Progress.Node) !void { | |
if (comp.config.c_frontend == .aro) { | |
return comp.failCObj(c_object, "aro does not support compiling C objects yet", .{}); | |
} | |
if (!build_options.have_llvm) { | |
return comp.failCObj(c_object, "clang not available: compiler built without LLVM extensions", .{}); | |
} | |
const self_exe_path = comp.self_exe_path orelse | |
return comp.failCObj(c_object, "clang compilation disabled", .{}); | |
const tracy_trace = trace(@src()); | |
defer tracy_trace.end(); | |
log.debug("updating C object: {s}", .{c_object.src.src_path}); | |
if (c_object.clearStatus(comp.gpa)) { | |
// There was previous failure. | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
// If the failure was OOM, there will not be an entry here, so we do | |
// not assert that the removal succeeded. | |
_ = comp.failed_c_objects.swapRemove(c_object); | |
} | |
var man = comp.obtainCObjectCacheManifest(c_object.src.owner); | |
defer man.deinit(); | |
man.hash.add(comp.clang_preprocessor_mode); | |
cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_asm); | |
cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_ir); | |
cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_bc); | |
try cache_helpers.hashCSource(&man, c_object.src); | |
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); | |
defer arena_allocator.deinit(); | |
const arena = arena_allocator.allocator(); | |
const c_source_basename = std.fs.path.basename(c_object.src.src_path); | |
c_obj_prog_node.activate(); | |
var child_progress_node = c_obj_prog_node.start(c_source_basename, 0); | |
child_progress_node.activate(); | |
defer child_progress_node.end(); | |
// Special case when doing build-obj for just one C file. When there is more than one | |
// object file and we are building an object, we need to link them together, but with | |
// just one it should go directly to the output file. | |
const direct_o = comp.c_source_files.len == 1 and comp.module == null and | |
comp.config.output_mode == .Obj and comp.objects.len == 0; | |
const o_basename_noext = if (direct_o) | |
comp.root_name | |
else | |
c_source_basename[0 .. c_source_basename.len - std.fs.path.extension(c_source_basename).len]; | |
const target = comp.getTarget(); | |
const o_ext = target.ofmt.fileExt(target.cpu.arch); | |
const digest = if (!comp.disable_c_depfile and try man.hit()) man.final() else blk: { | |
var argv = std.ArrayList([]const u8).init(comp.gpa); | |
defer argv.deinit(); | |
// When in passthrough mode, we need to detect -S and -emit-llvm. | |
const out_ext = e: { | |
if (!comp.clang_passthrough_mode) | |
break :e o_ext; | |
if (comp.emit_asm != null) | |
break :e ".s"; | |
if (comp.emit_llvm_ir != null) | |
break :e ".ll"; | |
if (comp.emit_llvm_bc != null) | |
break :e ".bc"; | |
break :e o_ext; | |
}; | |
const o_basename = try std.fmt.allocPrint(arena, "{s}{s}", .{ o_basename_noext, out_ext }); | |
const ext = c_object.src.ext orelse classifyFileExt(c_object.src.src_path); | |
try argv.appendSlice(&[_][]const u8{ self_exe_path, "clang" }); | |
// if "ext" is explicit, add "-x <lang>". Otherwise let clang do its thing. | |
if (c_object.src.ext != null) { | |
try argv.appendSlice(&[_][]const u8{ "-x", switch (ext) { | |
.assembly => "assembler", | |
.assembly_with_cpp => "assembler-with-cpp", | |
.c => "c", | |
.cpp => "c++", | |
.h => "c-header", | |
.hpp => "c++-header", | |
.hm => "objective-c-header", | |
.hmm => "objective-c++-header", | |
.cu => "cuda", | |
.m => "objective-c", | |
.mm => "objective-c++", | |
else => fatal("language '{s}' is unsupported in this context", .{@tagName(ext)}), | |
} }); | |
} | |
try argv.append(c_object.src.src_path); | |
// When all these flags are true, it means that the entire purpose of | |
// this compilation is to perform a single zig cc operation. This means | |
// that we could "tail call" clang by doing an execve, and any use of | |
// the caching system would actually be problematic since the user is | |
// presumably doing their own caching by using dep file flags. | |
if (std.process.can_execv and direct_o and | |
comp.disable_c_depfile and comp.clang_passthrough_mode) | |
{ | |
try comp.addCCArgs(arena, &argv, ext, null, c_object.src.owner); | |
try argv.appendSlice(c_object.src.extra_flags); | |
try argv.appendSlice(c_object.src.cache_exempt_flags); | |
const out_obj_path = if (comp.bin_file) |lf| | |
try lf.emit.directory.join(arena, &.{lf.emit.sub_path}) | |
else | |
"/dev/null"; | |
try argv.ensureUnusedCapacity(6); | |
switch (comp.clang_preprocessor_mode) { | |
.no => argv.appendSliceAssumeCapacity(&.{ "-c", "-o", out_obj_path }), | |
.yes => argv.appendSliceAssumeCapacity(&.{ "-E", "-o", out_obj_path }), | |
.pch => argv.appendSliceAssumeCapacity(&.{ "-Xclang", "-emit-pch", "-o", out_obj_path }), | |
.stdout => argv.appendAssumeCapacity("-E"), | |
} | |
if (comp.emit_asm != null) { | |
argv.appendAssumeCapacity("-S"); | |
} else if (comp.emit_llvm_ir != null) { | |
argv.appendSliceAssumeCapacity(&[_][]const u8{ "-emit-llvm", "-S" }); | |
} else if (comp.emit_llvm_bc != null) { | |
argv.appendAssumeCapacity("-emit-llvm"); | |
} | |
if (comp.verbose_cc) { | |
dump_argv(argv.items); | |
} | |
const err = std.process.execv(arena, argv.items); | |
fatal("unable to execv clang: {s}", .{@errorName(err)}); | |
} | |
// We can't know the digest until we do the C compiler invocation, | |
// so we need a temporary filename. | |
const out_obj_path = try comp.tmpFilePath(arena, o_basename); | |
const out_diag_path = try std.fmt.allocPrint(arena, "{s}.diag", .{out_obj_path}); | |
var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{}); | |
defer zig_cache_tmp_dir.close(); | |
const out_dep_path: ?[]const u8 = if (comp.disable_c_depfile or !ext.clangSupportsDepFile()) | |
null | |
else | |
try std.fmt.allocPrint(arena, "{s}.d", .{out_obj_path}); | |
try comp.addCCArgs(arena, &argv, ext, out_dep_path, c_object.src.owner); | |
try argv.appendSlice(c_object.src.extra_flags); | |
try argv.appendSlice(c_object.src.cache_exempt_flags); | |
try argv.ensureUnusedCapacity(6); | |
switch (comp.clang_preprocessor_mode) { | |
.no => argv.appendSliceAssumeCapacity(&.{ "-c", "-o", out_obj_path }), | |
.yes => argv.appendSliceAssumeCapacity(&.{ "-E", "-o", out_obj_path }), | |
.pch => argv.appendSliceAssumeCapacity(&.{ "-Xclang", "-emit-pch", "-o", out_obj_path }), | |
.stdout => argv.appendAssumeCapacity("-E"), | |
} | |
if (comp.clang_passthrough_mode) { | |
if (comp.emit_asm != null) { | |
argv.appendAssumeCapacity("-S"); | |
} else if (comp.emit_llvm_ir != null) { | |
argv.appendSliceAssumeCapacity(&.{ "-emit-llvm", "-S" }); | |
} else if (comp.emit_llvm_bc != null) { | |
argv.appendAssumeCapacity("-emit-llvm"); | |
} | |
} else { | |
argv.appendSliceAssumeCapacity(&.{ "--serialize-diagnostics", out_diag_path }); | |
} | |
if (comp.verbose_cc) { | |
dump_argv(argv.items); | |
} | |
if (std.process.can_spawn) { | |
var child = std.ChildProcess.init(argv.items, arena); | |
if (comp.clang_passthrough_mode) { | |
child.stdin_behavior = .Inherit; | |
child.stdout_behavior = .Inherit; | |
child.stderr_behavior = .Inherit; | |
const term = child.spawnAndWait() catch |err| { | |
return comp.failCObj(c_object, "unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) }); | |
}; | |
switch (term) { | |
.Exited => |code| { | |
if (code != 0) { | |
std.process.exit(code); | |
} | |
if (comp.clang_preprocessor_mode == .stdout) | |
std.process.exit(0); | |
}, | |
else => std.process.abort(), | |
} | |
} else { | |
child.stdin_behavior = .Ignore; | |
child.stdout_behavior = .Ignore; | |
child.stderr_behavior = .Pipe; | |
try child.spawn(); | |
const stderr = try child.stderr.?.reader().readAllAlloc(arena, std.math.maxInt(usize)); | |
const term = child.wait() catch |err| { | |
return comp.failCObj(c_object, "unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) }); | |
}; | |
switch (term) { | |
.Exited => |code| { | |
if (code != 0) { | |
const bundle = CObject.Diag.Bundle.parse(comp.gpa, out_diag_path) catch |err| { | |
log.err("{}: failed to parse clang diagnostics: {s}", .{ err, stderr }); | |
return comp.failCObj(c_object, "clang exited with code {d}", .{code}); | |
}; | |
zig_cache_tmp_dir.deleteFile(out_diag_path) catch |err| { | |
log.warn("failed to delete '{s}': {s}", .{ out_diag_path, @errorName(err) }); | |
}; | |
return comp.failCObjWithOwnedDiagBundle(c_object, bundle); | |
} | |
}, | |
else => { | |
log.err("clang terminated with stderr: {s}", .{stderr}); | |
return comp.failCObj(c_object, "clang terminated unexpectedly", .{}); | |
}, | |
} | |
} | |
} else { | |
const exit_code = try clangMain(arena, argv.items); | |
if (exit_code != 0) { | |
if (comp.clang_passthrough_mode) { | |
std.process.exit(exit_code); | |
} else { | |
return comp.failCObj(c_object, "clang exited with code {d}", .{exit_code}); | |
} | |
} | |
if (comp.clang_passthrough_mode and | |
comp.clang_preprocessor_mode == .stdout) | |
{ | |
std.process.exit(0); | |
} | |
} | |
if (out_dep_path) |dep_file_path| { | |
const dep_basename = std.fs.path.basename(dep_file_path); | |
// Add the files depended on to the cache system. | |
try man.addDepFilePost(zig_cache_tmp_dir, dep_basename); | |
switch (comp.cache_use) { | |
.whole => |whole| { | |
if (whole.cache_manifest) |whole_cache_manifest| { | |
whole.cache_manifest_mutex.lock(); | |
defer whole.cache_manifest_mutex.unlock(); | |
try whole_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename); | |
} | |
}, | |
.incremental => {}, | |
} | |
// Just to save disk space, we delete the file because it is never needed again. | |
zig_cache_tmp_dir.deleteFile(dep_basename) catch |err| { | |
log.warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) }); | |
}; | |
} | |
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock. | |
if (comp.disable_c_depfile) _ = try man.hit(); | |
// Rename into place. | |
const digest = man.final(); | |
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest }); | |
var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{}); | |
defer o_dir.close(); | |
const tmp_basename = std.fs.path.basename(out_obj_path); | |
try std.fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename); | |
break :blk digest; | |
}; | |
if (man.have_exclusive_lock) { | |
// Write the updated manifest. This is a no-op if the manifest is not dirty. Note that it is | |
// possible we had a hit and the manifest is dirty, for example if the file mtime changed but | |
// the contents were the same, we hit the cache but the manifest is dirty and we need to update | |
// it to prevent doing a full file content comparison the next time around. | |
man.writeManifest() catch |err| { | |
log.warn("failed to write cache manifest when compiling '{s}': {s}", .{ c_object.src.src_path, @errorName(err) }); | |
}; | |
} | |
const o_basename = try std.fmt.allocPrint(arena, "{s}{s}", .{ o_basename_noext, o_ext }); | |
c_object.status = .{ | |
.success = .{ | |
.object_path = try comp.local_cache_directory.join(comp.gpa, &[_][]const u8{ | |
"o", &digest, o_basename, | |
}), | |
.lock = man.toOwnedLock(), | |
}, | |
}; | |
} | |
fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32_resource_prog_node: *std.Progress.Node) !void { | |
if (!build_options.have_llvm) { | |
return comp.failWin32Resource(win32_resource, "clang not available: compiler built without LLVM extensions", .{}); | |
} | |
const self_exe_path = comp.self_exe_path orelse | |
return comp.failWin32Resource(win32_resource, "clang compilation disabled", .{}); | |
const tracy_trace = trace(@src()); | |
defer tracy_trace.end(); | |
const src_path = switch (win32_resource.src) { | |
.rc => |rc_src| rc_src.src_path, | |
.manifest => |src_path| src_path, | |
}; | |
const src_basename = std.fs.path.basename(src_path); | |
log.debug("updating win32 resource: {s}", .{src_path}); | |
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); | |
defer arena_allocator.deinit(); | |
const arena = arena_allocator.allocator(); | |
if (win32_resource.clearStatus(comp.gpa)) { | |
// There was a previous failure. | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
// If the failure was OOM, there will not be an entry here, so we do | |
// not assert that the removal succeeded. | |
_ = comp.failed_win32_resources.swapRemove(win32_resource); | |
} | |
win32_resource_prog_node.activate(); | |
var child_progress_node = win32_resource_prog_node.start(src_basename, 0); | |
child_progress_node.activate(); | |
defer child_progress_node.end(); | |
var man = comp.obtainWin32ResourceCacheManifest(); | |
defer man.deinit(); | |
// For .manifest files, we ultimately just want to generate a .res with | |
// the XML data as an RT_MANIFEST resource. This means we can skip preprocessing, | |
// include paths, CLI options, etc. | |
if (win32_resource.src == .manifest) { | |
_ = try man.addFile(src_path, null); | |
const res_basename = try std.fmt.allocPrint(arena, "{s}.res", .{src_basename}); | |
const digest = if (try man.hit()) man.final() else blk: { | |
// The digest only depends on the .manifest file, so we can | |
// get the digest now and write the .res directly to the cache | |
const digest = man.final(); | |
const o_sub_path = try std.fs.path.join(arena, &.{ "o", &digest }); | |
var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{}); | |
defer o_dir.close(); | |
var output_file = o_dir.createFile(res_basename, .{}) catch |err| { | |
const output_file_path = try comp.local_cache_directory.join(arena, &.{ o_sub_path, res_basename }); | |
return comp.failWin32Resource(win32_resource, "failed to create output file '{s}': {s}", .{ output_file_path, @errorName(err) }); | |
}; | |
var output_file_closed = false; | |
defer if (!output_file_closed) output_file.close(); | |
var diagnostics = resinator.errors.Diagnostics.init(arena); | |
defer diagnostics.deinit(); | |
var output_buffered_stream = std.io.bufferedWriter(output_file.writer()); | |
// In .rc files, a " within a quoted string is escaped as "" | |
const fmtRcEscape = struct { | |
fn formatRcEscape(bytes: []const u8, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { | |
_ = fmt; | |
_ = options; | |
for (bytes) |byte| switch (byte) { | |
'"' => try writer.writeAll("\"\""), | |
'\\' => try writer.writeAll("\\\\"), | |
else => try writer.writeByte(byte), | |
}; | |
} | |
pub fn fmtRcEscape(bytes: []const u8) std.fmt.Formatter(formatRcEscape) { | |
return .{ .data = bytes }; | |
} | |
}.fmtRcEscape; | |
// 1 is CREATEPROCESS_MANIFEST_RESOURCE_ID which is the default ID used for RT_MANIFEST resources | |
// 24 is RT_MANIFEST | |
const input = try std.fmt.allocPrint(arena, "1 24 \"{s}\"", .{fmtRcEscape(src_path)}); | |
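// As a concrete illustration of the escaping above (using a hypothetical | |
// path), a src_path of C:\app\my "app".manifest yields the input string: | |
//   1 24 "C:\\app\\my ""app"".manifest" | |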
resinator.compile.compile(arena, input, output_buffered_stream.writer(), .{ | |
.cwd = std.fs.cwd(), | |
.diagnostics = &diagnostics, | |
.ignore_include_env_var = true, | |
.default_code_page = .utf8, | |
}) catch |err| switch (err) { | |
error.ParseError, error.CompileError => { | |
// Delete the output file on error | |
output_file.close(); | |
output_file_closed = true; | |
// Failing to delete is not really a big deal, so swallow any errors. | |
// Capture the delete error itself; the outer `err` is the compile error, | |
// which would be misleading in this log message. | |
o_dir.deleteFile(res_basename) catch |del_err| { | |
const output_file_path = try comp.local_cache_directory.join(arena, &.{ o_sub_path, res_basename }); | |
log.warn("failed to delete '{s}': {s}", .{ output_file_path, @errorName(del_err) }); | |
}; | |
return comp.failWin32ResourceCompile(win32_resource, input, &diagnostics, null); | |
}, | |
else => |e| return e, | |
}; | |
try output_buffered_stream.flush(); | |
break :blk digest; | |
}; | |
if (man.have_exclusive_lock) { | |
man.writeManifest() catch |err| { | |
log.warn("failed to write cache manifest when compiling '{s}': {s}", .{ src_path, @errorName(err) }); | |
}; | |
} | |
win32_resource.status = .{ | |
.success = .{ | |
.res_path = try comp.local_cache_directory.join(comp.gpa, &[_][]const u8{ | |
"o", &digest, res_basename, | |
}), | |
.lock = man.toOwnedLock(), | |
}, | |
}; | |
return; | |
} | |
// We now know that we're compiling an .rc file | |
const rc_src = win32_resource.src.rc; | |
_ = try man.addFile(rc_src.src_path, null); | |
man.hash.addListOfBytes(rc_src.extra_flags); | |
const rc_basename_noext = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len]; | |
const digest = if (try man.hit()) man.final() else blk: { | |
const rcpp_filename = try std.fmt.allocPrint(arena, "{s}.rcpp", .{rc_basename_noext}); | |
const out_rcpp_path = try comp.tmpFilePath(arena, rcpp_filename); | |
var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{}); | |
defer zig_cache_tmp_dir.close(); | |
const res_filename = try std.fmt.allocPrint(arena, "{s}.res", .{rc_basename_noext}); | |
// We can't know the digest until we do the compilation, | |
// so we need a temporary filename. | |
const out_res_path = try comp.tmpFilePath(arena, res_filename); | |
var options = options: { | |
var resinator_args = try std.ArrayListUnmanaged([]const u8).initCapacity(comp.gpa, rc_src.extra_flags.len + 4); | |
defer resinator_args.deinit(comp.gpa); | |
resinator_args.appendAssumeCapacity(""); // dummy 'process name' arg | |
resinator_args.appendSliceAssumeCapacity(rc_src.extra_flags); | |
resinator_args.appendSliceAssumeCapacity(&.{ "--", out_rcpp_path, out_res_path }); | |
var cli_diagnostics = resinator.cli.Diagnostics.init(comp.gpa); | |
defer cli_diagnostics.deinit(); | |
const options = resinator.cli.parse(comp.gpa, resinator_args.items, &cli_diagnostics) catch |err| switch (err) { | |
error.ParseError => { | |
return comp.failWin32ResourceCli(win32_resource, &cli_diagnostics); | |
}, | |
else => |e| return e, | |
}; | |
break :options options; | |
}; | |
defer options.deinit(); | |
// We never want to read the INCLUDE environment variable, so | |
// unconditionally set `ignore_include_env_var` to true | |
options.ignore_include_env_var = true; | |
if (options.preprocess != .yes) { | |
return comp.failWin32Resource(win32_resource, "the '{s}' option is not supported in this context", .{switch (options.preprocess) { | |
.no => "/:no-preprocess", | |
.only => "/p", | |
.yes => unreachable, | |
}}); | |
} | |
var argv = std.ArrayList([]const u8).init(comp.gpa); | |
defer argv.deinit(); | |
try argv.appendSlice(&[_][]const u8{ self_exe_path, "clang" }); | |
try resinator.preprocess.appendClangArgs(arena, &argv, options, .{ | |
.clang_target = null, // handled by addCCArgs | |
.system_include_paths = &.{}, // handled by addCCArgs | |
.needs_gnu_workaround = comp.getTarget().isGnu(), | |
.nostdinc = false, // handled by addCCArgs | |
}); | |
try argv.append(rc_src.src_path); | |
try argv.appendSlice(&[_][]const u8{ | |
"-o", | |
out_rcpp_path, | |
}); | |
const out_dep_path = try std.fmt.allocPrint(arena, "{s}.d", .{out_rcpp_path}); | |
// Note: addCCArgs will implicitly add _DEBUG/NDEBUG depending on the | |
// optimization mode. While these defines are not normally present when | |
// calling rc.exe directly, defining them matches how MSVC invokes rc.exe, | |
// which is the more relevant behavior in this case. | |
try comp.addCCArgs(arena, &argv, .rc, out_dep_path, rc_src.owner); | |
if (comp.verbose_cc) { | |
dump_argv(argv.items); | |
} | |
if (std.process.can_spawn) { | |
var child = std.ChildProcess.init(argv.items, arena); | |
child.stdin_behavior = .Ignore; | |
child.stdout_behavior = .Ignore; | |
child.stderr_behavior = .Pipe; | |
try child.spawn(); | |
const stderr_reader = child.stderr.?.reader(); | |
const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024); | |
const term = child.wait() catch |err| { | |
return comp.failWin32Resource(win32_resource, "unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) }); | |
}; | |
switch (term) { | |
.Exited => |code| { | |
if (code != 0) { | |
// TODO parse clang stderr and turn it into an error message | |
// and then call failCObjWithOwnedErrorMsg | |
log.err("clang preprocessor failed with stderr:\n{s}", .{stderr}); | |
return comp.failWin32Resource(win32_resource, "clang preprocessor exited with code {d}", .{code}); | |
} | |
}, | |
else => { | |
log.err("clang preprocessor terminated with stderr:\n{s}", .{stderr}); | |
return comp.failWin32Resource(win32_resource, "clang preprocessor terminated unexpectedly", .{}); | |
}, | |
} | |
} else { | |
const exit_code = try clangMain(arena, argv.items); | |
if (exit_code != 0) { | |
return comp.failWin32Resource(win32_resource, "clang preprocessor exited with code {d}", .{exit_code}); | |
} | |
} | |
const dep_basename = std.fs.path.basename(out_dep_path); | |
// Add the files depended on to the cache system. | |
try man.addDepFilePost(zig_cache_tmp_dir, dep_basename); | |
switch (comp.cache_use) { | |
.whole => |whole| if (whole.cache_manifest) |whole_cache_manifest| { | |
whole.cache_manifest_mutex.lock(); | |
defer whole.cache_manifest_mutex.unlock(); | |
try whole_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename); | |
}, | |
.incremental => {}, | |
} | |
// Just to save disk space, we delete the file because it is never needed again. | |
zig_cache_tmp_dir.deleteFile(dep_basename) catch |err| { | |
log.warn("failed to delete '{s}': {s}", .{ out_dep_path, @errorName(err) }); | |
}; | |
const full_input = std.fs.cwd().readFileAlloc(arena, out_rcpp_path, std.math.maxInt(usize)) catch |err| switch (err) { | |
error.OutOfMemory => return error.OutOfMemory, | |
else => |e| { | |
return comp.failWin32Resource(win32_resource, "failed to read preprocessed file '{s}': {s}", .{ out_rcpp_path, @errorName(e) }); | |
}, | |
}; | |
var mapping_results = try resinator.source_mapping.parseAndRemoveLineCommands(arena, full_input, full_input, .{ .initial_filename = rc_src.src_path }); | |
defer mapping_results.mappings.deinit(arena); | |
const final_input = resinator.comments.removeComments(mapping_results.result, mapping_results.result, &mapping_results.mappings); | |
var output_file = zig_cache_tmp_dir.createFile(out_res_path, .{}) catch |err| { | |
return comp.failWin32Resource(win32_resource, "failed to create output file '{s}': {s}", .{ out_res_path, @errorName(err) }); | |
}; | |
var output_file_closed = false; | |
defer if (!output_file_closed) output_file.close(); | |
var diagnostics = resinator.errors.Diagnostics.init(arena); | |
defer diagnostics.deinit(); | |
var dependencies_list = std.ArrayList([]const u8).init(comp.gpa); | |
defer { | |
for (dependencies_list.items) |item| { | |
comp.gpa.free(item); | |
} | |
dependencies_list.deinit(); | |
} | |
var output_buffered_stream = std.io.bufferedWriter(output_file.writer()); | |
resinator.compile.compile(arena, final_input, output_buffered_stream.writer(), .{ | |
.cwd = std.fs.cwd(), | |
.diagnostics = &diagnostics, | |
.source_mappings = &mapping_results.mappings, | |
.dependencies_list = &dependencies_list, | |
.system_include_paths = comp.rc_include_dir_list, | |
.ignore_include_env_var = true, | |
// options | |
.extra_include_paths = options.extra_include_paths.items, | |
.default_language_id = options.default_language_id, | |
.default_code_page = options.default_code_page orelse .windows1252, | |
.verbose = options.verbose, | |
.null_terminate_string_table_strings = options.null_terminate_string_table_strings, | |
.max_string_literal_codepoints = options.max_string_literal_codepoints, | |
.silent_duplicate_control_ids = options.silent_duplicate_control_ids, | |
.warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page, | |
}) catch |err| switch (err) { | |
error.ParseError, error.CompileError => { | |
// Delete the output file on error | |
output_file.close(); | |
output_file_closed = true; | |
// Failing to delete is not really a big deal, so swallow any errors. | |
// Capture the delete error itself; the outer `err` is the compile error, | |
// which would be misleading in this log message. | |
zig_cache_tmp_dir.deleteFile(out_res_path) catch |del_err| { | |
log.warn("failed to delete '{s}': {s}", .{ out_res_path, @errorName(del_err) }); | |
}; | |
return comp.failWin32ResourceCompile(win32_resource, final_input, &diagnostics, mapping_results.mappings); | |
}, | |
else => |e| return e, | |
}; | |
try output_buffered_stream.flush(); | |
for (dependencies_list.items) |dep_file_path| { | |
try man.addFilePost(dep_file_path); | |
switch (comp.cache_use) { | |
.whole => |whole| if (whole.cache_manifest) |whole_cache_manifest| { | |
whole.cache_manifest_mutex.lock(); | |
defer whole.cache_manifest_mutex.unlock(); | |
try whole_cache_manifest.addFilePost(dep_file_path); | |
}, | |
.incremental => {}, | |
} | |
} | |
// Rename into place. | |
const digest = man.final(); | |
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest }); | |
var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{}); | |
defer o_dir.close(); | |
const tmp_basename = std.fs.path.basename(out_res_path); | |
try std.fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename); | |
const tmp_rcpp_basename = std.fs.path.basename(out_rcpp_path); | |
try std.fs.rename(zig_cache_tmp_dir, tmp_rcpp_basename, o_dir, rcpp_filename); | |
break :blk digest; | |
}; | |
if (man.have_exclusive_lock) { | |
// Write the updated manifest. This is a no-op if the manifest is not dirty. Note that it is | |
// possible we had a hit and the manifest is dirty, for example if the file mtime changed but | |
// the contents were the same, we hit the cache but the manifest is dirty and we need to update | |
// it to prevent doing a full file content comparison the next time around. | |
man.writeManifest() catch |err| { | |
log.warn("failed to write cache manifest when compiling '{s}': {s}", .{ rc_src.src_path, @errorName(err) }); | |
}; | |
} | |
const res_basename = try std.fmt.allocPrint(arena, "{s}.res", .{rc_basename_noext}); | |
win32_resource.status = .{ | |
.success = .{ | |
.res_path = try comp.local_cache_directory.join(comp.gpa, &[_][]const u8{ | |
"o", &digest, res_basename, | |
}), | |
.lock = man.toOwnedLock(), | |
}, | |
}; | |
} | |
pub fn tmpFilePath(comp: *Compilation, ally: Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 { | |
const s = std.fs.path.sep_str; | |
const rand_int = std.crypto.random.int(u64); | |
if (comp.local_cache_directory.path) |p| { | |
return std.fmt.allocPrint(ally, "{s}" ++ s ++ "tmp" ++ s ++ "{x}-{s}", .{ p, rand_int, suffix }); | |
} else { | |
return std.fmt.allocPrint(ally, "tmp" ++ s ++ "{x}-{s}", .{ rand_int, suffix }); | |
} | |
} | |
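// A minimal sanity check of the "tmp" naming scheme above; it does not call | |
// tmpFilePath itself (that would require a full Compilation), it only | |
// re-derives the "{x}-{s}" shape with a fixed stand-in for the random integer. | |
test "tmpFilePath naming shape" { | |
const s = std.fs.path.sep_str; | |
const name = try std.fmt.allocPrint(std.testing.allocator, "tmp" ++ s ++ "{x}-{s}", .{ @as(u64, 0xdeadbeef), "foo.o" }); | |
defer std.testing.allocator.free(name); | |
try std.testing.expectEqualStrings("tmp" ++ s ++ "deadbeef-foo.o", name); | |
} | |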
pub fn addTranslateCCArgs( | |
comp: *Compilation, | |
arena: Allocator, | |
argv: *std.ArrayList([]const u8), | |
ext: FileExt, | |
out_dep_path: ?[]const u8, | |
owner_mod: *Package.Module, | |
) !void { | |
try argv.appendSlice(&.{ "-x", "c" }); | |
try comp.addCCArgs(arena, argv, ext, out_dep_path, owner_mod); | |
// This gives us access to preprocessing entities, presumably at the cost of performance. | |
try argv.appendSlice(&.{ "-Xclang", "-detailed-preprocessing-record" }); | |
} | |
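// For illustration (exact flags depend on addCCArgs and the module options), | |
// the arguments contributed here look roughly like: | |
//   -x c <common cc args...> -Xclang -detailed-preprocessing-record | |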
/// Add common C compiler args between translate-c and C object compilation. | |
pub fn addCCArgs( | |
comp: *const Compilation, | |
arena: Allocator, | |
argv: *std.ArrayList([]const u8), | |
ext: FileExt, | |
out_dep_path: ?[]const u8, | |
mod: *Package.Module, | |
) !void { | |
const target = mod.resolved_target.result; | |
// As of Clang 16.x, it will by default read extra flags from /etc/clang. | |
// I'm sure the person who implemented this means well, but they have a lot | |
// to learn about abstractions and where the appropriate boundaries between | |
// them are. The road to hell is paved with good intentions. Fortunately it | |
// can be disabled. | |
try argv.append("--no-default-config"); | |
if (ext == .cpp) { | |
try argv.append("-nostdinc++"); | |
} | |
// We don't ever put `-fcolor-diagnostics` or `-fno-color-diagnostics` because in passthrough mode | |
// we want Clang to infer it, and in normal mode we always want it off, which will be true since | |
// clang will detect stderr as a pipe rather than a terminal. | |
if (!comp.clang_passthrough_mode) { | |
// Make stderr more easily parseable. | |
try argv.append("-fno-caret-diagnostics"); | |
} | |
if (comp.function_sections) { | |
try argv.append("-ffunction-sections"); | |
} | |
if (comp.data_sections) { | |
try argv.append("-fdata-sections"); | |
} | |
if (comp.no_builtin) { | |
try argv.append("-fno-builtin"); | |
} | |
if (comp.config.link_libcpp) { | |
const libcxx_include_path = try std.fs.path.join(arena, &[_][]const u8{ | |
comp.zig_lib_directory.path.?, "libcxx", "include", | |
}); | |
const libcxxabi_include_path = try std.fs.path.join(arena, &[_][]const u8{ | |
comp.zig_lib_directory.path.?, "libcxxabi", "include", | |
}); | |
try argv.append("-isystem"); | |
try argv.append(libcxx_include_path); | |
try argv.append("-isystem"); | |
try argv.append(libcxxabi_include_path); | |
if (target.abi.isMusl()) { | |
try argv.append("-D_LIBCPP_HAS_MUSL_LIBC"); | |
} | |
try argv.append("-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS"); | |
try argv.append("-D_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS"); | |
try argv.append("-D_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS"); | |
if (!comp.config.any_non_single_threaded) { | |
try argv.append("-D_LIBCPP_HAS_NO_THREADS"); | |
} | |
// See the comment in libcxx.zig for more details about this. | |
try argv.append("-D_LIBCPP_PSTL_CPU_BACKEND_SERIAL"); | |
try argv.append(try std.fmt.allocPrint(arena, "-D_LIBCPP_ABI_VERSION={d}", .{ | |
@intFromEnum(comp.libcxx_abi_version), | |
})); | |
try argv.append(try std.fmt.allocPrint(arena, "-D_LIBCPP_ABI_NAMESPACE=__{d}", .{ | |
@intFromEnum(comp.libcxx_abi_version), | |
})); | |
} | |
if (comp.config.link_libunwind) { | |
const libunwind_include_path = try std.fs.path.join(arena, &[_][]const u8{ | |
comp.zig_lib_directory.path.?, "libunwind", "include", | |
}); | |
try argv.append("-isystem"); | |
try argv.append(libunwind_include_path); | |
} | |
if (comp.config.link_libc) { | |
if (target.isGnuLibC()) { | |
const target_version = target.os.version_range.linux.glibc; | |
const glibc_minor_define = try std.fmt.allocPrint(arena, "-D__GLIBC_MINOR__={d}", .{ | |
target_version.minor, | |
}); | |
try argv.append(glibc_minor_define); | |
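// e.g. targeting x86_64-linux-gnu.2.31 this appends -D__GLIBC_MINOR__=31. | |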
} else if (target.isMinGW()) { | |
try argv.append("-D__MSVCRT_VERSION__=0xE00"); // use ucrt | |
} | |
} | |
const llvm_triple = try @import("codegen/llvm.zig").targetTriple(arena, target); | |
try argv.appendSlice(&[_][]const u8{ "-target", llvm_triple }); | |
if (target.os.tag == .windows) switch (ext) { | |
.c, .cpp, .m, .mm, .h, .hpp, .hm, .hmm, .cu, .rc, .assembly, .assembly_with_cpp => { | |
const minver: u16 = @truncate(@intFromEnum(target.os.getVersionRange().windows.min) >> 16); | |
try argv.append( | |
try std.fmt.allocPrint(arena, "-D_WIN32_WINNT=0x{x:0>4}", .{minver}), | |
); | |
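// e.g. a minimum supported version of Windows 10 yields -D_WIN32_WINNT=0x0a00. | |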
}, | |
else => {}, | |
}; | |
switch (ext) { | |
.c, .cpp, .m, .mm, .h, .hpp, .hm, .hmm, .cu, .rc => { | |
try argv.appendSlice(&[_][]const u8{ | |
"-nostdinc", | |
"-fno-spell-checking", | |
}); | |
if (comp.config.lto) { | |
try argv.append("-flto"); | |
} | |
if (ext == .mm) { | |
try argv.append("-ObjC++"); | |
} | |
for (comp.libc_framework_dir_list) |framework_dir| { | |
try argv.appendSlice(&.{ "-iframework", framework_dir }); | |
} | |
for (comp.framework_dirs) |framework_dir| { | |
try argv.appendSlice(&.{ "-F", framework_dir }); | |
} | |
// According to Rich Felker, libc headers are supposed to go before C language | |
// headers. However, as noted by @dimenus, appending libc headers before | |
// c_headers breaks intrinsics and other compiler-specific items. | |
const c_headers_dir = try std.fs.path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, "include" }); | |
try argv.append("-isystem"); | |
try argv.append(c_headers_dir); | |
if (ext == .rc) { | |
for (comp.rc_include_dir_list) |include_dir| { | |
try argv.append("-isystem"); | |
try argv.append(include_dir); | |
} | |
} else { | |
for (comp.libc_include_dir_list) |include_dir| { | |
try argv.append("-isystem"); | |
try argv.append(include_dir); | |
} | |
} | |
if (target.cpu.model.llvm_name) |llvm_name| { | |
try argv.appendSlice(&[_][]const u8{ | |
"-Xclang", "-target-cpu", "-Xclang", llvm_name, | |
}); | |
} | |
// It would be really nice if there was a more compact way to communicate this info to Clang. | |
const all_features_list = target.cpu.arch.allFeaturesList(); | |
try argv.ensureUnusedCapacity(all_features_list.len * 4); | |
for (all_features_list, 0..) |feature, index_usize| { | |
const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize)); | |
const is_enabled = target.cpu.features.isEnabled(index); | |
if (feature.llvm_name) |llvm_name| { | |
argv.appendSliceAssumeCapacity(&[_][]const u8{ "-Xclang", "-target-feature", "-Xclang" }); | |
const plus_or_minus = "-+"[@intFromBool(is_enabled)]; | |
const arg = try std.fmt.allocPrint(arena, "{c}{s}", .{ plus_or_minus, llvm_name }); | |
argv.appendAssumeCapacity(arg); | |
} | |
} | |
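// For example, an x86_64 target with avx2 enabled and sse4.2 disabled emits, | |
// among many other pairs: | |
//   -Xclang -target-feature -Xclang +avx2 | |
//   -Xclang -target-feature -Xclang -sse4.2 | |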
if (mod.code_model != .default) { | |
try argv.append(try std.fmt.allocPrint(arena, "-mcmodel={s}", .{@tagName(mod.code_model)})); | |
} | |
switch (target.os.tag) { | |
.windows => { | |
// windows.h has files such as pshpack1.h which use #pragma pack, | |
// triggering a clang warning, so we disable that warning for this target. | |
if (target.abi.isGnu()) { | |
try argv.append("-Wno-pragma-pack"); | |
} | |
}, | |
.macos => { | |
try argv.ensureUnusedCapacity(2); | |
// Pass the proper -m<os>-version-min argument for darwin. | |
const ver = target.os.version_range.semver.min; | |
argv.appendAssumeCapacity(try std.fmt.allocPrint(arena, "-mmacos-version-min={d}.{d}.{d}", .{ | |
ver.major, ver.minor, ver.patch, | |
})); | |
// This avoids a warning that sometimes occurs when | |
// providing both a -target argument that contains a | |
// version as well as the -mmacos-version-min argument | |
// passed above. Zig provides the correct value in both | |
// places, so it doesn't matter which one gets overridden. | |
argv.appendAssumeCapacity("-Wno-overriding-t-option"); | |
}, | |
.ios, .tvos, .watchos => switch (target.cpu.arch) { | |
// Pass the proper -m<os>-version-min argument for darwin. | |
.x86, .x86_64 => { | |
const ver = target.os.version_range.semver.min; | |
try argv.append(try std.fmt.allocPrint( | |
arena, | |
"-m{s}-simulator-version-min={d}.{d}.{d}", | |
.{ @tagName(target.os.tag), ver.major, ver.minor, ver.patch }, | |
)); | |
}, | |
else => { | |
const ver = target.os.version_range.semver.min; | |
try argv.append(try std.fmt.allocPrint(arena, "-m{s}-version-min={d}.{d}.{d}", .{ | |
@tagName(target.os.tag), ver.major, ver.minor, ver.patch, | |
})); | |
}, | |
}, | |
else => {}, | |
} | |
if (target.cpu.arch.isThumb()) { | |
try argv.append("-mthumb"); | |
} | |
if (mod.sanitize_c and !mod.sanitize_thread) { | |
try argv.append("-fsanitize=undefined"); | |
try argv.append("-fsanitize-trap=undefined"); | |
// It is very common, and well-defined, for a pointer on one side of a C ABI | |
// to have a different but compatible element type. Examples include: | |
// `char*` vs `uint8_t*` on a system with 8-bit bytes | |
// `const char*` vs `char*` | |
// `char*` vs `unsigned char*` | |
// Without this flag, Clang would invoke UBSAN when such an extern | |
// function was called. | |
try argv.append("-fno-sanitize=function"); | |
} else if (mod.sanitize_c and mod.sanitize_thread) { | |
try argv.append("-fsanitize=undefined,thread"); | |
try argv.append("-fsanitize-trap=undefined"); | |
try argv.append("-fno-sanitize=function"); | |
} else if (!mod.sanitize_c and mod.sanitize_thread) { | |
try argv.append("-fsanitize=thread"); | |
} | |
if (mod.red_zone) { | |
try argv.append("-mred-zone"); | |
} else if (target_util.hasRedZone(target)) { | |
try argv.append("-mno-red-zone"); | |
} | |
if (mod.omit_frame_pointer) { | |
try argv.append("-fomit-frame-pointer"); | |
} else { | |
try argv.append("-fno-omit-frame-pointer"); | |
} | |
const ssp_buf_size = mod.stack_protector; | |
if (ssp_buf_size != 0) { | |
try argv.appendSlice(&[_][]const u8{ | |
"-fstack-protector-strong", | |
"--param", | |
try std.fmt.allocPrint(arena, "ssp-buffer-size={d}", .{ssp_buf_size}), | |
}); | |
} else { | |
try argv.append("-fno-stack-protector"); | |
} | |
switch (mod.optimize_mode) { | |
.Debug => { | |
// windows c runtime requires -D_DEBUG if using debug libraries | |
try argv.append("-D_DEBUG"); | |
// Clang has -Og for compatibility with GCC, but currently it is just equivalent | |
// to -O1. Besides potentially impairing debugging, -O1/-Og significantly | |
// increases compile times. | |
try argv.append("-O0"); | |
}, | |
.ReleaseSafe => { | |
// See the comment in the .ReleaseFast case below for why we pass -O2 | |
// rather than -O3 here. | |
try argv.append("-O2"); | |
try argv.append("-D_FORTIFY_SOURCE=2"); | |
}, | |
.ReleaseFast => { | |
try argv.append("-DNDEBUG"); | |
// Here we pass -O2 rather than -O3 because, although Zig code gets the | |
// equivalent of -O3, Zig has better detection and prevention of undefined | |
// behavior, so -O3 is safer for Zig code than it is for C code. Also, C | |
// programmers are used to their code running at -O2, so the -O3 path has | |
// been tested less. | |
try argv.append("-O2"); | |
}, | |
.ReleaseSmall => { | |
try argv.append("-DNDEBUG"); | |
try argv.append("-Os"); | |
}, | |
} | |
if (target_util.supports_fpic(target) and mod.pic) { | |
try argv.append("-fPIC"); | |
} | |
if (mod.unwind_tables) { | |
try argv.append("-funwind-tables"); | |
} else { | |
try argv.append("-fno-unwind-tables"); | |
} | |
}, | |
.shared_library, .ll, .bc, .unknown, .static_library, .object, .def, .zig, .res, .manifest => {}, | |
.assembly, .assembly_with_cpp => { | |
if (ext == .assembly_with_cpp) { | |
const c_headers_dir = try std.fs.path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, "include" }); | |
try argv.append("-isystem"); | |
try argv.append(c_headers_dir); | |
} | |
// The Clang assembler does not accept the list of CPU features like the | |
// compiler frontend does. Therefore we must hard-code the -m flags for | |
// all CPU features here. | |
switch (target.cpu.arch) { | |
.riscv32, .riscv64 => { | |
const RvArchFeat = struct { char: u8, feat: std.Target.riscv.Feature }; | |
const letters = [_]RvArchFeat{ | |
.{ .char = 'm', .feat = .m }, | |
.{ .char = 'a', .feat = .a }, | |
.{ .char = 'f', .feat = .f }, | |
.{ .char = 'd', .feat = .d }, | |
.{ .char = 'c', .feat = .c }, | |
}; | |
const prefix: []const u8 = if (target.cpu.arch == .riscv64) "rv64" else "rv32"; | |
const prefix_len = 4; | |
assert(prefix.len == prefix_len); | |
var march_buf: [prefix_len + letters.len + 1]u8 = undefined; | |
var march_index: usize = prefix_len; | |
@memcpy(march_buf[0..prefix.len], prefix); | |
if (std.Target.riscv.featureSetHas(target.cpu.features, .e)) { | |
march_buf[march_index] = 'e'; | |
} else { | |
march_buf[march_index] = 'i'; | |
} | |
march_index += 1; | |
for (letters) |letter| { | |
if (std.Target.riscv.featureSetHas(target.cpu.features, letter.feat)) { | |
march_buf[march_index] = letter.char; | |
march_index += 1; | |
} | |
} | |
const march_arg = try std.fmt.allocPrint(arena, "-march={s}", .{ | |
march_buf[0..march_index], | |
}); | |
try argv.append(march_arg); | |
if (std.Target.riscv.featureSetHas(target.cpu.features, .relax)) { | |
try argv.append("-mrelax"); | |
} else { | |
try argv.append("-mno-relax"); | |
} | |
if (std.Target.riscv.featureSetHas(target.cpu.features, .save_restore)) { | |
try argv.append("-msave-restore"); | |
} else { | |
try argv.append("-mno-save-restore"); | |
} | |
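// For example, an rv64 CPU with the m, a, f, d, and c extensions, the relax | |
// feature, and no save-restore support results in: | |
//   -march=rv64imafdc -mrelax -mno-save-restore | |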
}, | |
.mips, .mipsel, .mips64, .mips64el => { | |
if (target.cpu.model.llvm_name) |llvm_name| { | |
try argv.append(try std.fmt.allocPrint(arena, "-march={s}", .{llvm_name})); | |
} | |
if (std.Target.mips.featureSetHas(target.cpu.features, .soft_float)) { | |
try argv.append("-msoft-float"); | |
} | |
}, | |
else => { | |
// TODO | |
}, | |
} | |
if (target_util.clangAssemblerSupportsMcpuArg(target)) { | |
if (target.cpu.model.llvm_name) |llvm_name| { | |
try argv.append(try std.fmt.allocPrint(arena, "-mcpu={s}", .{llvm_name})); | |
} | |
} | |
}, | |
} | |
try argv.ensureUnusedCapacity(2); | |
switch (comp.config.debug_format) { | |
.strip => {}, | |
.code_view => { | |
// -g is required here because -gcodeview doesn't trigger debug info | |
// generation; it only changes the type of information generated. | |
argv.appendSliceAssumeCapacity(&.{ "-g", "-gcodeview" }); | |
}, | |
.dwarf => |f| { | |
argv.appendAssumeCapacity("-gdwarf-4"); | |
switch (f) { | |
.@"32" => argv.appendAssumeCapacity("-gdwarf32"), | |
.@"64" => argv.appendAssumeCapacity("-gdwarf64"), | |
} | |
}, | |
} | |
if (target_util.llvmMachineAbi(target)) |mabi| { | |
try argv.append(try std.fmt.allocPrint(arena, "-mabi={s}", .{mabi})); | |
} | |
if (out_dep_path) |p| { | |
try argv.appendSlice(&[_][]const u8{ "-MD", "-MV", "-MF", p }); | |
} | |
// We never want clang to invoke the system assembler for anything, so we | |
// would like this option always enabled. However, it only matters for some | |
// targets, so to avoid "unused parameter" warnings and to keep CLI spam to | |
// a minimum, we only put the flag on the command line when it is necessary. | |
if (target_util.clangMightShellOutForAssembly(target)) { | |
try argv.append("-integrated-as"); | |
} | |
if (target.os.tag == .freestanding) { | |
try argv.append("-ffreestanding"); | |
} | |
if (mod.resolved_target.is_native_os and mod.resolved_target.is_native_abi) { | |
try argv.ensureUnusedCapacity(comp.native_system_include_paths.len * 2); | |
for (comp.native_system_include_paths) |include_path| { | |
argv.appendAssumeCapacity("-isystem"); | |
argv.appendAssumeCapacity(include_path); | |
} | |
} | |
try argv.appendSlice(comp.global_cc_argv); | |
try argv.appendSlice(mod.cc_argv); | |
} | |
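// A rough sketch of the net effect (exact flags vary with target and module | |
// options): for a .c file in Debug mode targeting x86_64-linux-gnu, the argv | |
// built above ends up along the lines of: | |
//   --no-default-config -fno-caret-diagnostics -target x86_64-unknown-linux-gnu | |
//   -nostdinc -fno-spell-checking -isystem <zig lib dir>/include | |
//   -Xclang -target-cpu -Xclang <cpu> ... -fno-omit-frame-pointer -D_DEBUG -O0 | |
//   -fPIC -funwind-tables -gdwarf-4 ... | |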
fn failCObj( | |
comp: *Compilation, | |
c_object: *CObject, | |
comptime format: []const u8, | |
args: anytype, | |
) SemaError { | |
@setCold(true); | |
const diag_bundle = blk: { | |
const diag_bundle = try comp.gpa.create(CObject.Diag.Bundle); | |
diag_bundle.* = .{}; | |
errdefer diag_bundle.destroy(comp.gpa); | |
try diag_bundle.file_names.ensureTotalCapacity(comp.gpa, 1); | |
diag_bundle.file_names.putAssumeCapacity(1, try comp.gpa.dupe(u8, c_object.src.src_path)); | |
diag_bundle.diags = try comp.gpa.alloc(CObject.Diag, 1); | |
diag_bundle.diags[0] = .{}; | |
diag_bundle.diags[0].level = 3; | |
diag_bundle.diags[0].msg = try std.fmt.allocPrint(comp.gpa, format, args); | |
diag_bundle.diags[0].src_loc.file = 1; | |
break :blk diag_bundle; | |
}; | |
return comp.failCObjWithOwnedDiagBundle(c_object, diag_bundle); | |
} | |
fn failCObjWithOwnedDiagBundle( | |
comp: *Compilation, | |
c_object: *CObject, | |
diag_bundle: *CObject.Diag.Bundle, | |
) SemaError { | |
@setCold(true); | |
{ | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
{ | |
errdefer diag_bundle.destroy(comp.gpa); | |
try comp.failed_c_objects.ensureUnusedCapacity(comp.gpa, 1); | |
} | |
comp.failed_c_objects.putAssumeCapacityNoClobber(c_object, diag_bundle); | |
} | |
c_object.status = .failure; | |
return error.AnalysisFail; | |
} | |
fn failWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, comptime format: []const u8, args: anytype) SemaError { | |
@setCold(true); | |
var bundle: ErrorBundle.Wip = undefined; | |
try bundle.init(comp.gpa); | |
errdefer bundle.deinit(); | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.printString(format, args), | |
.src_loc = try bundle.addSourceLocation(.{ | |
.src_path = try bundle.addString(switch (win32_resource.src) { | |
.rc => |rc_src| rc_src.src_path, | |
.manifest => |manifest_src| manifest_src, | |
}), | |
.line = 0, | |
.column = 0, | |
.span_start = 0, | |
.span_main = 0, | |
.span_end = 0, | |
}), | |
}); | |
const finished_bundle = try bundle.toOwnedBundle(""); | |
return comp.failWin32ResourceWithOwnedBundle(win32_resource, finished_bundle); | |
} | |
fn failWin32ResourceWithOwnedBundle( | |
comp: *Compilation, | |
win32_resource: *Win32Resource, | |
err_bundle: ErrorBundle, | |
) SemaError { | |
@setCold(true); | |
{ | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
try comp.failed_win32_resources.putNoClobber(comp.gpa, win32_resource, err_bundle); | |
} | |
win32_resource.status = .failure; | |
return error.AnalysisFail; | |
} | |
fn failWin32ResourceCli( | |
comp: *Compilation, | |
win32_resource: *Win32Resource, | |
diagnostics: *resinator.cli.Diagnostics, | |
) SemaError { | |
@setCold(true); | |
var bundle: ErrorBundle.Wip = undefined; | |
try bundle.init(comp.gpa); | |
errdefer bundle.deinit(); | |
try bundle.addRootErrorMessage(.{ | |
.msg = try bundle.addString("invalid command line option(s)"), | |
.src_loc = try bundle.addSourceLocation(.{ | |
.src_path = try bundle.addString(switch (win32_resource.src) { | |
.rc => |rc_src| rc_src.src_path, | |
.manifest => |manifest_src| manifest_src, | |
}), | |
.line = 0, | |
.column = 0, | |
.span_start = 0, | |
.span_main = 0, | |
.span_end = 0, | |
}), | |
}); | |
var cur_err: ?ErrorBundle.ErrorMessage = null; | |
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .{}; | |
defer cur_notes.deinit(comp.gpa); | |
for (diagnostics.errors.items) |err_details| { | |
switch (err_details.type) { | |
.err => { | |
if (cur_err) |err| { | |
try win32ResourceFlushErrorMessage(&bundle, err, cur_notes.items); | |
} | |
cur_err = .{ | |
.msg = try bundle.addString(err_details.msg.items), | |
}; | |
cur_notes.clearRetainingCapacity(); | |
}, | |
.warning => cur_err = null, | |
.note => { | |
if (cur_err == null) continue; | |
cur_err.?.notes_len += 1; | |
try cur_notes.append(comp.gpa, .{ | |
.msg = try bundle.addString(err_details.msg.items), | |
}); | |
}, | |
} | |
} | |
if (cur_err) |err| { | |
try win32ResourceFlushErrorMessage(&bundle, err, cur_notes.items); | |
} | |
const finished_bundle = try bundle.toOwnedBundle(""); | |
return comp.failWin32ResourceWithOwnedBundle(win32_resource, finished_bundle); | |
} | |
fn failWin32ResourceCompile( | |
comp: *Compilation, | |
win32_resource: *Win32Resource, | |
source: []const u8, | |
diagnostics: *resinator.errors.Diagnostics, | |
opt_mappings: ?resinator.source_mapping.SourceMappings, | |
) SemaError { | |
@setCold(true); | |
var bundle: ErrorBundle.Wip = undefined; | |
try bundle.init(comp.gpa); | |
errdefer bundle.deinit(); | |
var msg_buf: std.ArrayListUnmanaged(u8) = .{}; | |
defer msg_buf.deinit(comp.gpa); | |
var cur_err: ?ErrorBundle.ErrorMessage = null; | |
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .{}; | |
defer cur_notes.deinit(comp.gpa); | |
for (diagnostics.errors.items) |err_details| { | |
switch (err_details.type) { | |
.hint => continue, | |
// Clear the current error so that notes don't bleed into unassociated errors | |
.warning => { | |
cur_err = null; | |
continue; | |
}, | |
.note => if (cur_err == null) continue, | |
.err => {}, | |
} | |
const err_line, const err_filename = blk: { | |
if (opt_mappings) |mappings| { | |
const corresponding_span = mappings.get(err_details.token.line_number); | |
const corresponding_file = mappings.files.get(corresponding_span.filename_offset); | |
const err_line = corresponding_span.start_line; | |
break :blk .{ err_line, corresponding_file }; | |
} else { | |
break :blk .{ err_details.token.line_number, "<generated rc>" }; | |
} | |
}; | |
const source_line_start = err_details.token.getLineStart(source); | |
const column = err_details.token.calculateColumn(source, 1, source_line_start); | |
msg_buf.clearRetainingCapacity(); | |
try err_details.render(msg_buf.writer(comp.gpa), source, diagnostics.strings.items); | |
const src_loc = src_loc: { | |
var src_loc: ErrorBundle.SourceLocation = .{ | |
.src_path = try bundle.addString(err_filename), | |
.line = @intCast(err_line - 1), // 1-based -> 0-based | |
.column = @intCast(column), | |
.span_start = 0, | |
.span_main = 0, | |
.span_end = 0, | |
}; | |
if (err_details.print_source_line) { | |
const source_line = err_details.token.getLine(source, source_line_start); | |
const visual_info = err_details.visualTokenInfo(source_line_start, source_line_start + source_line.len); | |
src_loc.span_start = @intCast(visual_info.point_offset - visual_info.before_len); | |
src_loc.span_main = @intCast(visual_info.point_offset); | |
src_loc.span_end = @intCast(visual_info.point_offset + 1 + visual_info.after_len); | |
src_loc.source_line = try bundle.addString(source_line); | |
} | |
break :src_loc try bundle.addSourceLocation(src_loc); | |
}; | |
switch (err_details.type) { | |
.err => { | |
if (cur_err) |err| { | |
try win32ResourceFlushErrorMessage(&bundle, err, cur_notes.items); | |
} | |
cur_err = .{ | |
.msg = try bundle.addString(msg_buf.items), | |
.src_loc = src_loc, | |
}; | |
cur_notes.clearRetainingCapacity(); | |
}, | |
.note => { | |
cur_err.?.notes_len += 1; | |
try cur_notes.append(comp.gpa, .{ | |
.msg = try bundle.addString(msg_buf.items), | |
.src_loc = src_loc, | |
}); | |
}, | |
.warning, .hint => unreachable, | |
} | |
} | |
if (cur_err) |err| { | |
try win32ResourceFlushErrorMessage(&bundle, err, cur_notes.items); | |
} | |
const finished_bundle = try bundle.toOwnedBundle(""); | |
return comp.failWin32ResourceWithOwnedBundle(win32_resource, finished_bundle); | |
} | |
fn win32ResourceFlushErrorMessage(wip: *ErrorBundle.Wip, msg: ErrorBundle.ErrorMessage, notes: []const ErrorBundle.ErrorMessage) !void { | |
try wip.addRootErrorMessage(msg); | |
const notes_start = try wip.reserveNotes(@intCast(notes.len)); | |
for (notes_start.., notes) |i, note| { | |
wip.extra.items[i] = @intFromEnum(wip.addErrorMessageAssumeCapacity(note)); | |
} | |
} | |
pub const FileExt = enum { | |
c, | |
cpp, | |
cu, | |
h, | |
hpp, | |
hm, | |
hmm, | |
m, | |
mm, | |
ll, | |
bc, | |
assembly, | |
assembly_with_cpp, | |
shared_library, | |
object, | |
static_library, | |
zig, | |
def, | |
rc, | |
res, | |
manifest, | |
unknown, | |
pub fn clangSupportsDepFile(ext: FileExt) bool { | |
return switch (ext) { | |
.c, .cpp, .h, .hpp, .hm, .hmm, .m, .mm, .cu => true, | |
.ll, | |
.bc, | |
.assembly, | |
.assembly_with_cpp, | |
.shared_library, | |
.object, | |
.static_library, | |
.zig, | |
.def, | |
.rc, | |
.res, | |
.manifest, | |
.unknown, | |
=> false, | |
}; | |
} | |
pub fn canonicalName(ext: FileExt, target: Target) [:0]const u8 { | |
return switch (ext) { | |
.c => ".c", | |
.cpp => ".cpp", | |
.cu => ".cu", | |
.h => ".h", | |
.hpp => ".h", | |
.hm => ".h", | |
.hmm => ".h", | |
.m => ".m", | |
.mm => ".mm", | |
.ll => ".ll", | |
.bc => ".bc", | |
.assembly => ".s", | |
.assembly_with_cpp => ".S", | |
.shared_library => target.dynamicLibSuffix(), | |
.object => target.ofmt.fileExt(target.cpu.arch), | |
.static_library => target.staticLibSuffix(), | |
.zig => ".zig", | |
.def => ".def", | |
.rc => ".rc", | |
.res => ".res", | |
.manifest => ".manifest", | |
.unknown => "", | |
}; | |
} | |
}; | |
pub fn hasObjectExt(filename: []const u8) bool { | |
return mem.endsWith(u8, filename, ".o") or mem.endsWith(u8, filename, ".obj"); | |
} | |
pub fn hasStaticLibraryExt(filename: []const u8) bool { | |
return mem.endsWith(u8, filename, ".a") or mem.endsWith(u8, filename, ".lib"); | |
} | |
pub fn hasCExt(filename: []const u8) bool { | |
return mem.endsWith(u8, filename, ".c"); | |
} | |
pub fn hasCppExt(filename: []const u8) bool { | |
return mem.endsWith(u8, filename, ".C") or | |
mem.endsWith(u8, filename, ".cc") or | |
mem.endsWith(u8, filename, ".cpp") or | |
mem.endsWith(u8, filename, ".cxx") or | |
mem.endsWith(u8, filename, ".stub"); | |
} | |
pub fn hasObjCExt(filename: []const u8) bool { | |
return mem.endsWith(u8, filename, ".m"); | |
} | |
pub fn hasObjCppExt(filename: []const u8) bool { | |
return mem.endsWith(u8, filename, ".mm"); | |
} | |
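// A quick exercise of the extension helpers above, in the same spirit as the | |
// "classifyFileExt" test below. | |
test "file extension helpers" { | |
try std.testing.expect(hasObjectExt("foo.o")); | |
try std.testing.expect(hasObjectExt("foo.obj")); | |
try std.testing.expect(hasStaticLibraryExt("libfoo.a")); | |
try std.testing.expect(!hasStaticLibraryExt("libfoo.so")); | |
try std.testing.expect(hasCppExt("foo.cc")); | |
try std.testing.expect(!hasCppExt("foo.c")); | |
try std.testing.expect(hasObjCppExt("foo.mm")); | |
} | |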
pub fn hasSharedLibraryExt(filename: []const u8) bool { | |
if (mem.endsWith(u8, filename, ".so") or | |
mem.endsWith(u8, filename, ".dll") or | |
mem.endsWith(u8, filename, ".dylib") or | |
mem.endsWith(u8, filename, ".tbd")) | |
{ | |
return true; | |
} | |
// Look for .so.X, .so.X.Y, .so.X.Y.Z | |
var it = mem.splitScalar(u8, filename, '.'); | |
_ = it.first(); | |
var so_txt = it.next() orelse return false; | |
while (!mem.eql(u8, so_txt, "so")) { | |
so_txt = it.next() orelse return false; | |
} | |
const n1 = it.next() orelse return false; | |
const n2 = it.next(); | |
const n3 = it.next(); | |
_ = std.fmt.parseInt(u32, n1, 10) catch return false; | |
if (n2) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false; | |
if (n3) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false; | |
if (it.next() != null) return false; | |
return true; | |
} | |
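// The versioned-suffix logic above is easy to get wrong, so pin down a few | |
// edge cases here (complementing the "classifyFileExt" test below). | |
test "hasSharedLibraryExt versioned names" { | |
try std.testing.expect(hasSharedLibraryExt("libfoo.so")); | |
try std.testing.expect(hasSharedLibraryExt("libfoo.so.1.2.3")); | |
try std.testing.expect(!hasSharedLibraryExt("libfoo.so.1.2.3.4")); | |
try std.testing.expect(!hasSharedLibraryExt("libfoo.so.x")); | |
try std.testing.expect(hasSharedLibraryExt("foo.dylib")); | |
} | |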
pub fn classifyFileExt(filename: []const u8) FileExt { | |
if (hasCExt(filename)) { | |
return .c; | |
} else if (hasCppExt(filename)) { | |
return .cpp; | |
} else if (hasObjCExt(filename)) { | |
return .m; | |
} else if (hasObjCppExt(filename)) { | |
return .mm; | |
} else if (mem.endsWith(u8, filename, ".ll")) { | |
return .ll; | |
} else if (mem.endsWith(u8, filename, ".bc")) { | |
return .bc; | |
} else if (mem.endsWith(u8, filename, ".s")) { | |
return .assembly; | |
} else if (mem.endsWith(u8, filename, ".S")) { | |
return .assembly_with_cpp; | |
} else if (mem.endsWith(u8, filename, ".h")) { | |
return .h; | |
} else if (mem.endsWith(u8, filename, ".zig")) { | |
return .zig; | |
} else if (hasSharedLibraryExt(filename)) { | |
return .shared_library; | |
} else if (hasStaticLibraryExt(filename)) { | |
return .static_library; | |
} else if (hasObjectExt(filename)) { | |
return .object; | |
} else if (mem.endsWith(u8, filename, ".cu")) { | |
return .cu; | |
} else if (mem.endsWith(u8, filename, ".def")) { | |
return .def; | |
} else if (std.ascii.endsWithIgnoreCase(filename, ".rc")) { | |
return .rc; | |
} else if (std.ascii.endsWithIgnoreCase(filename, ".res")) { | |
return .res; | |
} else if (std.ascii.endsWithIgnoreCase(filename, ".manifest")) { | |
return .manifest; | |
} else { | |
return .unknown; | |
} | |
} | |
test "classifyFileExt" { | |
try std.testing.expectEqual(FileExt.cpp, classifyFileExt("foo.cc")); | |
try std.testing.expectEqual(FileExt.m, classifyFileExt("foo.m")); | |
try std.testing.expectEqual(FileExt.mm, classifyFileExt("foo.mm")); | |
try std.testing.expectEqual(FileExt.unknown, classifyFileExt("foo.nim")); | |
try std.testing.expectEqual(FileExt.shared_library, classifyFileExt("foo.so")); | |
try std.testing.expectEqual(FileExt.shared_library, classifyFileExt("foo.so.1")); | |
try std.testing.expectEqual(FileExt.shared_library, classifyFileExt("foo.so.1.2")); | |
try std.testing.expectEqual(FileExt.shared_library, classifyFileExt("foo.so.1.2.3")); | |
try std.testing.expectEqual(FileExt.unknown, classifyFileExt("foo.so.1.2.3~")); | |
try std.testing.expectEqual(FileExt.zig, classifyFileExt("foo.zig")); | |
} | |
pub fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 { | |
if (comp.wantBuildGLibCFromSource() or | |
comp.wantBuildMuslFromSource() or | |
comp.wantBuildMinGWFromSource() or | |
comp.wantBuildWasiLibcFromSource()) | |
{ | |
return comp.crt_files.get(basename).?.full_object_path; | |
} | |
const lci = comp.libc_installation orelse return error.LibCInstallationNotAvailable; | |
const crt_dir_path = lci.crt_dir orelse return error.LibCInstallationMissingCRTDir; | |
const full_path = try std.fs.path.join(arena, &[_][]const u8{ crt_dir_path, basename }); | |
return full_path; | |
} | |
fn wantBuildLibCFromSource(comp: Compilation) bool { | |
const is_exe_or_dyn_lib = switch (comp.config.output_mode) { | |
.Obj => false, | |
.Lib => comp.config.link_mode == .Dynamic, | |
.Exe => true, | |
}; | |
const ofmt = comp.root_mod.resolved_target.result.ofmt; | |
return comp.config.link_libc and is_exe_or_dyn_lib and | |
comp.libc_installation == null and ofmt != .c; | |
} | |
fn wantBuildGLibCFromSource(comp: Compilation) bool { | |
return comp.wantBuildLibCFromSource() and comp.getTarget().isGnuLibC(); | |
} | |
fn wantBuildMuslFromSource(comp: Compilation) bool { | |
return comp.wantBuildLibCFromSource() and comp.getTarget().isMusl() and | |
!comp.getTarget().isWasm(); | |
} | |
fn wantBuildWasiLibcFromSource(comp: Compilation) bool { | |
return comp.wantBuildLibCFromSource() and comp.getTarget().isWasm() and | |
comp.getTarget().os.tag == .wasi; | |
} | |
fn wantBuildMinGWFromSource(comp: Compilation) bool { | |
return comp.wantBuildLibCFromSource() and comp.getTarget().isMinGW(); | |
} | |
fn wantBuildLibUnwindFromSource(comp: *Compilation) bool { | |
const is_exe_or_dyn_lib = switch (comp.config.output_mode) { | |
.Obj => false, | |
.Lib => comp.config.link_mode == .Dynamic, | |
.Exe => true, | |
}; | |
const ofmt = comp.root_mod.resolved_target.result.ofmt; | |
return is_exe_or_dyn_lib and comp.config.link_libunwind and ofmt != .c; | |
} | |
fn setAllocFailure(comp: *Compilation) void { | |
log.debug("memory allocation failure", .{}); | |
comp.alloc_failure_occurred = true; | |
} | |
/// Assumes that the Compilation mutex is locked. | |
/// See also `lockAndSetMiscFailure`. | |
pub fn setMiscFailure( | |
comp: *Compilation, | |
tag: MiscTask, | |
comptime format: []const u8, | |
args: anytype, | |
) void { | |
comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1) catch return comp.setAllocFailure(); | |
const msg = std.fmt.allocPrint(comp.gpa, format, args) catch return comp.setAllocFailure(); | |
const gop = comp.misc_failures.getOrPutAssumeCapacity(tag); | |
if (gop.found_existing) { | |
gop.value_ptr.deinit(comp.gpa); | |
} | |
gop.value_ptr.* = .{ .msg = msg }; | |
} | |
/// See also `setMiscFailure`. | |
pub fn lockAndSetMiscFailure( | |
comp: *Compilation, | |
tag: MiscTask, | |
comptime format: []const u8, | |
args: anytype, | |
) void { | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
return setMiscFailure(comp, tag, format, args); | |
} | |
fn parseLldStderr(comp: *Compilation, comptime prefix: []const u8, stderr: []const u8) Allocator.Error!void { | |
var context_lines = std.ArrayList([]const u8).init(comp.gpa); | |
defer context_lines.deinit(); | |
var current_err: ?*LldError = null; | |
var lines = mem.splitSequence(u8, stderr, if (builtin.os.tag == .windows) "\r\n" else "\n"); | |
while (lines.next()) |line| { | |
if (mem.startsWith(u8, line, prefix ++ ":")) { | |
if (current_err) |err| { | |
err.context_lines = try context_lines.toOwnedSlice(); | |
} | |
var split = mem.splitSequence(u8, line, "error: "); | |
_ = split.first(); | |
const duped_msg = try std.fmt.allocPrint(comp.gpa, "{s}: {s}", .{ prefix, split.rest() }); | |
errdefer comp.gpa.free(duped_msg); | |
current_err = try comp.lld_errors.addOne(comp.gpa); | |
current_err.?.* = .{ .msg = duped_msg }; | |
} else if (current_err != null) { | |
const context_prefix = ">>> "; | |
var trimmed = mem.trimRight(u8, line, &std.ascii.whitespace); | |
if (mem.startsWith(u8, trimmed, context_prefix)) { | |
trimmed = trimmed[context_prefix.len..]; | |
} | |
if (trimmed.len > 0) { | |
const duped_line = try comp.gpa.dupe(u8, trimmed); | |
try context_lines.append(duped_line); | |
} | |
} | |
} | |
if (current_err) |err| { | |
err.context_lines = try context_lines.toOwnedSlice(); | |
} | |
} | |
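// A sketch of the shape this parser expects, assuming a prefix of "ld.lld": | |
//   ld.lld: error: undefined symbol: foo | |
//   >>> referenced by main.o | |
// becomes one LldError with msg "ld.lld: undefined symbol: foo" and a single | |
// context line "referenced by main.o". | |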
pub fn lockAndParseLldStderr(comp: *Compilation, comptime prefix: []const u8, stderr: []const u8) void { | |
comp.mutex.lock(); | |
defer comp.mutex.unlock(); | |
comp.parseLldStderr(prefix, stderr) catch comp.setAllocFailure(); | |
} | |
pub fn dump_argv(argv: []const []const u8) void { | |
std.debug.getStderrMutex().lock(); | |
defer std.debug.getStderrMutex().unlock(); | |
const stderr = std.io.getStdErr().writer(); | |
for (argv[0 .. argv.len - 1]) |arg| { | |
nosuspend stderr.print("{s} ", .{arg}) catch return; | |
} | |
nosuspend stderr.print("{s}\n", .{argv[argv.len - 1]}) catch {}; | |
} | |
fn canBuildLibCompilerRt(target: std.Target, use_llvm: bool) bool { | |
switch (target.os.tag) { | |
.plan9 => return false, | |
else => {}, | |
} | |
switch (target.cpu.arch) { | |
.spirv32, .spirv64 => return false, | |
else => {}, | |
} | |
return switch (target_util.zigBackend(target, use_llvm)) { | |
.stage2_llvm => true, | |
.stage2_x86_64 => if (target.ofmt == .elf or target.ofmt == .macho) true else build_options.have_llvm, | |
else => build_options.have_llvm, | |
}; | |
} | |
/// Not to be confused with canBuildLibC, which builds musl, glibc, and similar. | |
/// This one builds lib/c.zig. | |
fn canBuildZigLibC(target: std.Target, use_llvm: bool) bool { | |
switch (target.os.tag) { | |
.plan9 => return false, | |
else => {}, | |
} | |
switch (target.cpu.arch) { | |
.spirv32, .spirv64 => return false, | |
else => {}, | |
} | |
return switch (target_util.zigBackend(target, use_llvm)) { | |
.stage2_llvm => true, | |
.stage2_x86_64 => if (target.ofmt == .elf or target.ofmt == .macho) true else build_options.have_llvm, | |
else => build_options.have_llvm, | |
}; | |
} | |
pub fn getZigBackend(comp: Compilation) std.builtin.CompilerBackend { | |
const target = comp.root_mod.resolved_target.result; | |
return target_util.zigBackend(target, comp.config.use_llvm); | |
} | |
pub fn updateSubCompilation( | |
parent_comp: *Compilation, | |
sub_comp: *Compilation, | |
misc_task: MiscTask, | |
prog_node: *std.Progress.Node, | |
) !void { | |
{ | |
var sub_node = prog_node.start(@tagName(misc_task), 0); | |
sub_node.activate(); | |
defer sub_node.end(); | |
try sub_comp.update(prog_node); | |
} | |
// Look for compilation errors in this sub-compilation. | |
const gpa = parent_comp.gpa; | |
var keep_errors = false; | |
var errors = try sub_comp.getAllErrorsAlloc(); | |
defer if (!keep_errors) errors.deinit(gpa); | |
if (errors.errorMessageCount() > 0) { | |
try parent_comp.misc_failures.ensureUnusedCapacity(gpa, 1); | |
parent_comp.misc_failures.putAssumeCapacityNoClobber(misc_task, .{ | |
.msg = try std.fmt.allocPrint(gpa, "sub-compilation of {s} failed", .{ | |
@tagName(misc_task), | |
}), | |
.children = errors, | |
}); | |
keep_errors = true; | |
return error.SubCompilationFailed; | |
} | |
} | |
fn buildOutputFromZig( | |
comp: *Compilation, | |
src_basename: []const u8, | |
output_mode: std.builtin.OutputMode, | |
out: *?CRTFile, | |
misc_task_tag: MiscTask, | |
prog_node: *std.Progress.Node, | |
) !void { | |
const tracy_trace = trace(@src()); | |
defer tracy_trace.end(); | |
const gpa = comp.gpa; | |
var arena_allocator = std.heap.ArenaAllocator.init(gpa); | |
defer arena_allocator.deinit(); | |
const arena = arena_allocator.allocator(); | |
assert(output_mode != .Exe); | |
const unwind_tables = comp.link_eh_frame_hdr; | |
const strip = comp.compilerRtStrip(); | |
const optimize_mode = comp.compilerRtOptMode(); | |
const config = try Config.resolve(.{ | |
.output_mode = output_mode, | |
.link_mode = .Static, | |
.resolved_target = comp.root_mod.resolved_target, | |
.is_test = false, | |
.have_zcu = true, | |
.emit_bin = true, | |
.root_optimize_mode = optimize_mode, | |
.root_strip = strip, | |
.link_libc = comp.config.link_libc, | |
.any_unwind_tables = unwind_tables, | |
}); | |
const root_mod = try Package.Module.create(arena, .{ | |
.global_cache_directory = comp.global_cache_directory, | |
.paths = .{ | |
.root = .{ .root_dir = comp.zig_lib_directory }, | |
.root_src_path = src_basename, | |
}, | |
.fully_qualified_name = "root", | |
.inherited = .{ | |
.resolved_target = comp.root_mod.resolved_target, | |
.strip = strip, | |
.stack_check = false, | |
.stack_protector = 0, | |
.red_zone = comp.root_mod.red_zone, | |
.omit_frame_pointer = comp.root_mod.omit_frame_pointer, | |
.unwind_tables = unwind_tables, | |
.pic = comp.root_mod.pic, | |
.optimize_mode = optimize_mode, | |
.structured_cfg = comp.root_mod.structured_cfg, | |
.code_model = comp.root_mod.code_model, | |
}, | |
.global = config, | |
.cc_argv = &.{}, | |
.parent = null, | |
.builtin_mod = null, | |
}); | |
const root_name = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len]; | |
const target = comp.getTarget(); | |
const bin_basename = try std.zig.binNameAlloc(arena, .{ | |
.root_name = root_name, | |
.target = target, | |
.output_mode = output_mode, | |
}); | |
const sub_compilation = try Compilation.create(gpa, arena, .{ | |
.global_cache_directory = comp.global_cache_directory, | |
.local_cache_directory = comp.global_cache_directory, | |
.zig_lib_directory = comp.zig_lib_directory, | |
.self_exe_path = comp.self_exe_path, | |
.config = config, | |
.root_mod = root_mod, | |
.cache_mode = .whole, | |
.root_name = root_name, | |
.thread_pool = comp.thread_pool, | |
.libc_installation = comp.libc_installation, | |
.emit_bin = .{ | |
.directory = null, // Put it in the cache directory. | |
.basename = bin_basename, | |
}, | |
.function_sections = true, | |
.data_sections = true, | |
.no_builtin = true, | |
.emit_h = null, | |
.verbose_cc = comp.verbose_cc, | |
.verbose_link = comp.verbose_link, | |
.verbose_air = comp.verbose_air, | |
.verbose_intern_pool = comp.verbose_intern_pool, | |
.verbose_generic_instances = comp.verbose_generic_instances,
.verbose_llvm_ir = comp.verbose_llvm_ir, | |
.verbose_llvm_bc = comp.verbose_llvm_bc, | |
.verbose_cimport = comp.verbose_cimport, | |
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features, | |
.clang_passthrough_mode = comp.clang_passthrough_mode, | |
.skip_linker_dependencies = true, | |
}); | |
defer sub_compilation.destroy(); | |
try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node); | |
assert(out.* == null); | |
out.* = try sub_compilation.toCrtFile(); | |
} | |
pub fn build_crt_file( | |
comp: *Compilation, | |
root_name: []const u8, | |
output_mode: std.builtin.OutputMode, | |
misc_task_tag: MiscTask, | |
prog_node: *std.Progress.Node, | |
/// These elements have to get mutated to add the owner module after it is | |
/// created within this function. | |
c_source_files: []CSourceFile, | |
) !void { | |
const tracy_trace = trace(@src()); | |
defer tracy_trace.end(); | |
const gpa = comp.gpa; | |
var arena_allocator = std.heap.ArenaAllocator.init(gpa); | |
defer arena_allocator.deinit(); | |
const arena = arena_allocator.allocator(); | |
const basename = try std.zig.binNameAlloc(gpa, .{ | |
.root_name = root_name, | |
.target = comp.root_mod.resolved_target.result, | |
.output_mode = output_mode, | |
}); | |
const config = try Config.resolve(.{ | |
.output_mode = output_mode, | |
.resolved_target = comp.root_mod.resolved_target, | |
.is_test = false, | |
.have_zcu = false, | |
.emit_bin = true, | |
.root_optimize_mode = comp.compilerRtOptMode(), | |
.root_strip = comp.compilerRtStrip(), | |
.link_libc = false, | |
.lto = switch (output_mode) { | |
.Lib => comp.config.lto, | |
.Obj, .Exe => false, | |
}, | |
}); | |
const root_mod = try Package.Module.create(arena, .{ | |
.global_cache_directory = comp.global_cache_directory, | |
.paths = .{ | |
.root = .{ .root_dir = comp.zig_lib_directory }, | |
.root_src_path = "", | |
}, | |
.fully_qualified_name = "root", | |
.inherited = .{ | |
.resolved_target = comp.root_mod.resolved_target, | |
.strip = comp.compilerRtStrip(), | |
.stack_check = false, | |
.stack_protector = 0, | |
.sanitize_c = false, | |
.sanitize_thread = false, | |
.red_zone = comp.root_mod.red_zone, | |
.omit_frame_pointer = comp.root_mod.omit_frame_pointer, | |
.valgrind = false, | |
.unwind_tables = false, | |
.pic = comp.root_mod.pic, | |
.optimize_mode = comp.compilerRtOptMode(), | |
.structured_cfg = comp.root_mod.structured_cfg, | |
}, | |
.global = config, | |
.cc_argv = &.{}, | |
.parent = null, | |
.builtin_mod = null, | |
}); | |
for (c_source_files) |*item| { | |
item.owner = root_mod; | |
} | |
const sub_compilation = try Compilation.create(gpa, arena, .{ | |
.local_cache_directory = comp.global_cache_directory, | |
.global_cache_directory = comp.global_cache_directory, | |
.zig_lib_directory = comp.zig_lib_directory, | |
.self_exe_path = comp.self_exe_path, | |
.cache_mode = .whole, | |
.config = config, | |
.root_mod = root_mod, | |
.root_name = root_name, | |
.thread_pool = comp.thread_pool, | |
.libc_installation = comp.libc_installation, | |
.emit_bin = .{ | |
.directory = null, // Put it in the cache directory. | |
.basename = basename, | |
}, | |
.emit_h = null, | |
.c_source_files = c_source_files, | |
.verbose_cc = comp.verbose_cc, | |
.verbose_link = comp.verbose_link, | |
.verbose_air = comp.verbose_air, | |
.verbose_intern_pool = comp.verbose_intern_pool, | |
.verbose_generic_instances = comp.verbose_generic_instances, | |
.verbose_llvm_ir = comp.verbose_llvm_ir, | |
.verbose_llvm_bc = comp.verbose_llvm_bc, | |
.verbose_cimport = comp.verbose_cimport, | |
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features, | |
.clang_passthrough_mode = comp.clang_passthrough_mode, | |
.skip_linker_dependencies = true, | |
}); | |
defer sub_compilation.destroy(); | |
try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node); | |
try comp.crt_files.ensureUnusedCapacity(gpa, 1); | |
comp.crt_files.putAssumeCapacityNoClobber(basename, try sub_compilation.toCrtFile()); | |
} | |
pub fn toCrtFile(comp: *Compilation) Allocator.Error!CRTFile { | |
return .{ | |
.full_object_path = try comp.local_cache_directory.join(comp.gpa, &.{ | |
comp.cache_use.whole.bin_sub_path.?, | |
}), | |
.lock = comp.cache_use.whole.moveLock(), | |
}; | |
} | |
pub fn addLinkLib(comp: *Compilation, lib_name: []const u8) !void { | |
// Avoid deadlocking on building import libs such as kernel32.lib | |
// This can happen when the user uses `build-exe foo.obj -lkernel32` and | |
// then when we create a sub-Compilation for zig libc, it also tries to | |
// build kernel32.lib. | |
if (comp.skip_linker_dependencies) return; | |
// This happens when an `extern "foo"` function is referenced. | |
// If we haven't seen this library yet and we're targeting Windows, we need | |
// to queue up a work item to produce the DLL import library for this. | |
const gop = try comp.system_libs.getOrPut(comp.gpa, lib_name); | |
if (!gop.found_existing) { | |
gop.value_ptr.* = .{ | |
.needed = true, | |
.weak = false, | |
.path = null, | |
}; | |
const target = comp.root_mod.resolved_target.result; | |
if (target.os.tag == .windows and target.ofmt != .c) { | |
try comp.work_queue.writeItem(.{ | |
.windows_import_lib = comp.system_libs.count() - 1, | |
}); | |
} | |
} | |
} | |
/// This decides the optimization mode for all zig-provided libraries, including | |
/// compiler-rt, libcxx, libc, libunwind, etc. | |
pub fn compilerRtOptMode(comp: Compilation) std.builtin.OptimizeMode { | |
if (comp.debug_compiler_runtime_libs) { | |
return comp.root_mod.optimize_mode; | |
} | |
const target = comp.root_mod.resolved_target.result; | |
switch (comp.root_mod.optimize_mode) { | |
.Debug, .ReleaseSafe => return target_util.defaultCompilerRtOptimizeMode(target), | |
.ReleaseFast => return .ReleaseFast, | |
.ReleaseSmall => return .ReleaseSmall, | |
} | |
} | |
/// This decides whether to strip debug info for all zig-provided libraries, including | |
/// compiler-rt, libcxx, libc, libunwind, etc. | |
pub fn compilerRtStrip(comp: Compilation) bool { | |
return comp.root_mod.strip; | |
} | |
//! All interned objects have both a value and a type. | |
//! This data structure is self-contained, with the following exceptions: | |
//! * Module.Namespace has a pointer to Module.File | |
//! * Module.Decl has a pointer to Module.CaptureScope | |
/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are | |
/// constructed lazily. | |
map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, | |
items: std.MultiArrayList(Item) = .{}, | |
extra: std.ArrayListUnmanaged(u32) = .{}, | |
/// On 32-bit systems, this array is ignored and extra is used for everything. | |
/// On 64-bit systems, this array is used for big integers and associated metadata. | |
/// Use the helper methods instead of accessing this directly in order to not | |
/// violate the above mechanism. | |
limbs: std.ArrayListUnmanaged(u64) = .{}, | |
/// In order to store references to strings in fewer bytes, we copy all | |
/// string bytes into here. String bytes can be null. It is up to whoever
/// is referencing the data here whether they want to store both index and length, | |
/// thus allowing null bytes, or store only index, and use null-termination. The | |
/// `string_bytes` array is agnostic to either usage. | |
string_bytes: std.ArrayListUnmanaged(u8) = .{}, | |
/// Rather than allocating Decl objects with an Allocator, we instead allocate | |
/// them with this SegmentedList. This provides four advantages: | |
/// * Stable memory so that one thread can access a Decl object while another | |
/// thread allocates additional Decl objects from this list. | |
/// * It allows us to use u32 indexes to reference Decl objects rather than | |
/// pointers, saving memory in Type, Value, and dependency sets. | |
/// * Using integers to reference Decl objects rather than pointers makes | |
/// serialization trivial. | |
/// * It provides a unique integer to be used for anonymous symbol names, avoiding | |
/// multi-threaded contention on an atomic counter. | |
allocated_decls: std.SegmentedList(Module.Decl, 0) = .{}, | |
/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack. | |
decls_free_list: std.ArrayListUnmanaged(DeclIndex) = .{}, | |
/// Same pattern as with `allocated_decls`. | |
allocated_namespaces: std.SegmentedList(Module.Namespace, 0) = .{}, | |
/// Same pattern as with `decls_free_list`. | |
namespaces_free_list: std.ArrayListUnmanaged(NamespaceIndex) = .{}, | |
/// Some types such as enums, structs, and unions need to store mappings from field names | |
/// to field index, or value to field index. In such cases, they will store the underlying | |
/// field names and values directly, relying on one of these maps, stored separately, | |
/// to provide lookup. | |
/// These are not serialized; they are computed upon deserialization.
maps: std.ArrayListUnmanaged(FieldMap) = .{}, | |
/// Used for finding the index inside `string_bytes`. | |
string_table: std.HashMapUnmanaged( | |
u32, | |
void, | |
std.hash_map.StringIndexContext, | |
std.hash_map.default_max_load_percentage, | |
) = .{}, | |
/// An index into `tracked_insts` gives a reference to a single ZIR instruction which | |
/// persists across incremental updates. | |
tracked_insts: std.AutoArrayHashMapUnmanaged(TrackedInst, void) = .{}, | |
/// Dependencies on the source code hash associated with a ZIR instruction. | |
/// * For a `declaration`, this is the entire declaration body. | |
/// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations). | |
/// * For a `func`, this is the source of the full function signature. | |
/// These are also invalidated if tracking fails for this instruction. | |
/// Value is index into `dep_entries` of the first dependency on this hash. | |
src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{}, | |
/// Dependencies on the value of a Decl. | |
/// Value is index into `dep_entries` of the first dependency on this Decl value. | |
decl_val_deps: std.AutoArrayHashMapUnmanaged(DeclIndex, DepEntry.Index) = .{}, | |
/// Dependencies on the full set of names in a ZIR namespace. | |
/// Key refers to a `struct_decl`, `union_decl`, etc. | |
/// Value is index into `dep_entries` of the first dependency on this namespace. | |
namespace_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{}, | |
/// Dependencies on the (non-)existence of some name in a namespace. | |
/// Value is index into `dep_entries` of the first dependency on this name. | |
namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.Index) = .{}, | |
/// Given a `Depender`, points to an entry in `dep_entries` whose `depender` | |
/// matches. The `next_dependee` field can be used to iterate all such entries | |
/// and remove them from the corresponding lists. | |
first_dependency: std.AutoArrayHashMapUnmanaged(Depender, DepEntry.Index) = .{}, | |
/// Stores dependency information. The hashmaps declared above are used to look | |
/// up entries in this list as required. This is not stored in `extra` so that | |
/// we can use `free_dep_entries` to track free indices, since dependencies are | |
/// removed frequently. | |
dep_entries: std.ArrayListUnmanaged(DepEntry) = .{}, | |
/// Stores unused indices in `dep_entries` which can be reused without a full | |
/// garbage collection pass. | |
free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .{}, | |
pub const TrackedInst = extern struct { | |
path_digest: Cache.BinDigest, | |
inst: Zir.Inst.Index, | |
comptime { | |
// The fields should be tightly packed. See also serialization logic in `Compilation.saveState`.
assert(@sizeOf(@This()) == Cache.bin_digest_len + @sizeOf(Zir.Inst.Index)); | |
} | |
pub const Index = enum(u32) { | |
_, | |
pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) Zir.Inst.Index { | |
return ip.tracked_insts.keys()[@intFromEnum(i)].inst; | |
} | |
pub fn toOptional(i: TrackedInst.Index) Optional { | |
return @enumFromInt(@intFromEnum(i)); | |
} | |
pub const Optional = enum(u32) { | |
none = std.math.maxInt(u32), | |
_, | |
pub fn unwrap(opt: Optional) ?TrackedInst.Index { | |
return switch (opt) { | |
.none => null, | |
_ => @enumFromInt(@intFromEnum(opt)), | |
}; | |
} | |
}; | |
}; | |
}; | |
pub fn trackZir(ip: *InternPool, gpa: Allocator, file: *Module.File, inst: Zir.Inst.Index) Allocator.Error!TrackedInst.Index { | |
const key: TrackedInst = .{ | |
.path_digest = file.path_digest, | |
.inst = inst, | |
}; | |
const gop = try ip.tracked_insts.getOrPut(gpa, key); | |
return @enumFromInt(gop.index); | |
} | |
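// Editor's illustrative note: since nothing here removes entries from
// `tracked_insts`, the `getOrPut` index above remains stable and can serve
// directly as the persistent `TrackedInst.Index` across incremental updates.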
/// Represents the "source" of a dependency edge, i.e. either a Decl or a
/// runtime function (represented as an InternPool index). | |
/// MSB is 0 for a Decl, 1 for a function. | |
pub const Depender = enum(u32) { | |
_, | |
pub const Unwrapped = union(enum) { | |
decl: DeclIndex, | |
func: InternPool.Index, | |
}; | |
pub fn unwrap(dep: Depender) Unwrapped { | |
const tag: u1 = @truncate(@intFromEnum(dep) >> 31); | |
const val: u31 = @truncate(@intFromEnum(dep)); | |
return switch (tag) { | |
0 => .{ .decl = @enumFromInt(val) }, | |
1 => .{ .func = @enumFromInt(val) }, | |
}; | |
} | |
pub fn wrap(raw: Unwrapped) Depender { | |
return @enumFromInt(switch (raw) { | |
.decl => |decl| @intFromEnum(decl), | |
.func => |func| (1 << 31) | @intFromEnum(func), | |
}); | |
} | |
pub fn toOptional(dep: Depender) Optional { | |
return @enumFromInt(@intFromEnum(dep)); | |
} | |
pub const Optional = enum(u32) { | |
none = std.math.maxInt(u32), | |
_, | |
pub fn unwrap(opt: Optional) ?Depender { | |
return switch (opt) { | |
.none => null, | |
_ => @enumFromInt(@intFromEnum(opt)), | |
}; | |
} | |
}; | |
}; | |
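// Editor's illustrative sketch (not part of the compiler source): the same
// MSB-tagging technique as `Depender`, reduced to a hypothetical standalone
// type (`RefSketch`) so the wrap/unwrap round-trip can be tested in isolation.
const RefSketch = enum(u32) {
    _,
    const Unwrapped = union(enum) { decl: u31, func: u31 };
    fn wrap(raw: Unwrapped) RefSketch {
        return @enumFromInt(switch (raw) {
            // MSB 0: the low 31 bits hold a decl index.
            .decl => |x| @as(u32, x),
            // MSB 1: the low 31 bits hold a function index.
            .func => |x| (@as(u32, 1) << 31) | x,
        });
    }
    fn unwrap(ref: RefSketch) Unwrapped {
        const tag: u1 = @truncate(@intFromEnum(ref) >> 31);
        const val: u31 = @truncate(@intFromEnum(ref));
        return switch (tag) {
            0 => .{ .decl = val },
            1 => .{ .func = val },
        };
    }
};
test "Depender-style MSB tagging round-trips" {
    try std.testing.expectEqual(@as(u31, 123), RefSketch.wrap(.{ .func = 123 }).unwrap().func);
    try std.testing.expectEqual(@as(u31, 7), RefSketch.wrap(.{ .decl = 7 }).unwrap().decl);
}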
pub const Dependee = union(enum) { | |
src_hash: TrackedInst.Index, | |
decl_val: DeclIndex, | |
namespace: TrackedInst.Index, | |
namespace_name: NamespaceNameKey, | |
}; | |
pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: Depender) void { | |
var opt_idx = (ip.first_dependency.fetchSwapRemove(depender) orelse return).value.toOptional(); | |
while (opt_idx.unwrap()) |idx| { | |
const dep = ip.dep_entries.items[@intFromEnum(idx)]; | |
opt_idx = dep.next_dependee; | |
const prev_idx = dep.prev.unwrap() orelse { | |
// This entry is the start of a list in some `*_deps`. | |
// We cannot easily remove this mapping, so this must remain as a dummy entry. | |
ip.dep_entries.items[@intFromEnum(idx)].depender = .none; | |
continue; | |
}; | |
ip.dep_entries.items[@intFromEnum(prev_idx)].next = dep.next; | |
if (dep.next.unwrap()) |next_idx| { | |
ip.dep_entries.items[@intFromEnum(next_idx)].prev = dep.prev; | |
} | |
ip.free_dep_entries.append(gpa, idx) catch { | |
// This memory will be reclaimed on the next garbage collection. | |
// Thus, we do not need to propagate this error. | |
}; | |
} | |
} | |
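// Editor's illustrative sketch (hypothetical `NodeSketch` type): the unlink
// operation performed on `dep_entries` above, i.e. removing a node from a
// doubly linked list whose links are indices into a backing array.
const NodeSketch = struct { prev: ?u32, next: ?u32 };
fn unlinkSketch(nodes: []NodeSketch, idx: u32) void {
    const node = nodes[idx];
    if (node.prev) |p| nodes[p].next = node.next;
    if (node.next) |n| nodes[n].prev = node.prev;
}
test "index-based doubly linked list unlink" {
    var nodes = [_]NodeSketch{
        .{ .prev = null, .next = 1 },
        .{ .prev = 0, .next = 2 },
        .{ .prev = 1, .next = null },
    };
    unlinkSketch(&nodes, 1);
    try std.testing.expectEqual(@as(?u32, 2), nodes[0].next);
    try std.testing.expectEqual(@as(?u32, 0), nodes[2].prev);
}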
pub const DependencyIterator = struct { | |
ip: *const InternPool, | |
next_entry: DepEntry.Index.Optional, | |
pub fn next(it: *DependencyIterator) ?Depender { | |
const idx = it.next_entry.unwrap() orelse return null; | |
const entry = it.ip.dep_entries.items[@intFromEnum(idx)]; | |
it.next_entry = entry.next; | |
return entry.depender.unwrap().?; | |
} | |
}; | |
pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyIterator { | |
const first_entry = switch (dependee) { | |
.src_hash => |x| ip.src_hash_deps.get(x), | |
.decl_val => |x| ip.decl_val_deps.get(x), | |
.namespace => |x| ip.namespace_deps.get(x), | |
.namespace_name => |x| ip.namespace_name_deps.get(x), | |
} orelse return .{ | |
.ip = ip, | |
.next_entry = .none, | |
}; | |
if (ip.dep_entries.items[@intFromEnum(first_entry)].depender == .none) return .{ | |
.ip = ip, | |
.next_entry = .none, | |
}; | |
return .{ | |
.ip = ip, | |
.next_entry = first_entry.toOptional(), | |
}; | |
} | |
pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: Depender, dependee: Dependee) Allocator.Error!void { | |
const first_depender_dep: DepEntry.Index.Optional = if (ip.first_dependency.get(depender)) |idx| dep: { | |
// The entry already exists, so there is capacity to overwrite it later. | |
break :dep idx.toOptional(); | |
} else none: { | |
// Ensure there is capacity available to add this dependency later. | |
try ip.first_dependency.ensureUnusedCapacity(gpa, 1); | |
break :none .none; | |
}; | |
// We're very likely to need space for a new entry - reserve it now to avoid | |
// the need for error cleanup logic. | |
if (ip.free_dep_entries.items.len == 0) { | |
try ip.dep_entries.ensureUnusedCapacity(gpa, 1); | |
} | |
// This block should allocate an entry and prepend it to the relevant `*_deps` list. | |
// The `next` field should be correctly initialized; all other fields may be undefined. | |
const new_index: DepEntry.Index = switch (dependee) { | |
inline else => |dependee_payload, tag| new_index: { | |
const gop = try switch (tag) { | |
.src_hash => ip.src_hash_deps, | |
.decl_val => ip.decl_val_deps, | |
.namespace => ip.namespace_deps, | |
.namespace_name => ip.namespace_name_deps, | |
}.getOrPut(gpa, dependee_payload); | |
if (gop.found_existing and ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].depender == .none) { | |
// Dummy entry, so we can reuse it rather than allocating a new one! | |
ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].next = .none; | |
break :new_index gop.value_ptr.*; | |
} | |
// Prepend a new dependency. | |
const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.popOrNull()) |new_index| new: { | |
break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] }; | |
} else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() }; | |
ptr.next = if (gop.found_existing) gop.value_ptr.*.toOptional() else .none; | |
gop.value_ptr.* = new_index; | |
break :new_index new_index; | |
}, | |
}; | |
ip.dep_entries.items[@intFromEnum(new_index)].depender = depender.toOptional(); | |
ip.dep_entries.items[@intFromEnum(new_index)].prev = .none; | |
ip.dep_entries.items[@intFromEnum(new_index)].next_dependee = first_depender_dep; | |
ip.first_dependency.putAssumeCapacity(depender, new_index); | |
} | |
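// Editor's illustrative usage (with hypothetical `depender` and `decl_index`
// values): record that `depender` must be re-analyzed whenever the value of
// the given declaration changes:
//   try ip.addDependency(gpa, depender, .{ .decl_val = decl_index });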
/// String is the name whose existence the dependency is on. | |
/// DepEntry.Index refers to the first such dependency. | |
pub const NamespaceNameKey = struct { | |
/// The instruction (`struct_decl` etc) which owns the namespace in question. | |
namespace: TrackedInst.Index, | |
/// The name whose existence the dependency is on. | |
name: NullTerminatedString, | |
}; | |
pub const DepEntry = extern struct { | |
/// If null, this is a dummy entry - all other fields are `undefined`. It is | |
/// the first and only entry in one of `intern_pool.*_deps`, and does not | |
/// appear in any list reachable through `first_dependency`, but is not in
/// `free_dep_entries` since `*_deps` stores a reference to it. | |
depender: Depender.Optional, | |
/// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee. | |
/// Used to iterate all dependers for a given dependee during an update. | |
/// null if this is the end of the list. | |
next: DepEntry.Index.Optional, | |
/// The other link for `next`. | |
/// null if this is the start of the list. | |
prev: DepEntry.Index.Optional, | |
/// Index into `dep_entries` forming a singly linked list of dependencies *of* `depender`. | |
/// Used to efficiently remove all `DepEntry`s for a single `depender` when it is re-analyzed. | |
/// null if this is the end of the list. | |
next_dependee: DepEntry.Index.Optional, | |
pub const Index = enum(u32) { | |
_, | |
pub fn toOptional(dep: DepEntry.Index) Optional { | |
return @enumFromInt(@intFromEnum(dep)); | |
} | |
pub const Optional = enum(u32) { | |
none = std.math.maxInt(u32), | |
_, | |
pub fn unwrap(opt: Optional) ?DepEntry.Index { | |
return switch (opt) { | |
.none => null, | |
_ => @enumFromInt(@intFromEnum(opt)), | |
}; | |
} | |
}; | |
}; | |
}; | |
const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), false); | |
const builtin = @import("builtin"); | |
const std = @import("std"); | |
const Allocator = std.mem.Allocator; | |
const assert = std.debug.assert; | |
const BigIntConst = std.math.big.int.Const; | |
const BigIntMutable = std.math.big.int.Mutable; | |
const Cache = std.Build.Cache; | |
const Limb = std.math.big.Limb; | |
const Hash = std.hash.Wyhash; | |
const InternPool = @This(); | |
const Module = @import("Module.zig"); | |
const Zcu = Module; | |
const Zir = std.zig.Zir; | |
const KeyAdapter = struct { | |
intern_pool: *const InternPool, | |
pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool { | |
_ = b_void; | |
return ctx.intern_pool.indexToKey(@as(Index, @enumFromInt(b_map_index))).eql(a, ctx.intern_pool); | |
} | |
pub fn hash(ctx: @This(), a: Key) u32 { | |
return a.hash32(ctx.intern_pool); | |
} | |
}; | |
/// An index into `maps` which might be `none`. | |
pub const OptionalMapIndex = enum(u32) { | |
none = std.math.maxInt(u32), | |
_, | |
pub fn unwrap(oi: OptionalMapIndex) ?MapIndex { | |
if (oi == .none) return null; | |
return @enumFromInt(@intFromEnum(oi)); | |
} | |
}; | |
/// An index into `maps`. | |
pub const MapIndex = enum(u32) { | |
_, | |
pub fn toOptional(i: MapIndex) OptionalMapIndex { | |
return @enumFromInt(@intFromEnum(i)); | |
} | |
}; | |
pub const RuntimeIndex = enum(u32) { | |
zero = 0, | |
comptime_field_ptr = std.math.maxInt(u32), | |
_, | |
pub fn increment(ri: *RuntimeIndex) void { | |
ri.* = @as(RuntimeIndex, @enumFromInt(@intFromEnum(ri.*) + 1)); | |
} | |
}; | |
pub const DeclIndex = std.zig.DeclIndex; | |
pub const OptionalDeclIndex = std.zig.OptionalDeclIndex; | |
pub const NamespaceIndex = enum(u32) { | |
_, | |
pub fn toOptional(i: NamespaceIndex) OptionalNamespaceIndex { | |
return @enumFromInt(@intFromEnum(i)); | |
} | |
}; | |
pub const OptionalNamespaceIndex = enum(u32) { | |
none = std.math.maxInt(u32), | |
_, | |
pub fn init(oi: ?NamespaceIndex) OptionalNamespaceIndex { | |
return @enumFromInt(@intFromEnum(oi orelse return .none)); | |
} | |
pub fn unwrap(oi: OptionalNamespaceIndex) ?NamespaceIndex { | |
if (oi == .none) return null; | |
return @enumFromInt(@intFromEnum(oi)); | |
} | |
}; | |
/// An index into `string_bytes`. | |
pub const String = enum(u32) { | |
_, | |
}; | |
/// An index into `string_bytes`. | |
pub const NullTerminatedString = enum(u32) { | |
/// This is distinct from `none` - it is a valid index that represents the empty string.
empty = 0, | |
_, | |
/// An array of `NullTerminatedString` existing within the `extra` array. | |
/// This type exists to provide a struct with a lifetime that is
/// not invalidated when items are added to the `InternPool`. | |
pub const Slice = struct { | |
start: u32, | |
len: u32, | |
pub fn get(slice: Slice, ip: *const InternPool) []NullTerminatedString { | |
return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); | |
} | |
}; | |
pub fn toString(self: NullTerminatedString) String { | |
return @enumFromInt(@intFromEnum(self)); | |
} | |
pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { | |
return @enumFromInt(@intFromEnum(self)); | |
} | |
const Adapter = struct { | |
strings: []const NullTerminatedString, | |
pub fn eql(ctx: @This(), a: NullTerminatedString, b_void: void, b_map_index: usize) bool { | |
_ = b_void; | |
return a == ctx.strings[b_map_index]; | |
} | |
pub fn hash(ctx: @This(), a: NullTerminatedString) u32 { | |
_ = ctx; | |
return std.hash.uint32(@intFromEnum(a)); | |
} | |
}; | |
/// Compare based on integer value alone, ignoring the string contents. | |
pub fn indexLessThan(ctx: void, a: NullTerminatedString, b: NullTerminatedString) bool { | |
_ = ctx; | |
return @intFromEnum(a) < @intFromEnum(b); | |
} | |
pub fn toUnsigned(self: NullTerminatedString, ip: *const InternPool) ?u32 { | |
const s = ip.stringToSlice(self); | |
if (s.len > 1 and s[0] == '0') return null; | |
if (std.mem.indexOfScalar(u8, s, '_')) |_| return null; | |
return std.fmt.parseUnsigned(u32, s, 10) catch null; | |
} | |
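// Editor's illustrative sketch: the same canonical-decimal rule as
// `toUnsigned`, standalone. Rejecting leading zeros and `_` separators
// (which `std.fmt.parseUnsigned` would otherwise accept) ensures that only
// the canonical spelling of a tuple field index matches.
fn toUnsignedSketch(s: []const u8) ?u32 {
    if (s.len > 1 and s[0] == '0') return null; // "012" is not canonical
    if (std.mem.indexOfScalar(u8, s, '_')) |_| return null; // "1_2" is not canonical
    return std.fmt.parseUnsigned(u32, s, 10) catch null;
}
test "canonical tuple field index parsing" {
    try std.testing.expectEqual(@as(?u32, 12), toUnsignedSketch("12"));
    try std.testing.expectEqual(@as(?u32, null), toUnsignedSketch("012"));
    try std.testing.expectEqual(@as(?u32, null), toUnsignedSketch("1_2"));
}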
const FormatData = struct { | |
string: NullTerminatedString, | |
ip: *const InternPool, | |
}; | |
fn format( | |
data: FormatData, | |
comptime specifier: []const u8, | |
_: std.fmt.FormatOptions, | |
writer: anytype, | |
) @TypeOf(writer).Error!void { | |
const s = data.ip.stringToSlice(data.string); | |
if (comptime std.mem.eql(u8, specifier, "")) { | |
try writer.writeAll(s); | |
} else if (comptime std.mem.eql(u8, specifier, "i")) { | |
try writer.print("{}", .{std.zig.fmtId(s)}); | |
} else @compileError("invalid format string '" ++ specifier ++ "' for '" ++ @typeName(NullTerminatedString) ++ "'"); | |
} | |
pub fn fmt(self: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(format) { | |
return .{ .data = .{ .string = self, .ip = ip } }; | |
} | |
}; | |
/// An index into `string_bytes` which might be `none`. | |
pub const OptionalNullTerminatedString = enum(u32) { | |
/// This is distinct from `none` - it is a valid index that represents the empty string.
empty = 0, | |
none = std.math.maxInt(u32), | |
_, | |
pub fn unwrap(oi: OptionalNullTerminatedString) ?NullTerminatedString { | |
if (oi == .none) return null; | |
return @enumFromInt(@intFromEnum(oi)); | |
} | |
}; | |
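// Editor's illustrative sketch: the sentinel-optional pattern used by the
// `Optional*` types above. Reserving maxInt(u32) as `none` keeps an optional
// index at 4 bytes, where a `?u32` would typically occupy 8. `OptIdxSketch`
// is a hypothetical standalone version.
const OptIdxSketch = enum(u32) {
    none = std.math.maxInt(u32),
    _,
    fn unwrap(oi: OptIdxSketch) ?u32 {
        return if (oi == .none) null else @intFromEnum(oi);
    }
};
test "sentinel-encoded optional index" {
    try std.testing.expectEqual(4, @sizeOf(OptIdxSketch));
    try std.testing.expectEqual(@as(?u32, null), OptIdxSketch.none.unwrap());
    try std.testing.expectEqual(@as(?u32, 7), @as(OptIdxSketch, @enumFromInt(7)).unwrap());
}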
pub const Key = union(enum) { | |
int_type: IntType, | |
ptr_type: PtrType, | |
array_type: ArrayType, | |
vector_type: VectorType, | |
opt_type: Index, | |
/// `anyframe->T`. The payload is the child type, which may be `none` to indicate | |
/// `anyframe`. | |
anyframe_type: Index, | |
error_union_type: ErrorUnionType, | |
simple_type: SimpleType, | |
/// This represents a struct that has been explicitly declared in source code, | |
/// or was created with `@Type`. It is unique and based on a declaration. | |
/// It may be a tuple, if declared like this: `struct {A, B, C}`. | |
struct_type: StructType, | |
/// This is an anonymous struct or tuple type which has no corresponding | |
/// declaration. It is used for types that have no `struct` keyword in the | |
/// source code, and were not created via `@Type`. | |
anon_struct_type: AnonStructType, | |
union_type: Key.UnionType, | |
opaque_type: OpaqueType, | |
enum_type: EnumType, | |
func_type: FuncType, | |
error_set_type: ErrorSetType, | |
/// The payload is the function body, either a `func_decl` or `func_instance`. | |
inferred_error_set_type: Index, | |
/// Typed `undefined`. This will never be `none`; untyped `undefined` is represented | |
/// via `simple_value` and has a named `Index` tag for it. | |
undef: Index, | |
simple_value: SimpleValue, | |
variable: Variable, | |
extern_func: ExternFunc, | |
func: Func, | |
int: Key.Int, | |
err: Error, | |
error_union: ErrorUnion, | |
enum_literal: NullTerminatedString, | |
/// A specific enum tag, indicated by the integer tag value. | |
enum_tag: EnumTag, | |
/// An empty enum or union. TODO: this value's existence is strange, because such a type in | |
/// reality has no values. See #15909. | |
/// Payload is the type for which we are an empty value. | |
empty_enum_value: Index, | |
float: Float, | |
ptr: Ptr, | |
slice: Slice, | |
opt: Opt, | |
/// An instance of a struct, array, or vector. | |
/// Each element/field stored as an `Index`. | |
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored, | |
/// so the slice length will be one more than the type's array length. | |
aggregate: Aggregate, | |
/// An instance of a union. | |
un: Union, | |
/// A comptime function call with a memoized result. | |
memoized_call: Key.MemoizedCall, | |
pub const TypeValue = extern struct { | |
ty: Index, | |
val: Index, | |
}; | |
pub const IntType = std.builtin.Type.Int; | |
/// Extern for hashing via memory reinterpretation. | |
pub const ErrorUnionType = extern struct { | |
error_set_type: Index, | |
payload_type: Index, | |
}; | |
pub const ErrorSetType = struct { | |
/// Set of error names, sorted by null terminated string index. | |
names: NullTerminatedString.Slice, | |
/// This is ignored by `get` but will always be provided by `indexToKey`. | |
names_map: OptionalMapIndex = .none, | |
/// Look up field index based on field name. | |
pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 { | |
const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)]; | |
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) }; | |
const field_index = map.getIndexAdapted(name, adapter) orelse return null; | |
return @intCast(field_index); | |
} | |
}; | |
/// Extern layout so it can be hashed with `std.mem.asBytes`. | |
pub const PtrType = extern struct { | |
child: Index, | |
sentinel: Index = .none, | |
flags: Flags = .{}, | |
packed_offset: PackedOffset = .{ .bit_offset = 0, .host_size = 0 }, | |
pub const VectorIndex = enum(u16) { | |
none = std.math.maxInt(u16), | |
runtime = std.math.maxInt(u16) - 1, | |
_, | |
}; | |
pub const Flags = packed struct(u32) { | |
size: Size = .One, | |
/// `none` indicates the ABI alignment of the pointee_type. In this | |
/// case, this field *must* be set to `none`, otherwise the | |
/// `InternPool` equality and hashing functions will return incorrect | |
/// results. | |
alignment: Alignment = .none, | |
is_const: bool = false, | |
is_volatile: bool = false, | |
is_allowzero: bool = false, | |
/// See src/target.zig defaultAddressSpace function for how to obtain | |
/// an appropriate value for this field. | |
address_space: AddressSpace = .generic, | |
vector_index: VectorIndex = .none, | |
}; | |
pub const PackedOffset = packed struct(u32) { | |
/// If this is non-zero it means the pointer points to a sub-byte | |
/// range of data, which is backed by a "host integer" with this | |
/// number of bytes. | |
/// When host_size=pointee_abi_size and bit_offset=0, this must be | |
/// represented with host_size=0 instead. | |
host_size: u16, | |
bit_offset: u16, | |
}; | |
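// Editor's illustrative sketch of the sub-byte pointers this encoding
// describes: for `packed struct { a: u3, b: u5 }` the backing host integer is
// a single byte, so a pointer to `b` would correspond to host_size = 1 (bytes)
// and bit_offset = 3 here. Writes through such a pointer must not clobber the
// neighboring bits of the host integer.
test "packed field pointers address a bit range within a host integer" {
    const S = packed struct { a: u3, b: u5 };
    var s: S = .{ .a = 1, .b = 9 };
    const p = &s.b;
    p.* = 17;
    try std.testing.expectEqual(@as(u5, 17), s.b);
    try std.testing.expectEqual(@as(u3, 1), s.a);
}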
pub const Size = std.builtin.Type.Pointer.Size; | |
pub const AddressSpace = std.builtin.AddressSpace; | |
}; | |
/// Extern so that hashing can be done via memory reinterpreting. | |
pub const ArrayType = extern struct { | |
len: u64, | |
child: Index, | |
sentinel: Index = .none, | |
}; | |
/// Extern so that hashing can be done via memory reinterpreting. | |
pub const VectorType = extern struct { | |
len: u32, | |
child: Index, | |
}; | |
pub const OpaqueType = extern struct { | |
/// The Decl that corresponds to the opaque itself. | |
decl: DeclIndex, | |
/// Represents the declarations inside this opaque. | |
namespace: NamespaceIndex, | |
zir_index: TrackedInst.Index.Optional, | |
}; | |
/// Although packed structs and non-packed structs are encoded differently, | |
/// this struct is used for both categories since they share some common | |
/// functionality. | |
pub const StructType = struct { | |
extra_index: u32, | |
/// `none` when the struct is `@TypeOf(.{})`. | |
decl: OptionalDeclIndex, | |
/// `none` when the struct has no declarations. | |
namespace: OptionalNamespaceIndex, | |
/// Index of the struct_decl ZIR instruction. | |
zir_index: TrackedInst.Index.Optional, | |
layout: std.builtin.Type.ContainerLayout, | |
field_names: NullTerminatedString.Slice, | |
field_types: Index.Slice, | |
field_inits: Index.Slice, | |
field_aligns: Alignment.Slice, | |
runtime_order: RuntimeOrder.Slice, | |
comptime_bits: ComptimeBits, | |
offsets: Offsets, | |
names_map: OptionalMapIndex, | |
pub const ComptimeBits = struct { | |
start: u32, | |
/// This is the number of u32 elements, not the number of struct fields. | |
len: u32, | |
pub fn get(this: @This(), ip: *const InternPool) []u32 { | |
return ip.extra.items[this.start..][0..this.len]; | |
} | |
pub fn getBit(this: @This(), ip: *const InternPool, i: usize) bool { | |
if (this.len == 0) return false; | |
return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0; | |
} | |
pub fn setBit(this: @This(), ip: *const InternPool, i: usize) void { | |
this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32); | |
} | |
pub fn clearBit(this: @This(), ip: *const InternPool, i: usize) void { | |
this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32)); | |
} | |
}; | |
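// Editor's illustrative sketch: the bit addressing used by `ComptimeBits`,
// one bit per field packed into u32 words (word i/32, bit i%32), shown on a
// plain slice so it can be tested standalone.
fn getBitSketch(words: []const u32, i: usize) bool {
    return @as(u1, @truncate(words[i / 32] >> @intCast(i % 32))) != 0;
}
fn setBitSketch(words: []u32, i: usize) void {
    words[i / 32] |= @as(u32, 1) << @intCast(i % 32);
}
test "comptime-bits addressing" {
    var words = [_]u32{ 0, 0 };
    setBitSketch(&words, 33); // word 1, bit 1
    try std.testing.expect(getBitSketch(&words, 33));
    try std.testing.expect(!getBitSketch(&words, 32));
}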
pub const Offsets = struct { | |
start: u32, | |
len: u32, | |
pub fn get(this: @This(), ip: *const InternPool) []u32 { | |
return @ptrCast(ip.extra.items[this.start..][0..this.len]); | |
} | |
}; | |
pub const RuntimeOrder = enum(u32) { | |
/// Placeholder until layout is resolved. | |
unresolved = std.math.maxInt(u32) - 0, | |
/// Field not present at runtime | |
omitted = std.math.maxInt(u32) - 1, | |
_, | |
pub const Slice = struct { | |
start: u32, | |
len: u32, | |
pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder { | |
return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); | |
} | |
}; | |
pub fn toInt(i: @This()) ?u32 { | |
return switch (i) { | |
.omitted => null, | |
.unresolved => unreachable, | |
else => @intFromEnum(i), | |
}; | |
} | |
}; | |
/// Look up field index based on field name. | |
pub fn nameIndex(self: StructType, ip: *const InternPool, name: NullTerminatedString) ?u32 { | |
const names_map = self.names_map.unwrap() orelse { | |
const i = name.toUnsigned(ip) orelse return null; | |
if (i >= self.field_types.len) return null; | |
return i; | |
}; | |
const map = &ip.maps.items[@intFromEnum(names_map)]; | |
const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) }; | |
const field_index = map.getIndexAdapted(name, adapter) orelse return null; | |
return @intCast(field_index); | |
} | |
/// Returns the already-existing field with the same name, if any. | |
pub fn addFieldName( | |
self: @This(), | |
ip: *InternPool, | |
name: NullTerminatedString, | |
) ?u32 { | |
return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name); | |
} | |
pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment { | |
if (s.field_aligns.len == 0) return .none; | |
return s.field_aligns.get(ip)[i]; | |
} | |
pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index { | |
if (s.field_inits.len == 0) return .none; | |
assert(s.haveFieldInits(ip)); | |
return s.field_inits.get(ip)[i]; | |
} | |
/// Returns `none` in the case the struct is a tuple. | |
pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString { | |
if (s.field_names.len == 0) return .none; | |
return s.field_names.get(ip)[i].toOptional(); | |
} | |
pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool { | |
return s.comptime_bits.getBit(ip, i); | |
} | |
pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void { | |
s.comptime_bits.setBit(ip, i); | |
} | |
/// Reads the non-opv flag calculated during AstGen. Used to short-circuit more | |
/// complicated logic. | |
pub fn knownNonOpv(s: @This(), ip: *InternPool) bool { | |
return switch (s.layout) { | |
.Packed => false, | |
.Auto, .Extern => s.flagsPtr(ip).known_non_opv, | |
}; | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
/// Asserts the struct is not packed. | |
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags { | |
assert(self.layout != .Packed); | |
const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?; | |
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]); | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
/// Asserts that the struct is packed. | |
pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags { | |
assert(self.layout == .Packed); | |
const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?; | |
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]); | |
} | |
pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool { | |
if (s.layout == .Packed) return false; | |
const flags_ptr = s.flagsPtr(ip); | |
if (flags_ptr.field_types_wip) { | |
flags_ptr.assumed_runtime_bits = true; | |
return true; | |
} | |
return false; | |
} | |
pub fn setTypesWip(s: @This(), ip: *InternPool) bool { | |
if (s.layout == .Packed) return false; | |
const flags_ptr = s.flagsPtr(ip); | |
if (flags_ptr.field_types_wip) return true; | |
flags_ptr.field_types_wip = true; | |
return false; | |
} | |
pub fn clearTypesWip(s: @This(), ip: *InternPool) void { | |
if (s.layout == .Packed) return; | |
s.flagsPtr(ip).field_types_wip = false; | |
} | |
pub fn setLayoutWip(s: @This(), ip: *InternPool) bool { | |
if (s.layout == .Packed) return false; | |
const flags_ptr = s.flagsPtr(ip); | |
if (flags_ptr.layout_wip) return true; | |
flags_ptr.layout_wip = true; | |
return false; | |
} | |
pub fn clearLayoutWip(s: @This(), ip: *InternPool) void { | |
if (s.layout == .Packed) return; | |
s.flagsPtr(ip).layout_wip = false; | |
} | |
pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool { | |
if (s.layout == .Packed) return false; | |
const flags_ptr = s.flagsPtr(ip); | |
if (flags_ptr.alignment_wip) return true; | |
flags_ptr.alignment_wip = true; | |
return false; | |
} | |
pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void { | |
if (s.layout == .Packed) return; | |
s.flagsPtr(ip).alignment_wip = false; | |
} | |
pub fn setInitsWip(s: @This(), ip: *InternPool) bool { | |
switch (s.layout) { | |
.Packed => { | |
const flag = &s.packedFlagsPtr(ip).field_inits_wip; | |
if (flag.*) return true; | |
flag.* = true; | |
return false; | |
}, | |
.Auto, .Extern => { | |
const flag = &s.flagsPtr(ip).field_inits_wip; | |
if (flag.*) return true; | |
flag.* = true; | |
return false; | |
}, | |
} | |
} | |
pub fn clearInitsWip(s: @This(), ip: *InternPool) void { | |
switch (s.layout) { | |
.Packed => s.packedFlagsPtr(ip).field_inits_wip = false, | |
.Auto, .Extern => s.flagsPtr(ip).field_inits_wip = false, | |
} | |
} | |
pub fn setFullyResolved(s: @This(), ip: *InternPool) bool { | |
if (s.layout == .Packed) return true; | |
const flags_ptr = s.flagsPtr(ip); | |
if (flags_ptr.fully_resolved) return true; | |
flags_ptr.fully_resolved = true; | |
return false; | |
} | |
pub fn clearFullyResolved(s: @This(), ip: *InternPool) void { | |
s.flagsPtr(ip).fully_resolved = false; | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
/// Asserts the struct is not packed. | |
pub fn size(self: @This(), ip: *InternPool) *u32 { | |
assert(self.layout != .Packed); | |
const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?; | |
return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]); | |
} | |
/// The backing integer type of the packed struct. Whether zig chooses | |
/// this type or the user specifies it, it is stored here. This will be | |
/// set to `none` until the layout is resolved. | |
/// Asserts the struct is packed. | |
pub fn backingIntType(s: @This(), ip: *const InternPool) *Index { | |
assert(s.layout == .Packed); | |
const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?; | |
return @ptrCast(&ip.extra.items[s.extra_index + field_index]); | |
} | |
/// Asserts the struct is not packed. | |
pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void { | |
assert(s.layout != .Packed); | |
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?; | |
ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index); | |
} | |
pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool { | |
const types = s.field_types.get(ip); | |
return types.len == 0 or types[0] != .none; | |
} | |
pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool { | |
return switch (s.layout) { | |
.Packed => s.packedFlagsPtr(ip).inits_resolved, | |
.Auto, .Extern => s.flagsPtr(ip).inits_resolved, | |
}; | |
} | |
pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void { | |
switch (s.layout) { | |
.Packed => s.packedFlagsPtr(ip).inits_resolved = true, | |
.Auto, .Extern => s.flagsPtr(ip).inits_resolved = true, | |
} | |
} | |
pub fn haveLayout(s: @This(), ip: *InternPool) bool { | |
return switch (s.layout) { | |
.Packed => s.backingIntType(ip).* != .none, | |
.Auto, .Extern => s.flagsPtr(ip).layout_resolved, | |
}; | |
} | |
pub fn isTuple(s: @This(), ip: *InternPool) bool { | |
return s.layout != .Packed and s.flagsPtr(ip).is_tuple; | |
} | |
pub fn hasReorderedFields(s: @This()) bool { | |
return s.layout == .Auto; | |
} | |
pub const RuntimeOrderIterator = struct { | |
ip: *InternPool, | |
field_index: u32, | |
struct_type: InternPool.Key.StructType, | |
pub fn next(it: *@This()) ?u32 { | |
var i = it.field_index; | |
if (i >= it.struct_type.field_types.len) | |
return null; | |
if (it.struct_type.hasReorderedFields()) { | |
it.field_index += 1; | |
return it.struct_type.runtime_order.get(it.ip)[i].toInt(); | |
} | |
while (it.struct_type.fieldIsComptime(it.ip, i)) { | |
i += 1; | |
if (i >= it.struct_type.field_types.len) | |
return null; | |
} | |
it.field_index = i + 1; | |
return i; | |
} | |
}; | |
/// Iterates over non-comptime fields in the order they are laid out in memory at runtime. | |
/// May or may not include zero-bit fields. | |
/// Asserts the struct is not packed. | |
pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator { | |
assert(s.layout != .Packed); | |
return .{ | |
.ip = ip, | |
.field_index = 0, | |
.struct_type = s, | |
}; | |
} | |
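// Editor's illustrative usage (with hypothetical `struct_type: StructType`
// and `ip: *InternPool` values): visit fields in their runtime memory order,
// skipping comptime fields:
//   var it = struct_type.iterateRuntimeOrder(ip);
//   while (it.next()) |field_index| {
//       // field_index indexes into field_types/field_names
//   }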
}; | |
pub const AnonStructType = struct { | |
types: Index.Slice, | |
/// This may be empty, indicating this is a tuple. | |
names: NullTerminatedString.Slice, | |
/// These elements may be `none`, indicating runtime-known. | |
values: Index.Slice, | |
pub fn isTuple(self: AnonStructType) bool { | |
return self.names.len == 0; | |
} | |
pub fn fieldName( | |
self: AnonStructType, | |
ip: *const InternPool, | |
index: u32, | |
) OptionalNullTerminatedString { | |
if (self.names.len == 0) | |
return .none; | |
return self.names.get(ip)[index].toOptional(); | |
} | |
}; | |
/// Serves two purposes: | |
/// * Being the key in the InternPool hash map, which only requires the `decl` field. | |
/// * Providing the other fields that do not require chasing the enum type.
pub const UnionType = struct { | |
/// The Decl that corresponds to the union itself. | |
decl: DeclIndex, | |
/// The index of the `Tag.TypeUnion` payload. Ignored by `get`, | |
/// populated by `indexToKey`. | |
extra_index: u32, | |
namespace: NamespaceIndex, | |
flags: Tag.TypeUnion.Flags, | |
/// The enum that provides the list of field names and values. | |
enum_tag_ty: Index, | |
zir_index: TrackedInst.Index.Optional, | |
/// The returned pointer expires with any addition to the `InternPool`. | |
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeUnion.Flags { | |
const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?; | |
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]); | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
pub fn size(self: @This(), ip: *InternPool) *u32 { | |
const size_field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?; | |
return &ip.extra.items[self.extra_index + size_field_index]; | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
pub fn padding(self: @This(), ip: *InternPool) *u32 { | |
const padding_field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?; | |
return &ip.extra.items[self.extra_index + padding_field_index]; | |
} | |
pub fn haveFieldTypes(self: @This(), ip: *const InternPool) bool { | |
return self.flagsPtr(ip).status.haveFieldTypes(); | |
} | |
pub fn hasTag(self: @This(), ip: *const InternPool) bool { | |
return self.flagsPtr(ip).runtime_tag.hasTag(); | |
} | |
pub fn getLayout(self: @This(), ip: *const InternPool) std.builtin.Type.ContainerLayout { | |
return self.flagsPtr(ip).layout; | |
} | |
pub fn haveLayout(self: @This(), ip: *const InternPool) bool { | |
return self.flagsPtr(ip).status.haveLayout(); | |
} | |
/// Pointer to an enum type which is used for the tag of the union. | |
/// This type is created even for untagged unions, even when the memory | |
/// layout does not store the tag. | |
/// Whether zig chooses this type or the user specifies it, it is stored here. | |
/// This will be set to the null type until status is `have_field_types`. | |
/// This accessor is provided so that the tag type can be mutated, and so that | |
/// when it is mutated, the mutations are observed. | |
/// The returned pointer is invalidated when something is added to the `InternPool`. | |
pub fn tagTypePtr(self: @This(), ip: *const InternPool) *Index { | |
const tag_ty_field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?; | |
return @ptrCast(&ip.extra.items[self.extra_index + tag_ty_field_index]); | |
} | |
pub fn setFieldTypes(self: @This(), ip: *InternPool, types: []const Index) void { | |
@memcpy((Index.Slice{ | |
.start = @intCast(self.extra_index + @typeInfo(Tag.TypeUnion).Struct.fields.len), | |
.len = @intCast(types.len), | |
}).get(ip), types); | |
} | |
pub fn setFieldAligns(self: @This(), ip: *InternPool, aligns: []const Alignment) void { | |
if (aligns.len == 0) return; | |
assert(self.flagsPtr(ip).any_aligned_fields); | |
@memcpy((Alignment.Slice{ | |
.start = @intCast( | |
self.extra_index + @typeInfo(Tag.TypeUnion).Struct.fields.len + aligns.len, | |
), | |
.len = @intCast(aligns.len), | |
}).get(ip), aligns); | |
} | |
}; | |
pub const EnumType = struct { | |
/// The Decl that corresponds to the enum itself. | |
decl: DeclIndex, | |
/// Represents the declarations inside this enum. | |
namespace: OptionalNamespaceIndex, | |
/// An integer type which is used for the numerical value of the enum. | |
/// This field is present regardless of whether the enum has an | |
/// explicitly provided tag type or is auto-numbered.
tag_ty: Index, | |
/// Set of field names in declaration order. | |
names: NullTerminatedString.Slice, | |
/// The integer tag value for each field.
/// Entries are in declaration order, same as `names`.
/// If this is empty, it means the enum tags are auto-numbered.
values: Index.Slice, | |
tag_mode: TagMode, | |
/// This is ignored by `get` but will always be provided by `indexToKey`. | |
names_map: OptionalMapIndex = .none, | |
/// This is ignored by `get` but will be provided by `indexToKey` when | |
/// a value map exists. | |
values_map: OptionalMapIndex = .none, | |
zir_index: TrackedInst.Index.Optional, | |
pub const TagMode = enum { | |
/// The integer tag type was auto-numbered by zig. | |
auto, | |
/// The integer tag type was provided by the enum declaration, and the enum | |
/// is exhaustive. | |
explicit, | |
/// The integer tag type was provided by the enum declaration, and the enum | |
/// is non-exhaustive. | |
nonexhaustive, | |
}; | |
/// Look up field index based on field name. | |
pub fn nameIndex(self: EnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 { | |
const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)]; | |
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) }; | |
const field_index = map.getIndexAdapted(name, adapter) orelse return null; | |
return @intCast(field_index); | |
} | |
/// Look up field index based on tag value. | |
/// Asserts that `values_map` is not `none`. | |
/// This function returns `null` when `tag_val` does not have the | |
/// integer tag type of the enum. | |
pub fn tagValueIndex(self: EnumType, ip: *const InternPool, tag_val: Index) ?u32 { | |
assert(tag_val != .none); | |
// TODO: we should probably decide a single interface for this function, but currently | |
// it's being called with both tag values and underlying ints. Fix this! | |
const int_tag_val = switch (ip.indexToKey(tag_val)) { | |
.enum_tag => |enum_tag| enum_tag.int, | |
.int => tag_val, | |
else => unreachable, | |
}; | |
if (self.values_map.unwrap()) |values_map| { | |
const map = &ip.maps.items[@intFromEnum(values_map)]; | |
const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) }; | |
const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null; | |
return @intCast(field_index); | |
} | |
// Auto-numbered enum. Convert `int_tag_val` to field index. | |
const field_index = switch (ip.indexToKey(int_tag_val).int.storage) { | |
inline .u64, .i64 => |x| std.math.cast(u32, x) orelse return null, | |
.big_int => |x| x.to(u32) catch return null, | |
.lazy_align, .lazy_size => unreachable, | |
}; | |
return if (field_index < self.names.len) field_index else null; | |
} | |
}; | |
pub const IncompleteEnumType = struct { | |
/// Same as corresponding `EnumType` field. | |
decl: DeclIndex, | |
/// Same as corresponding `EnumType` field. | |
namespace: OptionalNamespaceIndex, | |
/// The field names and field values are not known yet, but | |
/// the number of fields must be known ahead of time. | |
fields_len: u32, | |
/// This information is needed so that the size does not change | |
/// later when populating field values. | |
has_values: bool, | |
/// Same as corresponding `EnumType` field. | |
tag_mode: EnumType.TagMode, | |
/// This may be updated via `setTagType` later. | |
tag_ty: Index = .none, | |
zir_index: TrackedInst.Index.Optional, | |
pub fn toEnumType(self: @This()) EnumType { | |
return .{ | |
.decl = self.decl, | |
.namespace = self.namespace, | |
.tag_ty = self.tag_ty, | |
.tag_mode = self.tag_mode, | |
.names = .{ .start = 0, .len = 0 }, | |
.values = .{ .start = 0, .len = 0 }, | |
.zir_index = self.zir_index, | |
}; | |
} | |
/// Only the decl is used for hashing and equality, so we can construct | |
/// this minimal key for use with `map`. | |
pub fn toKey(self: @This()) Key { | |
return .{ .enum_type = self.toEnumType() }; | |
} | |
}; | |
pub const FuncType = struct { | |
param_types: Index.Slice, | |
return_type: Index, | |
/// Tells whether a parameter is comptime. See `paramIsComptime` helper | |
/// method for accessing this. | |
comptime_bits: u32, | |
/// Tells whether a parameter is noalias. See `paramIsNoalias` helper | |
/// method for accessing this. | |
noalias_bits: u32, | |
/// `none` indicates the function has the default alignment for | |
/// function code on the target. In this case, this field *must* be set | |
/// to `none`, otherwise the `InternPool` equality and hashing | |
/// functions will return incorrect results. | |
alignment: Alignment, | |
cc: std.builtin.CallingConvention, | |
is_var_args: bool, | |
is_generic: bool, | |
is_noinline: bool, | |
align_is_generic: bool, | |
cc_is_generic: bool, | |
section_is_generic: bool, | |
addrspace_is_generic: bool, | |
pub fn paramIsComptime(self: @This(), i: u5) bool { | |
assert(i < self.param_types.len); | |
return @as(u1, @truncate(self.comptime_bits >> i)) != 0; | |
} | |
pub fn paramIsNoalias(self: @This(), i: u5) bool { | |
assert(i < self.param_types.len); | |
return @as(u1, @truncate(self.noalias_bits >> i)) != 0; | |
} | |
pub fn eql(a: FuncType, b: FuncType, ip: *const InternPool) bool { | |
return std.mem.eql(Index, a.param_types.get(ip), b.param_types.get(ip)) and | |
a.return_type == b.return_type and | |
a.comptime_bits == b.comptime_bits and | |
a.noalias_bits == b.noalias_bits and | |
a.alignment == b.alignment and | |
a.cc == b.cc and | |
a.is_var_args == b.is_var_args and | |
a.is_generic == b.is_generic and | |
a.is_noinline == b.is_noinline; | |
} | |
pub fn hash(self: FuncType, hasher: *Hash, ip: *const InternPool) void { | |
for (self.param_types.get(ip)) |param_type| { | |
std.hash.autoHash(hasher, param_type); | |
} | |
std.hash.autoHash(hasher, self.return_type); | |
std.hash.autoHash(hasher, self.comptime_bits); | |
std.hash.autoHash(hasher, self.noalias_bits); | |
std.hash.autoHash(hasher, self.alignment); | |
std.hash.autoHash(hasher, self.cc); | |
std.hash.autoHash(hasher, self.is_var_args); | |
std.hash.autoHash(hasher, self.is_generic); | |
std.hash.autoHash(hasher, self.is_noinline); | |
} | |
}; | |
pub const Variable = struct { | |
ty: Index, | |
init: Index, | |
decl: DeclIndex, | |
lib_name: OptionalNullTerminatedString, | |
is_extern: bool, | |
is_const: bool, | |
is_threadlocal: bool, | |
is_weak_linkage: bool, | |
}; | |
pub const ExternFunc = struct { | |
ty: Index, | |
/// The Decl that corresponds to the function itself. | |
decl: DeclIndex, | |
/// Library name if specified. | |
/// For example `extern "c" fn write(...) usize` would have 'c' as library name. | |
/// Index into the string table bytes. | |
lib_name: OptionalNullTerminatedString, | |
}; | |
pub const Func = struct { | |
/// In the case of a generic function, this type will potentially have fewer parameters | |
/// than the generic owner's type, because the comptime parameters will be deleted. | |
ty: Index, | |
/// If this is a function body that has been coerced to a different type, for example | |
/// ``` | |
/// fn f2() !void {} | |
/// const f: fn()anyerror!void = f2; | |
/// ``` | |
/// then it contains the original type of the function body. | |
uncoerced_ty: Index, | |
/// Index into extra array of the `FuncAnalysis` corresponding to this function. | |
/// Used for mutating that data. | |
analysis_extra_index: u32, | |
/// Index into extra array of the `zir_body_inst` corresponding to this function. | |
/// Used for mutating that data. | |
zir_body_inst_extra_index: u32, | |
/// Index into extra array of the resolved inferred error set for this function. | |
/// Used for mutating that data. | |
/// 0 when the function does not have an inferred error set. | |
resolved_error_set_extra_index: u32, | |
/// When a generic function is instantiated, branch_quota is inherited from the | |
/// active Sema context. Importantly, this value is also updated when an existing | |
/// generic function instantiation is found and called. | |
/// This field contains the index into the extra array of this value, | |
/// so that it can be mutated. | |
/// This will be 0 when the function is not a generic function instantiation. | |
branch_quota_extra_index: u32, | |
/// The Decl that corresponds to the function itself. | |
owner_decl: DeclIndex, | |
/// The ZIR instruction that is a function instruction. Use this to find | |
/// the body. We store this rather than the body directly so that when ZIR | |
/// is regenerated on update(), we can map this to the new corresponding | |
/// ZIR instruction. | |
zir_body_inst: TrackedInst.Index, | |
/// Relative to owner Decl. | |
lbrace_line: u32, | |
/// Relative to owner Decl. | |
rbrace_line: u32, | |
lbrace_column: u32, | |
rbrace_column: u32, | |
/// The `func_decl` which is the generic function from which this instance was spawned.
/// If this is `none` it means the function is not a generic instantiation. | |
generic_owner: Index, | |
/// If this is a generic function instantiation, this will be non-empty. | |
/// Corresponds to the parameters of the `generic_owner` type, which | |
/// may have more parameters than `ty`. | |
/// Each element is the comptime-known value the generic function was instantiated with, | |
/// or `none` if the element is runtime-known. | |
/// TODO: as a follow-up optimization, don't store `none` values here since that data | |
/// is redundant with `comptime_bits` stored elsewhere. | |
comptime_args: Index.Slice, | |
/// Returns a pointer that becomes invalid after any additions to the `InternPool`. | |
pub fn analysis(func: *const Func, ip: *const InternPool) *FuncAnalysis { | |
return @ptrCast(&ip.extra.items[func.analysis_extra_index]); | |
} | |
/// Returns a pointer that becomes invalid after any additions to the `InternPool`. | |
pub fn zirBodyInst(func: *const Func, ip: *const InternPool) *TrackedInst.Index { | |
return @ptrCast(&ip.extra.items[func.zir_body_inst_extra_index]); | |
} | |
/// Returns a pointer that becomes invalid after any additions to the `InternPool`. | |
pub fn branchQuota(func: *const Func, ip: *const InternPool) *u32 { | |
return &ip.extra.items[func.branch_quota_extra_index]; | |
} | |
/// Returns a pointer that becomes invalid after any additions to the `InternPool`. | |
pub fn resolvedErrorSet(func: *const Func, ip: *const InternPool) *Index { | |
assert(func.analysis(ip).inferred_error_set); | |
return @ptrCast(&ip.extra.items[func.resolved_error_set_extra_index]); | |
} | |
}; | |
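// Illustrative sketch, not part of the original source: the accessors | |
// above return pointers straight into `ip.extra`, so they must be | |
// re-fetched after anything that can add to the pool. Assuming a | |
// hypothetical `func: Key.Func` and `ip: *const InternPool`: | |
// | |
//     const quota = func.branchQuota(ip); // *u32 aliasing ip.extra | |
//     quota.* +|= 1; // ok: nothing has been interned since the fetch | |
//     // any subsequent intern operation may reallocate `extra`, | |
//     // invalidating `quota`; call func.branchQuota(ip) again first. | |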
pub const Int = struct { | |
ty: Index, | |
storage: Storage, | |
pub const Storage = union(enum) { | |
u64: u64, | |
i64: i64, | |
big_int: BigIntConst, | |
lazy_align: Index, | |
lazy_size: Index, | |
/// Big enough to fit any non-BigInt value | |
pub const BigIntSpace = struct { | |
/// The +1 is headroom so that operations such as incrementing once | |
/// or decrementing once are possible without using an allocator. | |
limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb, | |
}; | |
pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst { | |
return switch (storage) { | |
.big_int => |x| x, | |
inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), | |
.lazy_align, .lazy_size => unreachable, | |
}; | |
} | |
}; | |
}; | |
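// Illustrative sketch, not part of the original source: `BigIntSpace` | |
// lets a caller view `u64`/`i64` storage as a `BigIntConst` without an | |
// allocator. Assuming a hypothetical `int: Key.Int`: | |
// | |
//     var space: Key.Int.Storage.BigIntSpace = undefined; | |
//     const big = int.storage.toBigInt(&space); | |
//     // `big.limbs` points into `space`, so `space` must stay alive as | |
//     // long as `big` is used; lazy storage is unreachable here. | |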
pub const Error = extern struct { | |
ty: Index, | |
name: NullTerminatedString, | |
}; | |
pub const ErrorUnion = struct { | |
ty: Index, | |
val: Value, | |
pub const Value = union(enum) { | |
err_name: NullTerminatedString, | |
payload: Index, | |
}; | |
}; | |
pub const EnumTag = extern struct { | |
/// The enum type. | |
ty: Index, | |
/// The integer tag value which has the integer tag type of the enum. | |
int: Index, | |
}; | |
pub const Float = struct { | |
ty: Index, | |
/// The storage used must match the size of the float type being represented. | |
storage: Storage, | |
pub const Storage = union(enum) { | |
f16: f16, | |
f32: f32, | |
f64: f64, | |
f80: f80, | |
f128: f128, | |
}; | |
}; | |
pub const Ptr = struct { | |
/// This is the pointer type, not the element type. | |
ty: Index, | |
/// The value of the address that the pointer points to. | |
addr: Addr, | |
pub const Addr = union(enum) { | |
const Tag = @typeInfo(Addr).Union.tag_type.?; | |
decl: DeclIndex, | |
mut_decl: MutDecl, | |
anon_decl: AnonDecl, | |
comptime_field: Index, | |
int: Index, | |
eu_payload: Index, | |
opt_payload: Index, | |
elem: BaseIndex, | |
field: BaseIndex, | |
pub const MutDecl = struct { | |
decl: DeclIndex, | |
runtime_index: RuntimeIndex, | |
}; | |
pub const BaseIndex = struct { | |
base: Index, | |
index: u64, | |
}; | |
pub const AnonDecl = extern struct { | |
val: Index, | |
/// Contains the canonical pointer type of the anonymous | |
/// declaration. This may equal `ty` of the `Ptr` or it may be | |
/// different. Importantly, when lowering the anonymous decl, | |
/// the original pointer type alignment must be used. | |
orig_ty: Index, | |
}; | |
}; | |
}; | |
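// Illustrative sketch, not part of the original source: constructing a | |
// pointer key with an integer address, where `ptr_ty` and `addr_int` | |
// are hypothetical `Index` values already interned in the pool: | |
// | |
//     const key: Key = .{ .ptr = .{ | |
//         .ty = ptr_ty, // the pointer type, not the element type | |
//         .addr = .{ .int = addr_int }, // Index of the address value | |
//     } }; | |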
pub const Slice = struct { | |
/// This is the slice type, not the element type. | |
ty: Index, | |
/// The slice's `ptr` field. Must be a many-ptr with the same properties as `ty`. | |
ptr: Index, | |
/// The slice's `len` field. Must be a `usize`. | |
len: Index, | |
}; | |
/// `null` is represented by the `val` field being `none`. | |
pub const Opt = extern struct { | |
/// This is the optional type; not the payload type. | |
ty: Index, | |
/// This could be `none`, indicating the optional is `null`. | |
val: Index, | |
}; | |
pub const Union = extern struct { | |
/// This is the union type; not the field type. | |
ty: Index, | |
/// Indicates the active field. This could be `none`, which indicates the | |
/// tag is not known. `none` is only a valid value for extern and packed | |
/// unions. | |
/// In those cases, the type of `val` is: | |
/// extern: a u8 array of the same byte length as the union | |
/// packed: an unsigned integer with the same bit size as the union | |
tag: Index, | |
/// The value of the active field. | |
val: Index, | |
}; | |
pub const Aggregate = struct { | |
ty: Index, | |
storage: Storage, | |
pub const Storage = union(enum) { | |
bytes: []const u8, | |
elems: []const Index, | |
repeated_elem: Index, | |
pub fn values(self: *const Storage) []const Index { | |
return switch (self.*) { | |
.bytes => &.{}, | |
.elems => |elems| elems, | |
.repeated_elem => |*elem| @as(*const [1]Index, elem), | |
}; | |
} | |
}; | |
}; | |
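// Illustrative sketch, not part of the original source: `values` folds | |
// the three storage encodings into one slice type. Note how the | |
// `repeated_elem` case views the payload itself as a one-element slice: | |
// | |
//     const vals = aggregate.storage.values(); // hypothetical Key.Aggregate | |
//     // .bytes         -> empty slice (bytes are not interned per-element) | |
//     // .elems         -> the stored element indexes | |
//     // .repeated_elem -> length-1 []const Index viewing the union payload | |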
pub const MemoizedCall = struct { | |
func: Index, | |
arg_values: []const Index, | |
result: Index, | |
}; | |
pub fn hash32(key: Key, ip: *const InternPool) u32 { | |
return @truncate(key.hash64(ip)); | |
} | |
pub fn hash64(key: Key, ip: *const InternPool) u64 { | |
const asBytes = std.mem.asBytes; | |
const KeyTag = @typeInfo(Key).Union.tag_type.?; | |
const seed = @intFromEnum(@as(KeyTag, key)); | |
return switch (key) { | |
// TODO: assert no padding in these types | |
inline .ptr_type, | |
.array_type, | |
.vector_type, | |
.opt_type, | |
.anyframe_type, | |
.error_union_type, | |
.simple_type, | |
.simple_value, | |
.opt, | |
.undef, | |
.err, | |
.enum_literal, | |
.enum_tag, | |
.empty_enum_value, | |
.inferred_error_set_type, | |
.un, | |
=> |x| Hash.hash(seed, asBytes(&x)), | |
.int_type => |x| Hash.hash(seed + @intFromEnum(x.signedness), asBytes(&x.bits)), | |
.error_union => |x| switch (x.val) { | |
.err_name => |y| Hash.hash(seed + 0, asBytes(&x.ty) ++ asBytes(&y)), | |
.payload => |y| Hash.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)), | |
}, | |
inline .opaque_type, | |
.enum_type, | |
.variable, | |
.union_type, | |
.struct_type, | |
=> |x| Hash.hash(seed, asBytes(&x.decl)), | |
.int => |int| { | |
var hasher = Hash.init(seed); | |
// Canonicalize all integers by converting them to BigIntConst. | |
switch (int.storage) { | |
.u64, .i64, .big_int => { | |
var buffer: Key.Int.Storage.BigIntSpace = undefined; | |
const big_int = int.storage.toBigInt(&buffer); | |
std.hash.autoHash(&hasher, int.ty); | |
std.hash.autoHash(&hasher, big_int.positive); | |
for (big_int.limbs) |limb| std.hash.autoHash(&hasher, limb); | |
}, | |
.lazy_align, .lazy_size => |lazy_ty| { | |
std.hash.autoHash( | |
&hasher, | |
@as(@typeInfo(Key.Int.Storage).Union.tag_type.?, int.storage), | |
); | |
std.hash.autoHash(&hasher, lazy_ty); | |
}, | |
} | |
return hasher.final(); | |
}, | |
.float => |float| { | |
var hasher = Hash.init(seed); | |
std.hash.autoHash(&hasher, float.ty); | |
switch (float.storage) { | |
inline else => |val| std.hash.autoHash( | |
&hasher, | |
@as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), @bitCast(val)), | |
), | |
} | |
return hasher.final(); | |
}, | |
.slice => |slice| Hash.hash(seed, asBytes(&slice.ty) ++ asBytes(&slice.ptr) ++ asBytes(&slice.len)), | |
.ptr => |ptr| { | |
// Int-to-ptr pointers are hashed separately from decl-referencing pointers. | |
// This is sound due to pointer provenance rules. | |
const addr: @typeInfo(Key.Ptr.Addr).Union.tag_type.? = ptr.addr; | |
const seed2 = seed + @intFromEnum(addr); | |
const common = asBytes(&ptr.ty); | |
return switch (ptr.addr) { | |
.decl => |x| Hash.hash(seed2, common ++ asBytes(&x)), | |
.mut_decl => |x| Hash.hash( | |
seed2, | |
common ++ asBytes(&x.decl) ++ asBytes(&x.runtime_index), | |
), | |
.anon_decl => |x| Hash.hash(seed2, common ++ asBytes(&x)), | |
.int, | |
.eu_payload, | |
.opt_payload, | |
.comptime_field, | |
=> |int| Hash.hash(seed2, common ++ asBytes(&int)), | |
.elem, .field => |x| Hash.hash( | |
seed2, | |
common ++ asBytes(&x.base) ++ asBytes(&x.index), | |
), | |
}; | |
}, | |
.aggregate => |aggregate| { | |
var hasher = Hash.init(seed); | |
std.hash.autoHash(&hasher, aggregate.ty); | |
const len = ip.aggregateTypeLen(aggregate.ty); | |
const child = switch (ip.indexToKey(aggregate.ty)) { | |
.array_type => |array_type| array_type.child, | |
.vector_type => |vector_type| vector_type.child, | |
.anon_struct_type, .struct_type => .none, | |
else => unreachable, | |
}; | |
if (child == .u8_type) { | |
switch (aggregate.storage) { | |
.bytes => |bytes| for (bytes[0..@intCast(len)]) |byte| { | |
std.hash.autoHash(&hasher, KeyTag.int); | |
std.hash.autoHash(&hasher, byte); | |
}, | |
.elems => |elems| for (elems[0..@intCast(len)]) |elem| { | |
const elem_key = ip.indexToKey(elem); | |
std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); | |
switch (elem_key) { | |
.undef => {}, | |
.int => |int| std.hash.autoHash( | |
&hasher, | |
@as(u8, @intCast(int.storage.u64)), | |
), | |
else => unreachable, | |
} | |
}, | |
.repeated_elem => |elem| { | |
const elem_key = ip.indexToKey(elem); | |
var remaining = len; | |
while (remaining > 0) : (remaining -= 1) { | |
std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); | |
switch (elem_key) { | |
.undef => {}, | |
.int => |int| std.hash.autoHash( | |
&hasher, | |
@as(u8, @intCast(int.storage.u64)), | |
), | |
else => unreachable, | |
} | |
} | |
}, | |
} | |
return hasher.final(); | |
} | |
switch (aggregate.storage) { | |
.bytes => unreachable, | |
.elems => |elems| for (elems[0..@as(usize, @intCast(len))]) |elem| | |
std.hash.autoHash(&hasher, elem), | |
.repeated_elem => |elem| { | |
var remaining = len; | |
while (remaining > 0) : (remaining -= 1) std.hash.autoHash(&hasher, elem); | |
}, | |
} | |
return hasher.final(); | |
}, | |
.error_set_type => |x| Hash.hash(seed, std.mem.sliceAsBytes(x.names.get(ip))), | |
.anon_struct_type => |anon_struct_type| { | |
var hasher = Hash.init(seed); | |
for (anon_struct_type.types.get(ip)) |elem| std.hash.autoHash(&hasher, elem); | |
for (anon_struct_type.values.get(ip)) |elem| std.hash.autoHash(&hasher, elem); | |
for (anon_struct_type.names.get(ip)) |elem| std.hash.autoHash(&hasher, elem); | |
return hasher.final(); | |
}, | |
.func_type => |func_type| { | |
var hasher = Hash.init(seed); | |
func_type.hash(&hasher, ip); | |
return hasher.final(); | |
}, | |
.memoized_call => |memoized_call| { | |
var hasher = Hash.init(seed); | |
std.hash.autoHash(&hasher, memoized_call.func); | |
for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg); | |
return hasher.final(); | |
}, | |
.func => |func| { | |
// In the case of a function with an inferred error set, we | |
// must not include the inferred error set type in the hash, | |
// otherwise we would get false negatives for interning generic | |
// function instances which have inferred error sets. | |
if (func.generic_owner == .none and func.resolved_error_set_extra_index == 0) { | |
const bytes = asBytes(&func.owner_decl) ++ asBytes(&func.ty) ++ | |
[1]u8{@intFromBool(func.uncoerced_ty == func.ty)}; | |
return Hash.hash(seed, bytes); | |
} | |
var hasher = Hash.init(seed); | |
std.hash.autoHash(&hasher, func.generic_owner); | |
std.hash.autoHash(&hasher, func.uncoerced_ty == func.ty); | |
for (func.comptime_args.get(ip)) |arg| std.hash.autoHash(&hasher, arg); | |
if (func.resolved_error_set_extra_index == 0) { | |
std.hash.autoHash(&hasher, func.ty); | |
} else { | |
var ty_info = ip.indexToFuncType(func.ty).?; | |
ty_info.return_type = ip.errorUnionPayload(ty_info.return_type); | |
ty_info.hash(&hasher, ip); | |
} | |
return hasher.final(); | |
}, | |
.extern_func => |x| Hash.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)), | |
}; | |
} | |
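// Illustrative note, not part of the original source: the big-int | |
// canonicalization above means logically equal integer keys hash the | |
// same regardless of storage representation, e.g. (hypothetical): | |
// | |
//     const a: Key = .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = 5 } } }; | |
//     const b: Key = .{ .int = .{ .ty = .usize_type, .storage = .{ .i64 = 5 } } }; | |
//     assert(a.hash64(ip) == b.hash64(ip)); | |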
pub fn eql(a: Key, b: Key, ip: *const InternPool) bool { | |
const KeyTag = @typeInfo(Key).Union.tag_type.?; | |
const a_tag: KeyTag = a; | |
const b_tag: KeyTag = b; | |
if (a_tag != b_tag) return false; | |
switch (a) { | |
.int_type => |a_info| { | |
const b_info = b.int_type; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.ptr_type => |a_info| { | |
const b_info = b.ptr_type; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.array_type => |a_info| { | |
const b_info = b.array_type; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.vector_type => |a_info| { | |
const b_info = b.vector_type; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.opt_type => |a_info| { | |
const b_info = b.opt_type; | |
return a_info == b_info; | |
}, | |
.anyframe_type => |a_info| { | |
const b_info = b.anyframe_type; | |
return a_info == b_info; | |
}, | |
.error_union_type => |a_info| { | |
const b_info = b.error_union_type; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.simple_type => |a_info| { | |
const b_info = b.simple_type; | |
return a_info == b_info; | |
}, | |
.simple_value => |a_info| { | |
const b_info = b.simple_value; | |
return a_info == b_info; | |
}, | |
.undef => |a_info| { | |
const b_info = b.undef; | |
return a_info == b_info; | |
}, | |
.opt => |a_info| { | |
const b_info = b.opt; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.un => |a_info| { | |
const b_info = b.un; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.err => |a_info| { | |
const b_info = b.err; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.error_union => |a_info| { | |
const b_info = b.error_union; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.enum_literal => |a_info| { | |
const b_info = b.enum_literal; | |
return a_info == b_info; | |
}, | |
.enum_tag => |a_info| { | |
const b_info = b.enum_tag; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.empty_enum_value => |a_info| { | |
const b_info = b.empty_enum_value; | |
return a_info == b_info; | |
}, | |
.variable => |a_info| { | |
const b_info = b.variable; | |
return a_info.decl == b_info.decl; | |
}, | |
.extern_func => |a_info| { | |
const b_info = b.extern_func; | |
return a_info.ty == b_info.ty and a_info.decl == b_info.decl; | |
}, | |
.func => |a_info| { | |
const b_info = b.func; | |
if (a_info.generic_owner != b_info.generic_owner) | |
return false; | |
if (a_info.generic_owner == .none) { | |
if (a_info.owner_decl != b_info.owner_decl) | |
return false; | |
} else { | |
if (!std.mem.eql( | |
Index, | |
a_info.comptime_args.get(ip), | |
b_info.comptime_args.get(ip), | |
)) return false; | |
} | |
if ((a_info.ty == a_info.uncoerced_ty) != | |
(b_info.ty == b_info.uncoerced_ty)) | |
{ | |
return false; | |
} | |
if (a_info.ty == b_info.ty) | |
return true; | |
// There is one case where the types may be unequal but we | |
// still want to find the same function body instance. In the | |
// case of the functions having an inferred error set, the key | |
// used to find an existing function body will necessarily have | |
// a unique inferred error set type, because it refers to the | |
// function body InternPool Index. To make this case work we | |
// omit the inferred error set from the equality check. | |
if (a_info.resolved_error_set_extra_index == 0 or | |
b_info.resolved_error_set_extra_index == 0) | |
{ | |
return false; | |
} | |
var a_ty_info = ip.indexToFuncType(a_info.ty).?; | |
a_ty_info.return_type = ip.errorUnionPayload(a_ty_info.return_type); | |
var b_ty_info = ip.indexToFuncType(b_info.ty).?; | |
b_ty_info.return_type = ip.errorUnionPayload(b_ty_info.return_type); | |
return a_ty_info.eql(b_ty_info, ip); | |
}, | |
.slice => |a_info| { | |
const b_info = b.slice; | |
if (a_info.ty != b_info.ty) return false; | |
if (a_info.ptr != b_info.ptr) return false; | |
if (a_info.len != b_info.len) return false; | |
return true; | |
}, | |
.ptr => |a_info| { | |
const b_info = b.ptr; | |
if (a_info.ty != b_info.ty) return false; | |
const AddrTag = @typeInfo(Key.Ptr.Addr).Union.tag_type.?; | |
if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false; | |
return switch (a_info.addr) { | |
.decl => |a_decl| a_decl == b_info.addr.decl, | |
.mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl), | |
.anon_decl => |ad| ad.val == b_info.addr.anon_decl.val and | |
ad.orig_ty == b_info.addr.anon_decl.orig_ty, | |
.int => |a_int| a_int == b_info.addr.int, | |
.eu_payload => |a_eu_payload| a_eu_payload == b_info.addr.eu_payload, | |
.opt_payload => |a_opt_payload| a_opt_payload == b_info.addr.opt_payload, | |
.comptime_field => |a_comptime_field| a_comptime_field == b_info.addr.comptime_field, | |
.elem => |a_elem| std.meta.eql(a_elem, b_info.addr.elem), | |
.field => |a_field| std.meta.eql(a_field, b_info.addr.field), | |
}; | |
}, | |
.int => |a_info| { | |
const b_info = b.int; | |
if (a_info.ty != b_info.ty) | |
return false; | |
return switch (a_info.storage) { | |
.u64 => |aa| switch (b_info.storage) { | |
.u64 => |bb| aa == bb, | |
.i64 => |bb| aa == bb, | |
.big_int => |bb| bb.orderAgainstScalar(aa) == .eq, | |
.lazy_align, .lazy_size => false, | |
}, | |
.i64 => |aa| switch (b_info.storage) { | |
.u64 => |bb| aa == bb, | |
.i64 => |bb| aa == bb, | |
.big_int => |bb| bb.orderAgainstScalar(aa) == .eq, | |
.lazy_align, .lazy_size => false, | |
}, | |
.big_int => |aa| switch (b_info.storage) { | |
.u64 => |bb| aa.orderAgainstScalar(bb) == .eq, | |
.i64 => |bb| aa.orderAgainstScalar(bb) == .eq, | |
.big_int => |bb| aa.eql(bb), | |
.lazy_align, .lazy_size => false, | |
}, | |
.lazy_align => |aa| switch (b_info.storage) { | |
.u64, .i64, .big_int, .lazy_size => false, | |
.lazy_align => |bb| aa == bb, | |
}, | |
.lazy_size => |aa| switch (b_info.storage) { | |
.u64, .i64, .big_int, .lazy_align => false, | |
.lazy_size => |bb| aa == bb, | |
}, | |
}; | |
}, | |
.float => |a_info| { | |
const b_info = b.float; | |
if (a_info.ty != b_info.ty) | |
return false; | |
if (a_info.ty == .c_longdouble_type and a_info.storage != .f80) { | |
// These are strange: we'll sometimes represent them as f128, even if the | |
// underlying type is smaller. f80 is an exception: see float_c_longdouble_f80. | |
const a_val = switch (a_info.storage) { | |
inline else => |val| @as(u128, @bitCast(@as(f128, @floatCast(val)))), | |
}; | |
const b_val = switch (b_info.storage) { | |
inline else => |val| @as(u128, @bitCast(@as(f128, @floatCast(val)))), | |
}; | |
return a_val == b_val; | |
} | |
const StorageTag = @typeInfo(Key.Float.Storage).Union.tag_type.?; | |
assert(@as(StorageTag, a_info.storage) == @as(StorageTag, b_info.storage)); | |
switch (a_info.storage) { | |
inline else => |val, tag| { | |
const Bits = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))); | |
const a_bits: Bits = @bitCast(val); | |
const b_bits: Bits = @bitCast(@field(b_info.storage, @tagName(tag))); | |
return a_bits == b_bits; | |
}, | |
} | |
}, | |
.opaque_type => |a_info| { | |
const b_info = b.opaque_type; | |
return a_info.decl == b_info.decl; | |
}, | |
.enum_type => |a_info| { | |
const b_info = b.enum_type; | |
return a_info.decl == b_info.decl; | |
}, | |
.union_type => |a_info| { | |
const b_info = b.union_type; | |
return a_info.decl == b_info.decl; | |
}, | |
.struct_type => |a_info| { | |
const b_info = b.struct_type; | |
return a_info.decl == b_info.decl; | |
}, | |
.aggregate => |a_info| { | |
const b_info = b.aggregate; | |
if (a_info.ty != b_info.ty) return false; | |
const len = ip.aggregateTypeLen(a_info.ty); | |
const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?; | |
if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) { | |
for (0..@as(usize, @intCast(len))) |elem_index| { | |
const a_elem = switch (a_info.storage) { | |
.bytes => |bytes| ip.getIfExists(.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = bytes[elem_index] }, | |
} }) orelse return false, | |
.elems => |elems| elems[elem_index], | |
.repeated_elem => |elem| elem, | |
}; | |
const b_elem = switch (b_info.storage) { | |
.bytes => |bytes| ip.getIfExists(.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = bytes[elem_index] }, | |
} }) orelse return false, | |
.elems => |elems| elems[elem_index], | |
.repeated_elem => |elem| elem, | |
}; | |
if (a_elem != b_elem) return false; | |
} | |
return true; | |
} | |
switch (a_info.storage) { | |
.bytes => |a_bytes| { | |
const b_bytes = b_info.storage.bytes; | |
return std.mem.eql( | |
u8, | |
a_bytes[0..@as(usize, @intCast(len))], | |
b_bytes[0..@as(usize, @intCast(len))], | |
); | |
}, | |
.elems => |a_elems| { | |
const b_elems = b_info.storage.elems; | |
return std.mem.eql( | |
Index, | |
a_elems[0..@as(usize, @intCast(len))], | |
b_elems[0..@as(usize, @intCast(len))], | |
); | |
}, | |
.repeated_elem => |a_elem| { | |
const b_elem = b_info.storage.repeated_elem; | |
return a_elem == b_elem; | |
}, | |
} | |
}, | |
.anon_struct_type => |a_info| { | |
const b_info = b.anon_struct_type; | |
return std.mem.eql(Index, a_info.types.get(ip), b_info.types.get(ip)) and | |
std.mem.eql(Index, a_info.values.get(ip), b_info.values.get(ip)) and | |
std.mem.eql(NullTerminatedString, a_info.names.get(ip), b_info.names.get(ip)); | |
}, | |
.error_set_type => |a_info| { | |
const b_info = b.error_set_type; | |
return std.mem.eql(NullTerminatedString, a_info.names.get(ip), b_info.names.get(ip)); | |
}, | |
.inferred_error_set_type => |a_info| { | |
const b_info = b.inferred_error_set_type; | |
return a_info == b_info; | |
}, | |
.func_type => |a_info| { | |
const b_info = b.func_type; | |
return Key.FuncType.eql(a_info, b_info, ip); | |
}, | |
.memoized_call => |a_info| { | |
const b_info = b.memoized_call; | |
return a_info.func == b_info.func and | |
std.mem.eql(Index, a_info.arg_values, b_info.arg_values); | |
}, | |
} | |
} | |
pub fn typeOf(key: Key) Index { | |
return switch (key) { | |
.int_type, | |
.ptr_type, | |
.array_type, | |
.vector_type, | |
.opt_type, | |
.anyframe_type, | |
.error_union_type, | |
.error_set_type, | |
.inferred_error_set_type, | |
.simple_type, | |
.struct_type, | |
.union_type, | |
.opaque_type, | |
.enum_type, | |
.anon_struct_type, | |
.func_type, | |
=> .type_type, | |
inline .ptr, | |
.slice, | |
.int, | |
.float, | |
.opt, | |
.variable, | |
.extern_func, | |
.func, | |
.err, | |
.error_union, | |
.enum_tag, | |
.aggregate, | |
.un, | |
=> |x| x.ty, | |
.enum_literal => .enum_literal_type, | |
.undef => |x| x, | |
.empty_enum_value => |x| x, | |
.simple_value => |s| switch (s) { | |
.undefined => .undefined_type, | |
.void => .void_type, | |
.null => .null_type, | |
.false, .true => .bool_type, | |
.empty_struct => .empty_struct_type, | |
.@"unreachable" => .noreturn_type, | |
.generic_poison => .generic_poison_type, | |
}, | |
.memoized_call => unreachable, | |
}; | |
} | |
}; | |
pub const RequiresComptime = enum(u2) { no, yes, unknown, wip }; | |
// Unlike `Tag.TypeUnion`, which is an encoding, and `Key.UnionType`, which | |
// is a minimal hashmap key, this type is a convenience type that contains | |
// info needed by semantic analysis. | |
pub const UnionType = struct { | |
/// The Decl that corresponds to the union itself. | |
decl: DeclIndex, | |
/// Represents the declarations inside this union. | |
namespace: NamespaceIndex, | |
/// The enum tag type. | |
enum_tag_ty: Index, | |
/// The integer tag type of the enum. | |
int_tag_ty: Index, | |
/// ABI size of the union, including padding | |
size: u64, | |
/// Trailing padding bytes | |
padding: u32, | |
/// List of field names in declaration order. | |
field_names: NullTerminatedString.Slice, | |
/// List of field types in declaration order. | |
/// These are `none` until `status` is `have_field_types` or `have_layout`. | |
field_types: Index.Slice, | |
/// List of field alignments in declaration order. | |
/// `none` means the ABI alignment of the type. | |
/// If this slice has length 0 it means all elements are `none`. | |
field_aligns: Alignment.Slice, | |
/// Index of the union_decl ZIR instruction. | |
zir_index: TrackedInst.Index.Optional, | |
/// Index into extra array of the `flags` field. | |
flags_index: u32, | |
/// Copied from `enum_tag_ty`. | |
names_map: OptionalMapIndex, | |
pub const RuntimeTag = enum(u2) { | |
none, | |
safety, | |
tagged, | |
pub fn hasTag(self: RuntimeTag) bool { | |
return switch (self) { | |
.none => false, | |
.tagged, .safety => true, | |
}; | |
} | |
}; | |
pub const Status = enum(u3) { | |
none, | |
field_types_wip, | |
have_field_types, | |
layout_wip, | |
have_layout, | |
fully_resolved_wip, | |
/// The type and all its fields have had their layout resolved, even | |
/// through pointers, which `have_layout` does not ensure. | |
fully_resolved, | |
pub fn haveFieldTypes(status: Status) bool { | |
return switch (status) { | |
.none, | |
.field_types_wip, | |
=> false, | |
.have_field_types, | |
.layout_wip, | |
.have_layout, | |
.fully_resolved_wip, | |
.fully_resolved, | |
=> true, | |
}; | |
} | |
pub fn haveLayout(status: Status) bool { | |
return switch (status) { | |
.none, | |
.field_types_wip, | |
.have_field_types, | |
.layout_wip, | |
=> false, | |
.have_layout, | |
.fully_resolved_wip, | |
.fully_resolved, | |
=> true, | |
}; | |
} | |
}; | |
/// The returned pointer expires with any addition to the `InternPool`. | |
pub fn flagsPtr(self: UnionType, ip: *const InternPool) *Tag.TypeUnion.Flags { | |
return @ptrCast(&ip.extra.items[self.flags_index]); | |
} | |
/// Look up field index based on field name. | |
pub fn nameIndex(self: UnionType, ip: *const InternPool, name: NullTerminatedString) ?u32 { | |
const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)]; | |
const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) }; | |
const field_index = map.getIndexAdapted(name, adapter) orelse return null; | |
return @intCast(field_index); | |
} | |
pub fn hasTag(self: UnionType, ip: *const InternPool) bool { | |
return self.flagsPtr(ip).runtime_tag.hasTag(); | |
} | |
pub fn haveFieldTypes(self: UnionType, ip: *const InternPool) bool { | |
return self.flagsPtr(ip).status.haveFieldTypes(); | |
} | |
pub fn haveLayout(self: UnionType, ip: *const InternPool) bool { | |
return self.flagsPtr(ip).status.haveLayout(); | |
} | |
pub fn getLayout(self: UnionType, ip: *const InternPool) std.builtin.Type.ContainerLayout { | |
return self.flagsPtr(ip).layout; | |
} | |
pub fn fieldAlign(self: UnionType, ip: *const InternPool, field_index: u32) Alignment { | |
if (self.field_aligns.len == 0) return .none; | |
return self.field_aligns.get(ip)[field_index]; | |
} | |
/// This does not mutate the fields of `UnionType` itself; the new value | |
/// is written directly into the `InternPool`'s extra array. | |
pub fn setZirIndex(self: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void { | |
const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?; | |
const zir_index_field_index = std.meta.fieldIndex(Tag.TypeUnion, "zir_index").?; | |
const ptr: *TrackedInst.Index.Optional = | |
@ptrCast(&ip.extra.items[self.flags_index - flags_field_index + zir_index_field_index]); | |
ptr.* = new_zir_index; | |
} | |
}; | |
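// Illustrative sketch, not part of the original source: status updates | |
// go through `flagsPtr`, which aliases `ip.extra` and therefore expires | |
// on any addition to the pool (hypothetical `un: UnionType`): | |
// | |
//     un.flagsPtr(ip).status = .layout_wip; | |
//     // ... resolve the layout, re-fetching the pointer as needed ... | |
//     un.flagsPtr(ip).status = .have_layout; | |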
/// Fetch all the interesting fields of a union type into a convenient data | |
/// structure. | |
/// This asserts that the union's enum tag type has been resolved. | |
pub fn loadUnionType(ip: *InternPool, key: Key.UnionType) UnionType { | |
const type_union = ip.extraDataTrail(Tag.TypeUnion, key.extra_index); | |
const enum_ty = type_union.data.tag_ty; | |
const enum_info = ip.indexToKey(enum_ty).enum_type; | |
const fields_len: u32 = @intCast(enum_info.names.len); | |
return .{ | |
.decl = type_union.data.decl, | |
.namespace = type_union.data.namespace, | |
.enum_tag_ty = enum_ty, | |
.int_tag_ty = enum_info.tag_ty, | |
.size = type_union.data.size, | |
.padding = type_union.data.padding, | |
.field_names = enum_info.names, | |
.names_map = enum_info.names_map, | |
.field_types = .{ | |
.start = type_union.end, | |
.len = fields_len, | |
}, | |
.field_aligns = .{ | |
.start = type_union.end + fields_len, | |
.len = if (type_union.data.flags.any_aligned_fields) fields_len else 0, | |
}, | |
.zir_index = type_union.data.zir_index, | |
.flags_index = key.extra_index + std.meta.fieldIndex(Tag.TypeUnion, "flags").?, | |
}; | |
} | |
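// Illustrative sketch, not part of the original source: typical use of | |
// `loadUnionType`, given a hypothetical `key: Key.UnionType` whose enum | |
// tag type has already been resolved: | |
// | |
//     const loaded = ip.loadUnionType(key); | |
//     const field_ty = loaded.field_types.get(ip)[0]; | |
//     const field_align = loaded.fieldAlign(ip, 0); // .none => ABI alignment | |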
pub const Item = struct { | |
tag: Tag, | |
/// The doc comments on the respective Tag explain how to interpret this. | |
data: u32, | |
}; | |
/// An index into `map`: the canonical index of a `Value` within this | |
/// `InternPool`. The values are typed. Two values with the same type can | |
/// be compared for equality simply by checking whether their indexes are | |
/// equal, provided they are both in the same `InternPool`. | |
/// When adding a tag to this enum, consider adding a corresponding entry to | |
/// `primitives` in AstGen.zig. | |
pub const Index = enum(u32) { | |
pub const first_type: Index = .u0_type; | |
pub const last_type: Index = .empty_struct_type; | |
pub const first_value: Index = .undef; | |
pub const last_value: Index = .empty_struct; | |
u0_type, | |
i0_type, | |
u1_type, | |
u8_type, | |
i8_type, | |
u16_type, | |
i16_type, | |
u29_type, | |
u32_type, | |
i32_type, | |
u64_type, | |
i64_type, | |
u80_type, | |
u128_type, | |
i128_type, | |
usize_type, | |
isize_type, | |
c_char_type, | |
c_short_type, | |
c_ushort_type, | |
c_int_type, | |
c_uint_type, | |
c_long_type, | |
c_ulong_type, | |
c_longlong_type, | |
c_ulonglong_type, | |
c_longdouble_type, | |
f16_type, | |
f32_type, | |
f64_type, | |
f80_type, | |
f128_type, | |
anyopaque_type, | |
bool_type, | |
void_type, | |
type_type, | |
anyerror_type, | |
comptime_int_type, | |
comptime_float_type, | |
noreturn_type, | |
anyframe_type, | |
null_type, | |
undefined_type, | |
enum_literal_type, | |
atomic_order_type, | |
atomic_rmw_op_type, | |
calling_convention_type, | |
address_space_type, | |
float_mode_type, | |
reduce_op_type, | |
call_modifier_type, | |
prefetch_options_type, | |
export_options_type, | |
extern_options_type, | |
type_info_type, | |
manyptr_u8_type, | |
manyptr_const_u8_type, | |
manyptr_const_u8_sentinel_0_type, | |
single_const_pointer_to_comptime_int_type, | |
slice_const_u8_type, | |
slice_const_u8_sentinel_0_type, | |
optional_noreturn_type, | |
anyerror_void_error_union_type, | |
/// Used for the inferred error set of inline/comptime function calls. | |
adhoc_inferred_error_set_type, | |
generic_poison_type, | |
/// `@TypeOf(.{})` | |
empty_struct_type, | |
/// `undefined` (untyped) | |
undef, | |
/// `0` (comptime_int) | |
zero, | |
/// `0` (usize) | |
zero_usize, | |
/// `0` (u8) | |
zero_u8, | |
/// `1` (comptime_int) | |
one, | |
/// `1` (usize) | |
one_usize, | |
/// `1` (u8) | |
one_u8, | |
/// `4` (u8) | |
four_u8, | |
/// `-1` (comptime_int) | |
negative_one, | |
/// `std.builtin.CallingConvention.C` | |
calling_convention_c, | |
/// `std.builtin.CallingConvention.Inline` | |
calling_convention_inline, | |
/// `{}` | |
void_value, | |
/// `unreachable` (noreturn type) | |
unreachable_value, | |
/// `null` (untyped) | |
null_value, | |
/// `true` | |
bool_true, | |
/// `false` | |
bool_false, | |
/// `.{}` (untyped) | |
empty_struct, | |
/// Used for generic parameters where the type and value | |
/// are not known until generic function instantiation. | |
generic_poison, | |
/// Used by Air/Sema only. | |
var_args_param_type = std.math.maxInt(u32) - 1, | |
none = std.math.maxInt(u32), | |
_, | |
/// An array of `Index` existing within the `extra` array. | |
/// This type exists to provide a struct that is not invalidated when | |
/// items are added to the `InternPool`. | |
pub const Slice = struct { | |
start: u32, | |
len: u32, | |
pub fn get(slice: Slice, ip: *const InternPool) []Index { | |
return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); | |
} | |
}; | |
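// Illustrative sketch, not part of the original source: a `Slice` stays | |
// valid across pool growth because it stores only `start`/`len`; `get` | |
// materializes the view on demand (hypothetical `args: Index.Slice`): | |
// | |
//     for (args.get(ip)) |arg| handle(arg); // `handle` is hypothetical | |
//     // the returned slice aliases ip.extra; do not hold it across | |
//     // additions to the pool. | |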
/// Used for a map of `Index` values to the index within a list of `Index` values. | |
const Adapter = struct { | |
indexes: []const Index, | |
pub fn eql(ctx: @This(), a: Index, b_void: void, b_map_index: usize) bool { | |
_ = b_void; | |
return a == ctx.indexes[b_map_index]; | |
} | |
pub fn hash(ctx: @This(), a: Index) u32 { | |
_ = ctx; | |
return std.hash.uint32(@intFromEnum(a)); | |
} | |
}; | |
/// This function is used by the debugger pretty-formatters in tools/ to | |
/// fetch the Tag-to-encoding mapping, which facilitates fancy debug | |
/// printing for this type. | |
/// TODO merge this with `Tag.Payload`. | |
fn dbHelper(self: *Index, tag_to_encoding_map: *struct { | |
const DataIsIndex = struct { data: Index }; | |
const DataIsExtraIndexOfEnumExplicit = struct { | |
const @"data.fields_len" = opaque {}; | |
data: *EnumExplicit, | |
@"trailing.names.len": *@"data.fields_len", | |
@"trailing.values.len": *@"data.fields_len", | |
trailing: struct { | |
names: []NullTerminatedString, | |
values: []Index, | |
}, | |
}; | |
const DataIsExtraIndexOfTypeStructAnon = struct { | |
const @"data.fields_len" = opaque {}; | |
data: *TypeStructAnon, | |
@"trailing.types.len": *@"data.fields_len", | |
@"trailing.values.len": *@"data.fields_len", | |
@"trailing.names.len": *@"data.fields_len", | |
trailing: struct { | |
types: []Index, | |
values: []Index, | |
names: []NullTerminatedString, | |
}, | |
}; | |
type_int_signed: struct { data: u32 }, | |
type_int_unsigned: struct { data: u32 }, | |
type_array_big: struct { data: *Array }, | |
type_array_small: struct { data: *Vector }, | |
type_vector: struct { data: *Vector }, | |
type_pointer: struct { data: *Tag.TypePointer }, | |
type_slice: DataIsIndex, | |
type_optional: DataIsIndex, | |
type_anyframe: DataIsIndex, | |
type_error_union: struct { data: *Key.ErrorUnionType }, | |
type_anyerror_union: DataIsIndex, | |
type_error_set: struct { | |
const @"data.names_len" = opaque {}; | |
data: *Tag.ErrorSet, | |
@"trailing.names.len": *@"data.names_len", | |
trailing: struct { names: []NullTerminatedString }, | |
}, | |
type_inferred_error_set: DataIsIndex, | |
type_enum_auto: struct { | |
const @"data.fields_len" = opaque {}; | |
data: *EnumAuto, | |
@"trailing.names.len": *@"data.fields_len", | |
trailing: struct { names: []NullTerminatedString }, | |
}, | |
type_enum_explicit: DataIsExtraIndexOfEnumExplicit, | |
type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit, | |
simple_type: struct { data: SimpleType }, | |
type_opaque: struct { data: *Key.OpaqueType }, | |
type_struct: struct { data: *Tag.TypeStruct }, | |
type_struct_ns: struct { data: NamespaceIndex }, | |
type_struct_anon: DataIsExtraIndexOfTypeStructAnon, | |
type_struct_packed: struct { data: *Tag.TypeStructPacked }, | |
type_struct_packed_inits: struct { data: *Tag.TypeStructPacked }, | |
type_tuple_anon: DataIsExtraIndexOfTypeStructAnon, | |
type_union: struct { data: *Tag.TypeUnion }, | |
type_function: struct { | |
const @"data.flags.has_comptime_bits" = opaque {}; | |
const @"data.flags.has_noalias_bits" = opaque {}; | |
const @"data.params_len" = opaque {}; | |
data: *Tag.TypeFunction, | |
@"trailing.comptime_bits.len": *@"data.flags.has_comptime_bits", | |
@"trailing.noalias_bits.len": *@"data.flags.has_noalias_bits", | |
@"trailing.param_types.len": *@"data.params_len", | |
trailing: struct { comptime_bits: []u32, noalias_bits: []u32, param_types: []Index }, | |
}, | |
undef: DataIsIndex, | |
simple_value: struct { data: SimpleValue }, | |
ptr_decl: struct { data: *PtrDecl }, | |
ptr_mut_decl: struct { data: *PtrMutDecl }, | |
ptr_anon_decl: struct { data: *PtrAnonDecl }, | |
ptr_anon_decl_aligned: struct { data: *PtrAnonDeclAligned }, | |
ptr_comptime_field: struct { data: *PtrComptimeField }, | |
ptr_int: struct { data: *PtrBase }, | |
ptr_eu_payload: struct { data: *PtrBase }, | |
ptr_opt_payload: struct { data: *PtrBase }, | |
ptr_elem: struct { data: *PtrBaseIndex }, | |
ptr_field: struct { data: *PtrBaseIndex }, | |
ptr_slice: struct { data: *PtrSlice }, | |
opt_payload: struct { data: *Tag.TypeValue }, | |
opt_null: DataIsIndex, | |
int_u8: struct { data: u8 }, | |
int_u16: struct { data: u16 }, | |
int_u32: struct { data: u32 }, | |
int_i32: struct { data: i32 }, | |
int_usize: struct { data: u32 }, | |
int_comptime_int_u32: struct { data: u32 }, | |
int_comptime_int_i32: struct { data: i32 }, | |
int_small: struct { data: *IntSmall }, | |
int_positive: struct { data: u32 }, | |
int_negative: struct { data: u32 }, | |
int_lazy_align: struct { data: *IntLazy }, | |
int_lazy_size: struct { data: *IntLazy }, | |
error_set_error: struct { data: *Key.Error }, | |
error_union_error: struct { data: *Key.Error }, | |
error_union_payload: struct { data: *Tag.TypeValue }, | |
enum_literal: struct { data: NullTerminatedString }, | |
enum_tag: struct { data: *Tag.EnumTag }, | |
float_f16: struct { data: f16 }, | |
float_f32: struct { data: f32 }, | |
float_f64: struct { data: *Float64 }, | |
float_f80: struct { data: *Float80 }, | |
float_f128: struct { data: *Float128 }, | |
float_c_longdouble_f80: struct { data: *Float80 }, | |
float_c_longdouble_f128: struct { data: *Float128 }, | |
float_comptime_float: struct { data: *Float128 }, | |
variable: struct { data: *Tag.Variable }, | |
extern_func: struct { data: *Key.ExternFunc }, | |
func_decl: struct { | |
const @"data.analysis.inferred_error_set" = opaque {}; | |
data: *Tag.FuncDecl, | |
@"trailing.resolved_error_set.len": *@"data.analysis.inferred_error_set", | |
trailing: struct { resolved_error_set: []Index }, | |
}, | |
func_instance: struct { | |
const @"data.analysis.inferred_error_set" = opaque {}; | |
const @"data.generic_owner.data.ty.data.params_len" = opaque {}; | |
data: *Tag.FuncInstance, | |
@"trailing.resolved_error_set.len": *@"data.analysis.inferred_error_set", | |
@"trailing.comptime_args.len": *@"data.generic_owner.data.ty.data.params_len", | |
trailing: struct { resolved_error_set: []Index, comptime_args: []Index }, | |
}, | |
func_coerced: struct { | |
data: *Tag.FuncCoerced, | |
}, | |
only_possible_value: DataIsIndex, | |
union_value: struct { data: *Key.Union }, | |
bytes: struct { data: *Bytes }, | |
aggregate: struct { | |
const @"data.ty.data.len orelse data.ty.data.fields_len" = opaque {}; | |
data: *Tag.Aggregate, | |
@"trailing.element_values.len": *@"data.ty.data.len orelse data.ty.data.fields_len", | |
trailing: struct { element_values: []Index }, | |
}, | |
repeated: struct { data: *Repeated }, | |
memoized_call: struct { | |
const @"data.args_len" = opaque {}; | |
data: *MemoizedCall, | |
@"trailing.arg_values.len": *@"data.args_len", | |
trailing: struct { arg_values: []Index }, | |
}, | |
}) void { | |
_ = self; | |
const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields; | |
@setEvalBranchQuota(2_000); | |
inline for (@typeInfo(Tag).Enum.fields, 0..) |tag, start| { | |
inline for (0..map_fields.len) |offset| { | |
if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break; | |
} else { | |
@compileError(@typeName(Tag) ++ "." ++ tag.name ++ " missing dbHelper tag_to_encoding_map entry"); | |
} | |
} | |
} | |
comptime { | |
if (!builtin.strip_debug_info) { | |
_ = &dbHelper; | |
} | |
} | |
}; | |
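// Illustrative note, not part of the original source: because keys are | |
// canonicalized on insertion, `Index` equality implies value equality | |
// within one pool, and statically-known values resolve to the reserved | |
// indexes above, e.g. (assuming `ip: *InternPool`, `gpa: Allocator`): | |
// | |
//     const x = try ip.get(gpa, .{ .int = .{ | |
//         .ty = .usize_type, | |
//         .storage = .{ .u64 = 1 }, | |
//     } }); | |
//     assert(x == .one_usize); | |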
pub const static_keys = [_]Key{ | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 0, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 0, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 1, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 8, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 8, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 16, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 16, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 29, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 32, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 32, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 64, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 64, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 80, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 128, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 128, | |
} }, | |
.{ .simple_type = .usize }, | |
.{ .simple_type = .isize }, | |
.{ .simple_type = .c_char }, | |
.{ .simple_type = .c_short }, | |
.{ .simple_type = .c_ushort }, | |
.{ .simple_type = .c_int }, | |
.{ .simple_type = .c_uint }, | |
.{ .simple_type = .c_long }, | |
.{ .simple_type = .c_ulong }, | |
.{ .simple_type = .c_longlong }, | |
.{ .simple_type = .c_ulonglong }, | |
.{ .simple_type = .c_longdouble }, | |
.{ .simple_type = .f16 }, | |
.{ .simple_type = .f32 }, | |
.{ .simple_type = .f64 }, | |
.{ .simple_type = .f80 }, | |
.{ .simple_type = .f128 }, | |
.{ .simple_type = .anyopaque }, | |
.{ .simple_type = .bool }, | |
.{ .simple_type = .void }, | |
.{ .simple_type = .type }, | |
.{ .simple_type = .anyerror }, | |
.{ .simple_type = .comptime_int }, | |
.{ .simple_type = .comptime_float }, | |
.{ .simple_type = .noreturn }, | |
.{ .anyframe_type = .none }, | |
.{ .simple_type = .null }, | |
.{ .simple_type = .undefined }, | |
.{ .simple_type = .enum_literal }, | |
.{ .simple_type = .atomic_order }, | |
.{ .simple_type = .atomic_rmw_op }, | |
.{ .simple_type = .calling_convention }, | |
.{ .simple_type = .address_space }, | |
.{ .simple_type = .float_mode }, | |
.{ .simple_type = .reduce_op }, | |
.{ .simple_type = .call_modifier }, | |
.{ .simple_type = .prefetch_options }, | |
.{ .simple_type = .export_options }, | |
.{ .simple_type = .extern_options }, | |
.{ .simple_type = .type_info }, | |
// [*]u8 | |
.{ .ptr_type = .{ | |
.child = .u8_type, | |
.flags = .{ | |
.size = .Many, | |
}, | |
} }, | |
// [*]const u8 | |
.{ .ptr_type = .{ | |
.child = .u8_type, | |
.flags = .{ | |
.size = .Many, | |
.is_const = true, | |
}, | |
} }, | |
// [*:0]const u8 | |
.{ .ptr_type = .{ | |
.child = .u8_type, | |
.sentinel = .zero_u8, | |
.flags = .{ | |
.size = .Many, | |
.is_const = true, | |
}, | |
} }, | |
// *const comptime_int | |
.{ .ptr_type = .{ | |
.child = .comptime_int_type, | |
.flags = .{ | |
.size = .One, | |
.is_const = true, | |
}, | |
} }, | |
// []const u8 | |
.{ .ptr_type = .{ | |
.child = .u8_type, | |
.flags = .{ | |
.size = .Slice, | |
.is_const = true, | |
}, | |
} }, | |
// [:0]const u8 | |
.{ .ptr_type = .{ | |
.child = .u8_type, | |
.sentinel = .zero_u8, | |
.flags = .{ | |
.size = .Slice, | |
.is_const = true, | |
}, | |
} }, | |
// ?noreturn | |
.{ .opt_type = .noreturn_type }, | |
// anyerror!void | |
.{ .error_union_type = .{ | |
.error_set_type = .anyerror_type, | |
.payload_type = .void_type, | |
} }, | |
// adhoc_inferred_error_set_type | |
.{ .simple_type = .adhoc_inferred_error_set }, | |
// generic_poison_type | |
.{ .simple_type = .generic_poison }, | |
// empty_struct_type | |
.{ .anon_struct_type = .{ | |
.types = .{ .start = 0, .len = 0 }, | |
.names = .{ .start = 0, .len = 0 }, | |
.values = .{ .start = 0, .len = 0 }, | |
} }, | |
.{ .simple_value = .undefined }, | |
.{ .int = .{ | |
.ty = .comptime_int_type, | |
.storage = .{ .u64 = 0 }, | |
} }, | |
.{ .int = .{ | |
.ty = .usize_type, | |
.storage = .{ .u64 = 0 }, | |
} }, | |
.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = 0 }, | |
} }, | |
.{ .int = .{ | |
.ty = .comptime_int_type, | |
.storage = .{ .u64 = 1 }, | |
} }, | |
.{ .int = .{ | |
.ty = .usize_type, | |
.storage = .{ .u64 = 1 }, | |
} }, | |
// one_u8 | |
.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = 1 }, | |
} }, | |
// four_u8 | |
.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = 4 }, | |
} }, | |
// negative_one | |
.{ .int = .{ | |
.ty = .comptime_int_type, | |
.storage = .{ .i64 = -1 }, | |
} }, | |
// calling_convention_c | |
.{ .enum_tag = .{ | |
.ty = .calling_convention_type, | |
.int = .one_u8, | |
} }, | |
// calling_convention_inline | |
.{ .enum_tag = .{ | |
.ty = .calling_convention_type, | |
.int = .four_u8, | |
} }, | |
.{ .simple_value = .void }, | |
.{ .simple_value = .@"unreachable" }, | |
.{ .simple_value = .null }, | |
.{ .simple_value = .true }, | |
.{ .simple_value = .false }, | |
.{ .simple_value = .empty_struct }, | |
.{ .simple_value = .generic_poison }, | |
}; | |
/// How many items in the InternPool are statically known. | |
/// This is specified with an integer literal and a corresponding comptime | |
/// assert below to break an unfortunate and arguably incorrect dependency loop | |
/// when compiling. | |
pub const static_len = Zir.Inst.Index.static_len; | |
comptime { | |
//@compileLog(static_keys.len); | |
assert(static_len == static_keys.len); | |
} | |
pub const Tag = enum(u8) { | |
/// An integer type. | |
/// data is number of bits | |
type_int_signed, | |
/// An integer type. | |
/// data is number of bits | |
type_int_unsigned, | |
/// An array type whose length requires 64 bits or which has a sentinel. | |
/// data is payload to Array. | |
type_array_big, | |
/// An array type that has no sentinel and whose length fits in 32 bits. | |
/// data is payload to Vector. | |
type_array_small, | |
/// A vector type. | |
/// data is payload to Vector. | |
type_vector, | |
/// A fully explicitly specified pointer type. | |
type_pointer, | |
/// A slice type. | |
/// data is Index of underlying pointer type. | |
type_slice, | |
/// An optional type. | |
/// data is the child type. | |
type_optional, | |
/// The type `anyframe->T`. | |
/// data is the child type. | |
/// If the child type is `none`, the type is `anyframe`. | |
type_anyframe, | |
/// An error union type. | |
/// data is payload to `Key.ErrorUnionType`. | |
type_error_union, | |
/// An error union type of the form `anyerror!T`. | |
/// data is `Index` of payload type. | |
type_anyerror_union, | |
/// An error set type. | |
/// data is payload to `ErrorSet`. | |
type_error_set, | |
/// The inferred error set type of a function. | |
/// data is `Index` of a `func_decl` or `func_instance`. | |
type_inferred_error_set, | |
/// An enum type with auto-numbered tag values. | |
/// The enum is exhaustive. | |
/// data is payload index to `EnumAuto`. | |
type_enum_auto, | |
/// An enum type with an explicitly provided integer tag type. | |
/// The enum is exhaustive. | |
/// data is payload index to `EnumExplicit`. | |
type_enum_explicit, | |
/// An enum type with an explicitly provided integer tag type. | |
/// The enum is non-exhaustive. | |
/// data is payload index to `EnumExplicit`. | |
type_enum_nonexhaustive, | |
/// A type that can be represented with only an enum tag. | |
/// data is SimpleType enum value. | |
simple_type, | |
/// An opaque type. | |
/// data is index of Key.OpaqueType in extra. | |
type_opaque, | |
/// A non-packed struct type. | |
/// data is 0 or extra index of `TypeStruct`. | |
/// data == 0 represents `@TypeOf(.{})`. | |
type_struct, | |
/// A non-packed struct type that has only a namespace; no fields. | |
/// data is NamespaceIndex. | |
type_struct_ns, | |
/// An AnonStructType which stores types, names, and values for fields. | |
/// data is extra index of `TypeStructAnon`. | |
type_struct_anon, | |
/// A packed struct, no fields have any init values. | |
/// data is extra index of `TypeStructPacked`. | |
type_struct_packed, | |
/// A packed struct, one or more fields have init values. | |
/// data is extra index of `TypeStructPacked`. | |
type_struct_packed_inits, | |
/// An AnonStructType which has only types and values for fields. | |
/// data is extra index of `TypeStructAnon`. | |
type_tuple_anon, | |
/// A union type. | |
/// `data` is extra index of `TypeUnion`. | |
type_union, | |
/// A function body type. | |
/// `data` is extra index to `TypeFunction`. | |
type_function, | |
/// Typed `undefined`. | |
/// `data` is `Index` of the type. | |
/// Untyped `undefined` is stored instead via `simple_value`. | |
undef, | |
/// A value that can be represented with only an enum tag. | |
/// data is SimpleValue enum value. | |
simple_value, | |
/// A pointer to a decl. | |
/// data is extra index of `PtrDecl`, which contains the type and address. | |
ptr_decl, | |
/// A pointer to a decl that can be mutated at comptime. | |
/// data is extra index of `PtrMutDecl`, which contains the type and address. | |
ptr_mut_decl, | |
/// A pointer to an anonymous decl. | |
/// data is extra index of `PtrAnonDecl`, which contains the pointer type and decl value. | |
/// The alignment of the anonymous decl is communicated via the pointer type. | |
ptr_anon_decl, | |
/// A pointer to an anonymous decl. | |
/// data is extra index of `PtrAnonDeclAligned`, which contains the pointer | |
/// type and decl value. | |
/// The original pointer type is also provided, which will be different than `ty`. | |
/// This encoding is only used when a pointer to an anonymous decl is | |
/// coerced to a different pointer type with a different alignment. | |
ptr_anon_decl_aligned, | |
/// data is extra index of `PtrComptimeField`, which contains the pointer type and field value. | |
ptr_comptime_field, | |
/// A pointer with an integer value. | |
/// data is extra index of `PtrBase`, which contains the type and address. | |
/// Only pointer types are allowed to have this encoding. Optional types must use | |
/// `opt_payload` or `opt_null`. | |
ptr_int, | |
/// A pointer to the payload of an error union. | |
/// data is extra index of `PtrBase`, which contains the type and base pointer. | |
ptr_eu_payload, | |
/// A pointer to the payload of an optional. | |
/// data is extra index of `PtrBase`, which contains the type and base pointer. | |
ptr_opt_payload, | |
/// A pointer to an array element. | |
/// data is extra index of PtrBaseIndex, which contains the base array and element index. | |
/// In order to use this encoding, one must ensure that the `InternPool` | |
/// already contains the elem pointer type corresponding to this payload. | |
ptr_elem, | |
/// A pointer to a container field. | |
/// data is extra index of PtrBaseIndex, which contains the base container and field index. | |
ptr_field, | |
/// A slice. | |
/// data is extra index of PtrSlice, which contains the ptr and len values | |
ptr_slice, | |
/// An optional value that is non-null. | |
/// data is extra index of `TypeValue`. | |
/// The type is the optional type (not the payload type). | |
opt_payload, | |
/// An optional value that is null. | |
/// data is Index of the optional type. | |
opt_null, | |
/// Type: u8 | |
/// data is integer value | |
int_u8, | |
/// Type: u16 | |
/// data is integer value | |
int_u16, | |
/// Type: u32 | |
/// data is integer value | |
int_u32, | |
/// Type: i32 | |
/// data is integer value bitcasted to u32. | |
int_i32, | |
/// A usize that fits in 32 bits. | |
/// data is integer value. | |
int_usize, | |
/// A comptime_int that fits in a u32. | |
/// data is integer value. | |
int_comptime_int_u32, | |
/// A comptime_int that fits in an i32. | |
/// data is integer value bitcasted to u32. | |
int_comptime_int_i32, | |
/// An integer value that fits in 32 bits with an explicitly provided type. | |
/// data is extra index of `IntSmall`. | |
int_small, | |
/// A positive integer value. | |
/// data is a limbs index to `Int`. | |
int_positive, | |
/// A negative integer value. | |
/// data is a limbs index to `Int`. | |
int_negative, | |
/// The ABI alignment of a lazy type. | |
/// data is extra index of `IntLazy`. | |
int_lazy_align, | |
/// The ABI size of a lazy type. | |
/// data is extra index of `IntLazy`. | |
int_lazy_size, | |
/// An error value. | |
/// data is extra index of `Key.Error`. | |
error_set_error, | |
/// An error union error. | |
/// data is extra index of `Key.Error`. | |
error_union_error, | |
/// An error union payload. | |
/// data is extra index of `TypeValue`. | |
error_union_payload, | |
/// An enum literal value. | |
/// data is `NullTerminatedString` of the enum literal name. | |
enum_literal, | |
/// An enum tag value. | |
/// data is extra index of `EnumTag`. | |
enum_tag, | |
/// An f16 value. | |
/// data is float value bitcasted to u16 and zero-extended. | |
float_f16, | |
/// An f32 value. | |
/// data is float value bitcasted to u32. | |
float_f32, | |
/// An f64 value. | |
/// data is extra index to Float64. | |
float_f64, | |
/// An f80 value. | |
/// data is extra index to Float80. | |
float_f80, | |
/// An f128 value. | |
/// data is extra index to Float128. | |
float_f128, | |
/// A c_longdouble value of 80 bits. | |
/// data is extra index to Float80. | |
/// This is used when a c_longdouble value is provided as an f80, because f80 has unnormalized | |
/// values which cannot be losslessly represented as f128. It should only be used when the type | |
/// underlying c_longdouble for the target is 80 bits. | |
float_c_longdouble_f80, | |
/// A c_longdouble value of 128 bits. | |
/// data is extra index to Float128. | |
/// This is used when a c_longdouble value is provided as any type other than an f80, since all | |
/// other float types can be losslessly converted to and from f128. | |
float_c_longdouble_f128, | |
/// A comptime_float value. | |
/// data is extra index to Float128. | |
float_comptime_float, | |
/// A global variable. | |
/// data is extra index to Variable. | |
variable, | |
/// An extern function. | |
/// data is extra index to ExternFunc. | |
extern_func, | |
/// A non-extern function corresponding directly to the AST node from which it originated. | |
/// data is extra index to `FuncDecl`. | |
/// Only the owner Decl is used for hashing and equality because the other | |
/// fields can get patched up during incremental compilation. | |
func_decl, | |
/// A generic function instantiation. | |
/// data is extra index to `FuncInstance`. | |
func_instance, | |
/// A `func_decl` or a `func_instance` that has been coerced to a different type. | |
/// data is extra index to `FuncCoerced`. | |
func_coerced, | |
/// This represents the only possible value for *some* types which have | |
/// only one possible value. Not all only-possible-values are encoded this | |
/// way; for example, structs in which every field is a `comptime` field | |
/// are not encoded this way. | |
/// The set of values that are encoded this way is: | |
/// * An array or vector which has length 0. | |
/// * A struct which has all fields comptime-known. | |
/// * An empty enum or union. TODO: this value's existence is strange, | |
///   because such a type in reality has no values. See #15909 | |
/// data is Index of the type, which is known to be zero bits at runtime. | |
only_possible_value, | |
/// data is extra index to Key.Union. | |
union_value, | |
/// An array of bytes. | |
/// data is extra index to `Bytes`. | |
bytes, | |
/// An instance of a struct, array, or vector. | |
/// data is extra index to `Aggregate`. | |
aggregate, | |
/// An instance of an array or vector with every element being the same value. | |
/// data is extra index to `Repeated`. | |
repeated, | |
/// A memoized comptime function call result. | |
/// data is extra index to `MemoizedCall` | |
memoized_call, | |
const ErrorUnionType = Key.ErrorUnionType; | |
const OpaqueType = Key.OpaqueType; | |
const TypeValue = Key.TypeValue; | |
const Error = Key.Error; | |
const EnumTag = Key.EnumTag; | |
const ExternFunc = Key.ExternFunc; | |
const Union = Key.Union; | |
const TypePointer = Key.PtrType; | |
fn Payload(comptime tag: Tag) type { | |
return switch (tag) { | |
.type_int_signed => unreachable, | |
.type_int_unsigned => unreachable, | |
.type_array_big => Array, | |
.type_array_small => Vector, | |
.type_vector => Vector, | |
.type_pointer => TypePointer, | |
.type_slice => unreachable, | |
.type_optional => unreachable, | |
.type_anyframe => unreachable, | |
.type_error_union => ErrorUnionType, | |
.type_anyerror_union => unreachable, | |
.type_error_set => ErrorSet, | |
.type_inferred_error_set => unreachable, | |
.type_enum_auto => EnumAuto, | |
.type_enum_explicit => EnumExplicit, | |
.type_enum_nonexhaustive => EnumExplicit, | |
.simple_type => unreachable, | |
.type_opaque => OpaqueType, | |
.type_struct => TypeStruct, | |
.type_struct_ns => unreachable, | |
.type_struct_anon => TypeStructAnon, | |
.type_struct_packed, .type_struct_packed_inits => TypeStructPacked, | |
.type_tuple_anon => TypeStructAnon, | |
.type_union => TypeUnion, | |
.type_function => TypeFunction, | |
.undef => unreachable, | |
.simple_value => unreachable, | |
.ptr_decl => PtrDecl, | |
.ptr_mut_decl => PtrMutDecl, | |
.ptr_anon_decl => PtrAnonDecl, | |
.ptr_anon_decl_aligned => PtrAnonDeclAligned, | |
.ptr_comptime_field => PtrComptimeField, | |
.ptr_int => PtrBase, | |
.ptr_eu_payload => PtrBase, | |
.ptr_opt_payload => PtrBase, | |
.ptr_elem => PtrBaseIndex, | |
.ptr_field => PtrBaseIndex, | |
.ptr_slice => PtrSlice, | |
.opt_payload => TypeValue, | |
.opt_null => unreachable, | |
.int_u8 => unreachable, | |
.int_u16 => unreachable, | |
.int_u32 => unreachable, | |
.int_i32 => unreachable, | |
.int_usize => unreachable, | |
.int_comptime_int_u32 => unreachable, | |
.int_comptime_int_i32 => unreachable, | |
.int_small => IntSmall, | |
.int_positive => unreachable, | |
.int_negative => unreachable, | |
.int_lazy_align => IntLazy, | |
.int_lazy_size => IntLazy, | |
.error_set_error => Error, | |
.error_union_error => Error, | |
.error_union_payload => TypeValue, | |
.enum_literal => unreachable, | |
.enum_tag => EnumTag, | |
.float_f16 => unreachable, | |
.float_f32 => unreachable, | |
.float_f64 => unreachable, | |
.float_f80 => unreachable, | |
.float_f128 => unreachable, | |
.float_c_longdouble_f80 => unreachable, | |
.float_c_longdouble_f128 => unreachable, | |
.float_comptime_float => unreachable, | |
.variable => Variable, | |
.extern_func => ExternFunc, | |
.func_decl => FuncDecl, | |
.func_instance => FuncInstance, | |
.func_coerced => FuncCoerced, | |
.only_possible_value => unreachable, | |
.union_value => Union, | |
.bytes => Bytes, | |
.aggregate => Aggregate, | |
.repeated => Repeated, | |
.memoized_call => MemoizedCall, | |
}; | |
} | |
pub const Variable = struct { | |
ty: Index, | |
/// May be `none`. | |
init: Index, | |
decl: DeclIndex, | |
/// Library name if specified. | |
/// For example `extern "c" var stderrp = ...` would have 'c' as its library name. | |
lib_name: OptionalNullTerminatedString, | |
flags: Flags, | |
pub const Flags = packed struct(u32) { | |
is_extern: bool, | |
is_const: bool, | |
is_threadlocal: bool, | |
is_weak_linkage: bool, | |
_: u28 = 0, | |
}; | |
}; | |
/// Trailing: | |
/// 0. element: Index for each len | |
/// len is determined by the aggregate type. | |
pub const Aggregate = struct { | |
/// The type of the aggregate. | |
ty: Index, | |
}; | |
/// Trailing: | |
/// 0. If `analysis.inferred_error_set` is `true`, `Index` of an `error_set` which | |
/// is a regular error set corresponding to the finished inferred error set. | |
/// A `none` value marks that the inferred error set is not resolved yet. | |
pub const FuncDecl = struct { | |
analysis: FuncAnalysis, | |
owner_decl: DeclIndex, | |
ty: Index, | |
zir_body_inst: TrackedInst.Index, | |
lbrace_line: u32, | |
rbrace_line: u32, | |
lbrace_column: u32, | |
rbrace_column: u32, | |
}; | |
/// Trailing: | |
/// 0. If `analysis.inferred_error_set` is `true`, `Index` of an `error_set` which | |
/// is a regular error set corresponding to the finished inferred error set. | |
/// A `none` value marks that the inferred error set is not resolved yet. | |
/// 1. For each parameter of generic_owner: `Index` if comptime, otherwise `none` | |
pub const FuncInstance = struct { | |
analysis: FuncAnalysis, | |
/// Needed by the linker for codegen. Not part of hashing or equality. | |
owner_decl: DeclIndex, | |
ty: Index, | |
branch_quota: u32, | |
/// Points to a `FuncDecl`. | |
generic_owner: Index, | |
}; | |
pub const FuncCoerced = struct { | |
ty: Index, | |
func: Index, | |
}; | |
/// Trailing: | |
/// 0. name: NullTerminatedString for each names_len | |
pub const ErrorSet = struct { | |
names_len: u32, | |
/// Maps error names to declaration index. | |
names_map: MapIndex, | |
}; | |
/// Trailing: | |
/// 0. comptime_bits: u32, // if has_comptime_bits | |
/// 1. noalias_bits: u32, // if has_noalias_bits | |
/// 2. param_type: Index for each params_len | |
pub const TypeFunction = struct { | |
params_len: u32, | |
return_type: Index, | |
flags: Flags, | |
pub const Flags = packed struct(u32) { | |
alignment: Alignment, | |
cc: std.builtin.CallingConvention, | |
is_var_args: bool, | |
is_generic: bool, | |
has_comptime_bits: bool, | |
has_noalias_bits: bool, | |
is_noinline: bool, | |
align_is_generic: bool, | |
cc_is_generic: bool, | |
section_is_generic: bool, | |
addrspace_is_generic: bool, | |
_: u9 = 0, | |
}; | |
}; | |
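// A minimal sketch of querying the per-parameter bitmasks above, assuming | |
// the same LSB-is-index-0 convention documented for struct comptime bits | |
// below; `paramIsComptime` is a hypothetical helper, not used elsewhere. | |
fn paramIsComptime(comptime_bits: u32, param_index: u5) bool { | |
    return ((comptime_bits >> param_index) & 1) != 0; | |
} | |
test "paramIsComptime bitmask sketch" { | |
    // Parameters 0 and 3 marked comptime: 0b1001. | |
    try std.testing.expect(paramIsComptime(0b1001, 0)); | |
    try std.testing.expect(!paramIsComptime(0b1001, 1)); | |
    try std.testing.expect(paramIsComptime(0b1001, 3)); | |
} | |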
/// The number of fields is provided by the `tag_ty` field. | |
/// Trailing: | |
/// 0. field type: Index for each field; declaration order | |
/// 1. field align: Alignment for each field; declaration order | |
pub const TypeUnion = struct { | |
flags: Flags, | |
/// Only valid after .have_layout | |
size: u32, | |
/// Only valid after .have_layout | |
padding: u32, | |
decl: DeclIndex, | |
namespace: NamespaceIndex, | |
/// The enum that provides the list of field names and values. | |
tag_ty: Index, | |
zir_index: TrackedInst.Index.Optional, | |
pub const Flags = packed struct(u32) { | |
runtime_tag: UnionType.RuntimeTag, | |
/// If false, the field alignment trailing data is omitted. | |
any_aligned_fields: bool, | |
layout: std.builtin.Type.ContainerLayout, | |
status: UnionType.Status, | |
requires_comptime: RequiresComptime, | |
assumed_runtime_bits: bool, | |
assumed_pointer_aligned: bool, | |
alignment: Alignment, | |
_: u14 = 0, | |
}; | |
}; | |
/// Trailing: | |
/// 0. type: Index for each fields_len | |
/// 1. name: NullTerminatedString for each fields_len | |
/// 2. init: Index for each fields_len // if tag is type_struct_packed_inits | |
pub const TypeStructPacked = struct { | |
decl: DeclIndex, | |
zir_index: TrackedInst.Index.Optional, | |
fields_len: u32, | |
namespace: OptionalNamespaceIndex, | |
backing_int_ty: Index, | |
names_map: MapIndex, | |
flags: Flags, | |
pub const Flags = packed struct(u32) { | |
/// Dependency loop detection when resolving field inits. | |
field_inits_wip: bool, | |
inits_resolved: bool, | |
_: u30 = 0, | |
}; | |
}; | |
/// At first I thought of storing the denormalized data externally, such as... | |
/// | |
/// * runtime field order | |
/// * calculated field offsets | |
/// * size and alignment of the struct | |
/// | |
/// ...since these can be computed based on the other data here. However, | |
/// this data does need to be memoized, and therefore stored in memory | |
/// while the compiler is running, in order to avoid O(N^2) logic in many | |
/// places. Since the data can be stored compactly in the InternPool | |
/// representation, it is better for memory usage to store denormalized data | |
/// here, and potentially also better for performance as well. It's also simpler | |
/// than coming up with some other scheme for the data. | |
/// | |
/// Trailing: | |
/// 0. type: Index for each field in declared order | |
/// 1. if not is_tuple: | |
/// names_map: MapIndex, | |
/// name: NullTerminatedString // for each field in declared order | |
/// 2. if any_default_inits: | |
/// init: Index // for each field in declared order | |
/// 3. if has_namespace: | |
/// namespace: NamespaceIndex | |
/// 4. if any_aligned_fields: | |
/// align: Alignment // for each field in declared order | |
/// 5. if any_comptime_fields: | |
/// field_is_comptime_bits: u32 // minimal number of u32s needed, LSB is field 0 | |
/// 6. if not is_extern: | |
/// field_index: RuntimeOrder // for each field in runtime order | |
/// 7. field_offset: u32 // for each field in declared order, undef until layout_resolved | |
/// This trailing layout is decoded by `extraStructType`, later in this file. | |
pub const TypeStruct = struct { | |
decl: DeclIndex, | |
zir_index: TrackedInst.Index.Optional, | |
fields_len: u32, | |
flags: Flags, | |
size: u32, | |
pub const Flags = packed struct(u32) { | |
is_extern: bool, | |
known_non_opv: bool, | |
requires_comptime: RequiresComptime, | |
is_tuple: bool, | |
assumed_runtime_bits: bool, | |
assumed_pointer_aligned: bool, | |
has_namespace: bool, | |
any_comptime_fields: bool, | |
any_default_inits: bool, | |
any_aligned_fields: bool, | |
/// `.none` until layout_resolved | |
alignment: Alignment, | |
/// Dependency loop detection when resolving struct alignment. | |
alignment_wip: bool, | |
/// Dependency loop detection when resolving field types. | |
field_types_wip: bool, | |
/// Dependency loop detection when resolving struct layout. | |
layout_wip: bool, | |
/// Indicates whether `size`, `alignment`, runtime field order, and | |
/// field offsets are populated. | |
layout_resolved: bool, | |
/// Dependency loop detection when resolving field inits. | |
field_inits_wip: bool, | |
/// Indicates whether `field_inits` has been resolved. | |
inits_resolved: bool, | |
/// The type and all of its fields have had their layout resolved, even | |
/// through pointers, which `layout_resolved` alone does not ensure. | |
fully_resolved: bool, | |
_: u8 = 0, | |
}; | |
}; | |
}; | |
/// State that is mutable during semantic analysis. This data is not used for | |
/// equality or hashing, except for `inferred_error_set` which is considered | |
/// to be part of the type of the function. | |
pub const FuncAnalysis = packed struct(u32) { | |
state: State, | |
is_cold: bool, | |
is_noinline: bool, | |
calls_or_awaits_errorable_fn: bool, | |
stack_alignment: Alignment, | |
/// True if this function has an inferred error set. | |
inferred_error_set: bool, | |
_: u14 = 0, | |
pub const State = enum(u8) { | |
/// This function has not yet undergone analysis, because we have not | |
/// seen a potential runtime call. It may be analyzed in the future. | |
none, | |
/// Analysis for this function has been queued, but not yet completed. | |
queued, | |
/// This function intentionally only has ZIR generated because it is marked | |
/// inline, which means no runtime version of the function will be generated. | |
inline_only, | |
in_progress, | |
/// There will be a corresponding ErrorMsg in Module.failed_decls | |
sema_failure, | |
/// This function might be OK but it depends on another Decl which did not | |
/// successfully complete semantic analysis. | |
dependency_failure, | |
/// There will be a corresponding ErrorMsg in Module.failed_decls. | |
/// Indicates that semantic analysis succeeded, but code generation for | |
/// this function failed. | |
codegen_failure, | |
/// Semantic analysis and code generation of this function succeeded. | |
success, | |
}; | |
}; | |
pub const Bytes = struct { | |
/// The type of the aggregate | |
ty: Index, | |
/// Index into string_bytes, of len ip.aggregateTypeLen(ty) | |
bytes: String, | |
}; | |
pub const Repeated = struct { | |
/// The type of the aggregate. | |
ty: Index, | |
/// The value of every element. | |
elem_val: Index, | |
}; | |
/// Trailing: | |
/// 0. type: Index for each fields_len | |
/// 1. value: Index for each fields_len | |
/// 2. name: NullTerminatedString for each fields_len | |
/// The set of field names is omitted when the `Tag` is `type_tuple_anon`. | |
pub const TypeStructAnon = struct { | |
fields_len: u32, | |
}; | |
/// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to | |
/// implement logic that only wants to deal with types because the logic can | |
/// ignore all simple values. Note that technically, types are values. | |
pub const SimpleType = enum(u32) { | |
f16, | |
f32, | |
f64, | |
f80, | |
f128, | |
usize, | |
isize, | |
c_char, | |
c_short, | |
c_ushort, | |
c_int, | |
c_uint, | |
c_long, | |
c_ulong, | |
c_longlong, | |
c_ulonglong, | |
c_longdouble, | |
anyopaque, | |
bool, | |
void, | |
type, | |
anyerror, | |
comptime_int, | |
comptime_float, | |
noreturn, | |
null, | |
undefined, | |
enum_literal, | |
atomic_order, | |
atomic_rmw_op, | |
calling_convention, | |
address_space, | |
float_mode, | |
reduce_op, | |
call_modifier, | |
prefetch_options, | |
export_options, | |
extern_options, | |
type_info, | |
adhoc_inferred_error_set, | |
generic_poison, | |
}; | |
pub const SimpleValue = enum(u32) { | |
/// This is untyped `undefined`. | |
undefined, | |
void, | |
/// This is untyped `null`. | |
null, | |
/// This is the untyped empty struct literal: `.{}` | |
empty_struct, | |
true, | |
false, | |
@"unreachable", | |
generic_poison, | |
}; | |
/// Stored as the base-two logarithm of a power-of-two alignment, with one special value to indicate none. | |
pub const Alignment = enum(u6) { | |
@"1" = 0, | |
@"2" = 1, | |
@"4" = 2, | |
@"8" = 3, | |
@"16" = 4, | |
@"32" = 5, | |
@"64" = 6, | |
none = std.math.maxInt(u6), | |
_, | |
pub fn toByteUnitsOptional(a: Alignment) ?u64 { | |
return switch (a) { | |
.none => null, | |
else => @as(u64, 1) << @intFromEnum(a), | |
}; | |
} | |
pub fn toByteUnits(a: Alignment, default: u64) u64 { | |
return switch (a) { | |
.none => default, | |
else => @as(u64, 1) << @intFromEnum(a), | |
}; | |
} | |
pub fn fromByteUnits(n: u64) Alignment { | |
if (n == 0) return .none; | |
assert(std.math.isPowerOfTwo(n)); | |
return @enumFromInt(@ctz(n)); | |
} | |
pub fn fromNonzeroByteUnits(n: u64) Alignment { | |
assert(n != 0); | |
return fromByteUnits(n); | |
} | |
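// A minimal sanity sketch of the log2 encoding: the enum tag is the | |
// base-two logarithm of the byte alignment, so 8 bytes <-> tag 3. | |
test "Alignment log2 encoding" { | |
    try std.testing.expectEqual(Alignment.@"8", Alignment.fromByteUnits(8)); | |
    try std.testing.expectEqual(@as(?u64, 8), Alignment.@"8".toByteUnitsOptional()); | |
    try std.testing.expectEqual(Alignment.none, Alignment.fromByteUnits(0)); | |
} | |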
pub fn toLog2Units(a: Alignment) u6 { | |
assert(a != .none); | |
return @intFromEnum(a); | |
} | |
/// This is just a glorified `@enumFromInt` but using it can help | |
/// document the intended conversion. | |
/// The parameter uses a u32 for convenience at the callsite. | |
pub fn fromLog2Units(a: u32) Alignment { | |
assert(a != @intFromEnum(Alignment.none)); | |
return @enumFromInt(a); | |
} | |
pub fn order(lhs: Alignment, rhs: Alignment) std.math.Order { | |
assert(lhs != .none); | |
assert(rhs != .none); | |
return std.math.order(@intFromEnum(lhs), @intFromEnum(rhs)); | |
} | |
/// Relaxed comparison. We have this as default because a lot of callsites | |
/// were upgraded from directly using comparison operators on byte units, | |
/// with the `none` value represented by zero. | |
/// Prefer `compareStrict` if possible. | |
pub fn compare(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool { | |
return std.math.compare(lhs.toRelaxedCompareUnits(), op, rhs.toRelaxedCompareUnits()); | |
} | |
pub fn compareStrict(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool { | |
assert(lhs != .none); | |
assert(rhs != .none); | |
return std.math.compare(@intFromEnum(lhs), op, @intFromEnum(rhs)); | |
} | |
/// Treats `none` as zero. | |
/// This matches previous behavior of using `@max` directly on byte units. | |
/// Prefer `maxStrict` if possible. | |
pub fn max(lhs: Alignment, rhs: Alignment) Alignment { | |
if (lhs == .none) return rhs; | |
if (rhs == .none) return lhs; | |
return maxStrict(lhs, rhs); | |
} | |
pub fn maxStrict(lhs: Alignment, rhs: Alignment) Alignment { | |
assert(lhs != .none); | |
assert(rhs != .none); | |
return @enumFromInt(@max(@intFromEnum(lhs), @intFromEnum(rhs))); | |
} | |
/// Treats `none` as zero. | |
/// This matches previous behavior of using `@min` directly on byte units. | |
/// Prefer `minStrict` if possible. | |
pub fn min(lhs: Alignment, rhs: Alignment) Alignment { | |
if (lhs == .none) return lhs; | |
if (rhs == .none) return rhs; | |
return minStrict(lhs, rhs); | |
} | |
pub fn minStrict(lhs: Alignment, rhs: Alignment) Alignment { | |
assert(lhs != .none); | |
assert(rhs != .none); | |
return @enumFromInt(@min(@intFromEnum(lhs), @intFromEnum(rhs))); | |
} | |
/// Align an address forwards to this alignment. | |
pub fn forward(a: Alignment, addr: u64) u64 { | |
assert(a != .none); | |
const x = (@as(u64, 1) << @intFromEnum(a)) - 1; | |
return (addr + x) & ~x; | |
} | |
/// Align an address backwards to this alignment. | |
pub fn backward(a: Alignment, addr: u64) u64 { | |
assert(a != .none); | |
const x = (@as(u64, 1) << @intFromEnum(a)) - 1; | |
return addr & ~x; | |
} | |
/// Check if an address is aligned to this amount. | |
pub fn check(a: Alignment, addr: u64) bool { | |
assert(a != .none); | |
return @ctz(addr) >= @intFromEnum(a); | |
} | |
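// A minimal sanity sketch of the mask arithmetic above: with `.@"8"` the | |
// mask `x` is 0b111, so `forward` rounds up, `backward` rounds down, and | |
// `check` tests the low bits. | |
test "Alignment address rounding" { | |
    const a: Alignment = .@"8"; // log2 units == 3 | |
    try std.testing.expectEqual(@as(u64, 16), a.forward(13)); | |
    try std.testing.expectEqual(@as(u64, 8), a.backward(13)); | |
    try std.testing.expect(a.check(16)); | |
    try std.testing.expect(!a.check(12)); | |
} | |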
/// An array of `Alignment` objects existing within the `extra` array. | |
/// This type exists to provide a struct whose lifetime is | |
/// not invalidated when items are added to the `InternPool`. | |
pub const Slice = struct { | |
start: u32, | |
/// This is the number of alignment values, not the number of u32 elements. | |
len: u32, | |
pub fn get(slice: Slice, ip: *const InternPool) []Alignment { | |
// TODO: implement @ptrCast between slices changing the length | |
//const bytes: []u8 = @ptrCast(ip.extra.items[slice.start..]); | |
const bytes: []u8 = std.mem.sliceAsBytes(ip.extra.items[slice.start..]); | |
return @ptrCast(bytes[0..slice.len]); | |
} | |
}; | |
pub fn toRelaxedCompareUnits(a: Alignment) u8 { | |
const n: u8 = @intFromEnum(a); | |
assert(n <= @intFromEnum(Alignment.none)); | |
if (n == @intFromEnum(Alignment.none)) return 0; | |
return n + 1; | |
} | |
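// A minimal sketch of the relaxed ordering this enables: `none` maps to 0 | |
// and every real alignment maps to log2 + 1, so `none` compares below `1`. | |
test "Alignment relaxed compare treats none as zero" { | |
    try std.testing.expect(Alignment.none.compare(.lt, .@"1")); | |
    try std.testing.expect(Alignment.@"1".compare(.gt, .none)); | |
} | |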
const LlvmBuilderAlignment = @import("codegen/llvm/Builder.zig").Alignment; | |
pub fn toLlvm(this: @This()) LlvmBuilderAlignment { | |
return @enumFromInt(@intFromEnum(this)); | |
} | |
pub fn fromLlvm(other: LlvmBuilderAlignment) @This() { | |
return @enumFromInt(@intFromEnum(other)); | |
} | |
}; | |
/// Used for non-sentineled arrays that have length fitting in u32, as well as | |
/// vectors. | |
pub const Vector = struct { | |
len: u32, | |
child: Index, | |
}; | |
pub const Array = struct { | |
len0: u32, | |
len1: u32, | |
child: Index, | |
sentinel: Index, | |
pub const Length = PackedU64; | |
pub fn getLength(a: Array) u64 { | |
return (PackedU64{ | |
.a = a.len0, | |
.b = a.len1, | |
}).get(); | |
} | |
}; | |
/// Trailing: | |
/// 0. field name: NullTerminatedString for each fields_len; declaration order | |
/// 1. tag value: Index for each fields_len; declaration order | |
pub const EnumExplicit = struct { | |
/// The Decl that corresponds to the enum itself. | |
decl: DeclIndex, | |
/// This may be `none` if there are no declarations. | |
namespace: OptionalNamespaceIndex, | |
/// An integer type which is used for the numerical value of the enum, which | |
/// has been explicitly provided by the enum declaration. | |
int_tag_type: Index, | |
fields_len: u32, | |
/// Maps field names to declaration index. | |
names_map: MapIndex, | |
/// Maps field values to declaration index. | |
/// If this is `none`, it means the trailing tag values are absent because | |
/// they are auto-numbered. | |
values_map: OptionalMapIndex, | |
zir_index: TrackedInst.Index.Optional, | |
}; | |
/// Trailing: | |
/// 0. field name: NullTerminatedString for each fields_len; declaration order | |
pub const EnumAuto = struct { | |
/// The Decl that corresponds to the enum itself. | |
decl: DeclIndex, | |
/// This may be `none` if there are no declarations. | |
namespace: OptionalNamespaceIndex, | |
/// An integer type which is used for the numerical value of the enum, which | |
/// was inferred by Zig based on the number of tags. | |
int_tag_type: Index, | |
fields_len: u32, | |
/// Maps field names to declaration index. | |
names_map: MapIndex, | |
zir_index: TrackedInst.Index.Optional, | |
}; | |
pub const PackedU64 = packed struct(u64) { | |
a: u32, | |
b: u32, | |
pub fn get(x: PackedU64) u64 { | |
return @bitCast(x); | |
} | |
pub fn init(x: u64) PackedU64 { | |
return @bitCast(x); | |
} | |
}; | |
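// A minimal sanity sketch: `a` holds the low 32 bits and `b` the high 32, | |
// which is how `Array` above splits its 64-bit length into `len0`/`len1`. | |
test "PackedU64 round-trip" { | |
    const x: u64 = 0x1234_5678_9abc_def0; | |
    const split = PackedU64.init(x); | |
    try std.testing.expectEqual(@as(u32, 0x9abc_def0), split.a); | |
    try std.testing.expectEqual(@as(u32, 0x1234_5678), split.b); | |
    try std.testing.expectEqual(x, split.get()); | |
} | |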
pub const PtrDecl = struct { | |
ty: Index, | |
decl: DeclIndex, | |
}; | |
pub const PtrAnonDecl = struct { | |
ty: Index, | |
val: Index, | |
}; | |
pub const PtrAnonDeclAligned = struct { | |
ty: Index, | |
val: Index, | |
/// Must be nonequal to `ty`. Only the alignment from this value is important. | |
orig_ty: Index, | |
}; | |
pub const PtrMutDecl = struct { | |
ty: Index, | |
decl: DeclIndex, | |
runtime_index: RuntimeIndex, | |
}; | |
pub const PtrComptimeField = struct { | |
ty: Index, | |
field_val: Index, | |
}; | |
pub const PtrBase = struct { | |
ty: Index, | |
base: Index, | |
}; | |
pub const PtrBaseIndex = struct { | |
ty: Index, | |
base: Index, | |
index: Index, | |
}; | |
pub const PtrSlice = struct { | |
/// The slice type. | |
ty: Index, | |
/// A many pointer value. | |
ptr: Index, | |
/// A usize value. | |
len: Index, | |
}; | |
/// Trailing: Limb for every limbs_len | |
pub const Int = struct { | |
ty: Index, | |
limbs_len: u32, | |
}; | |
pub const IntSmall = struct { | |
ty: Index, | |
value: u32, | |
}; | |
pub const IntLazy = struct { | |
ty: Index, | |
lazy_ty: Index, | |
}; | |
/// An f64 value, broken up into 2 u32 parts. | |
pub const Float64 = struct { | |
piece0: u32, | |
piece1: u32, | |
pub fn get(self: Float64) f64 { | |
const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32); | |
return @bitCast(int_bits); | |
} | |
fn pack(val: f64) Float64 { | |
const bits = @as(u64, @bitCast(val)); | |
return .{ | |
.piece0 = @as(u32, @truncate(bits)), | |
.piece1 = @as(u32, @truncate(bits >> 32)), | |
}; | |
} | |
}; | |
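// A minimal sanity sketch: `pack` and `get` are exact inverses, since they | |
// only move bits between one u64 and two u32 pieces. | |
test "Float64 pack/get round-trip" { | |
    const x: f64 = -1234.5625; | |
    try std.testing.expectEqual(x, Float64.pack(x).get()); | |
} | |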
/// An f80 value, broken up into 2 u32 parts and a u16 part zero-padded to a u32. | |
pub const Float80 = struct { | |
piece0: u32, | |
piece1: u32, | |
piece2: u32, // u16 part, top bits | |
pub fn get(self: Float80) f80 { | |
const int_bits = @as(u80, self.piece0) | | |
(@as(u80, self.piece1) << 32) | | |
(@as(u80, self.piece2) << 64); | |
return @bitCast(int_bits); | |
} | |
fn pack(val: f80) Float80 { | |
const bits = @as(u80, @bitCast(val)); | |
return .{ | |
.piece0 = @as(u32, @truncate(bits)), | |
.piece1 = @as(u32, @truncate(bits >> 32)), | |
.piece2 = @as(u16, @truncate(bits >> 64)), | |
}; | |
} | |
}; | |
/// An f128 value, broken up into 4 u32 parts. | |
pub const Float128 = struct { | |
piece0: u32, | |
piece1: u32, | |
piece2: u32, | |
piece3: u32, | |
pub fn get(self: Float128) f128 { | |
const int_bits = @as(u128, self.piece0) | | |
(@as(u128, self.piece1) << 32) | | |
(@as(u128, self.piece2) << 64) | | |
(@as(u128, self.piece3) << 96); | |
return @bitCast(int_bits); | |
} | |
fn pack(val: f128) Float128 { | |
const bits = @as(u128, @bitCast(val)); | |
return .{ | |
.piece0 = @as(u32, @truncate(bits)), | |
.piece1 = @as(u32, @truncate(bits >> 32)), | |
.piece2 = @as(u32, @truncate(bits >> 64)), | |
.piece3 = @as(u32, @truncate(bits >> 96)), | |
}; | |
} | |
}; | |
/// Trailing: | |
/// 0. arg value: Index for each args_len | |
pub const MemoizedCall = struct { | |
func: Index, | |
args_len: u32, | |
result: Index, | |
}; | |
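// Note: every field of the payload structs above is 32 bits wide (a u32, | |
// an enum(u32) index, or a packed struct(u32)), so a payload occupies one | |
// u32 per field in the `extra` array, followed by the trailing data its | |
// doc comment describes. The `extraData`/`extraDataTrail` calls below | |
// decode exactly this layout. | |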
pub fn init(ip: *InternPool, gpa: Allocator) !void { | |
assert(ip.items.len == 0); | |
// Reserve string index 0 for an empty string. | |
assert((try ip.getOrPutString(gpa, "")) == .empty); | |
// So that we can use `catch unreachable` below. | |
try ip.items.ensureUnusedCapacity(gpa, static_keys.len); | |
try ip.map.ensureUnusedCapacity(gpa, static_keys.len); | |
try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); | |
// This inserts all the statically-known values into the intern pool in the | |
// order expected. | |
for (static_keys[0..@intFromEnum(Index.empty_struct_type)]) |key| { | |
_ = ip.get(gpa, key) catch unreachable; | |
} | |
_ = ip.getAnonStructType(gpa, .{ | |
.types = &.{}, | |
.names = &.{}, | |
.values = &.{}, | |
}) catch unreachable; | |
for (static_keys[@intFromEnum(Index.empty_struct_type) + 1 ..]) |key| { | |
_ = ip.get(gpa, key) catch unreachable; | |
} | |
if (std.debug.runtime_safety) { | |
// Sanity check. | |
assert(ip.indexToKey(.bool_true).simple_value == .true); | |
assert(ip.indexToKey(.bool_false).simple_value == .false); | |
const cc_inline = ip.indexToKey(.calling_convention_inline).enum_tag.int; | |
const cc_c = ip.indexToKey(.calling_convention_c).enum_tag.int; | |
assert(ip.indexToKey(cc_inline).int.storage.u64 == | |
@intFromEnum(std.builtin.CallingConvention.Inline)); | |
assert(ip.indexToKey(cc_c).int.storage.u64 == | |
@intFromEnum(std.builtin.CallingConvention.C)); | |
assert(ip.indexToKey(ip.typeOf(cc_inline)).int_type.bits == | |
@typeInfo(@typeInfo(std.builtin.CallingConvention).Enum.tag_type).Int.bits); | |
} | |
assert(ip.items.len == static_keys.len); | |
} | |
pub fn deinit(ip: *InternPool, gpa: Allocator) void { | |
ip.map.deinit(gpa); | |
ip.items.deinit(gpa); | |
ip.extra.deinit(gpa); | |
ip.limbs.deinit(gpa); | |
ip.string_bytes.deinit(gpa); | |
ip.decls_free_list.deinit(gpa); | |
ip.allocated_decls.deinit(gpa); | |
ip.namespaces_free_list.deinit(gpa); | |
ip.allocated_namespaces.deinit(gpa); | |
for (ip.maps.items) |*map| map.deinit(gpa); | |
ip.maps.deinit(gpa); | |
ip.string_table.deinit(gpa); | |
ip.tracked_insts.deinit(gpa); | |
ip.src_hash_deps.deinit(gpa); | |
ip.decl_val_deps.deinit(gpa); | |
ip.namespace_deps.deinit(gpa); | |
ip.namespace_name_deps.deinit(gpa); | |
ip.first_dependency.deinit(gpa); | |
ip.dep_entries.deinit(gpa); | |
ip.free_dep_entries.deinit(gpa); | |
ip.* = undefined; | |
} | |
pub fn indexToKey(ip: *const InternPool, index: Index) Key { | |
assert(index != .none); | |
const item = ip.items.get(@intFromEnum(index)); | |
const data = item.data; | |
return switch (item.tag) { | |
.type_int_signed => .{ | |
.int_type = .{ | |
.signedness = .signed, | |
.bits = @as(u16, @intCast(data)), | |
}, | |
}, | |
.type_int_unsigned => .{ | |
.int_type = .{ | |
.signedness = .unsigned, | |
.bits = @as(u16, @intCast(data)), | |
}, | |
}, | |
.type_array_big => { | |
const array_info = ip.extraData(Array, data); | |
return .{ .array_type = .{ | |
.len = array_info.getLength(), | |
.child = array_info.child, | |
.sentinel = array_info.sentinel, | |
} }; | |
}, | |
.type_array_small => { | |
const array_info = ip.extraData(Vector, data); | |
return .{ .array_type = .{ | |
.len = array_info.len, | |
.child = array_info.child, | |
.sentinel = .none, | |
} }; | |
}, | |
.simple_type => .{ .simple_type = @as(SimpleType, @enumFromInt(data)) }, | |
.simple_value => .{ .simple_value = @as(SimpleValue, @enumFromInt(data)) }, | |
.type_vector => { | |
const vector_info = ip.extraData(Vector, data); | |
return .{ .vector_type = .{ | |
.len = vector_info.len, | |
.child = vector_info.child, | |
} }; | |
}, | |
.type_pointer => .{ .ptr_type = ip.extraData(Tag.TypePointer, data) }, | |
.type_slice => { | |
assert(ip.items.items(.tag)[data] == .type_pointer); | |
var ptr_info = ip.extraData(Tag.TypePointer, ip.items.items(.data)[data]); | |
ptr_info.flags.size = .Slice; | |
return .{ .ptr_type = ptr_info }; | |
}, | |
.type_optional => .{ .opt_type = @enumFromInt(data) }, | |
.type_anyframe => .{ .anyframe_type = @enumFromInt(data) }, | |
.type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, | |
.type_anyerror_union => .{ .error_union_type = .{ | |
.error_set_type = .anyerror_type, | |
.payload_type = @enumFromInt(data), | |
} }, | |
.type_error_set => .{ .error_set_type = ip.extraErrorSet(data) }, | |
.type_inferred_error_set => .{ | |
.inferred_error_set_type = @enumFromInt(data), | |
}, | |
.type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, | |
.type_struct => .{ .struct_type = if (data == 0) .{ | |
.extra_index = 0, | |
.namespace = .none, | |
.decl = .none, | |
.zir_index = undefined, | |
.layout = .Auto, | |
.field_names = .{ .start = 0, .len = 0 }, | |
.field_types = .{ .start = 0, .len = 0 }, | |
.field_inits = .{ .start = 0, .len = 0 }, | |
.field_aligns = .{ .start = 0, .len = 0 }, | |
.runtime_order = .{ .start = 0, .len = 0 }, | |
.comptime_bits = .{ .start = 0, .len = 0 }, | |
.offsets = .{ .start = 0, .len = 0 }, | |
.names_map = undefined, | |
} else extraStructType(ip, data) }, | |
.type_struct_ns => .{ .struct_type = .{ | |
.extra_index = 0, | |
.namespace = @as(NamespaceIndex, @enumFromInt(data)).toOptional(), | |
.decl = .none, | |
.zir_index = undefined, | |
.layout = .Auto, | |
.field_names = .{ .start = 0, .len = 0 }, | |
.field_types = .{ .start = 0, .len = 0 }, | |
.field_inits = .{ .start = 0, .len = 0 }, | |
.field_aligns = .{ .start = 0, .len = 0 }, | |
.runtime_order = .{ .start = 0, .len = 0 }, | |
.comptime_bits = .{ .start = 0, .len = 0 }, | |
.offsets = .{ .start = 0, .len = 0 }, | |
.names_map = undefined, | |
} }, | |
.type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) }, | |
.type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) }, | |
.type_struct_packed => .{ .struct_type = extraPackedStructType(ip, data, false) }, | |
.type_struct_packed_inits => .{ .struct_type = extraPackedStructType(ip, data, true) }, | |
.type_union => .{ .union_type = extraUnionType(ip, data) }, | |
.type_enum_auto => { | |
const enum_auto = ip.extraDataTrail(EnumAuto, data); | |
return .{ .enum_type = .{ | |
.decl = enum_auto.data.decl, | |
.namespace = enum_auto.data.namespace, | |
.tag_ty = enum_auto.data.int_tag_type, | |
.names = .{ | |
.start = @intCast(enum_auto.end), | |
.len = enum_auto.data.fields_len, | |
}, | |
.values = .{ | |
.start = 0, | |
.len = 0, | |
}, | |
.tag_mode = .auto, | |
.names_map = enum_auto.data.names_map.toOptional(), | |
.values_map = .none, | |
.zir_index = enum_auto.data.zir_index, | |
} }; | |
}, | |
.type_enum_explicit => ip.indexToKeyEnum(data, .explicit), | |
.type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive), | |
.type_function => .{ .func_type = ip.extraFuncType(data) }, | |
.undef => .{ .undef = @as(Index, @enumFromInt(data)) }, | |
.opt_null => .{ .opt = .{ | |
.ty = @as(Index, @enumFromInt(data)), | |
.val = .none, | |
} }, | |
.opt_payload => { | |
const extra = ip.extraData(Tag.TypeValue, data); | |
return .{ .opt = .{ | |
.ty = extra.ty, | |
.val = extra.val, | |
} }; | |
}, | |
.ptr_decl => { | |
const info = ip.extraData(PtrDecl, data); | |
return .{ .ptr = .{ | |
.ty = info.ty, | |
.addr = .{ .decl = info.decl }, | |
} }; | |
}, | |
.ptr_mut_decl => { | |
const info = ip.extraData(PtrMutDecl, data); | |
return .{ .ptr = .{ | |
.ty = info.ty, | |
.addr = .{ .mut_decl = .{ | |
.decl = info.decl, | |
.runtime_index = info.runtime_index, | |
} }, | |
} }; | |
}, | |
.ptr_anon_decl => { | |
const info = ip.extraData(PtrAnonDecl, data); | |
return .{ .ptr = .{ | |
.ty = info.ty, | |
.addr = .{ .anon_decl = .{ | |
.val = info.val, | |
.orig_ty = info.ty, | |
} }, | |
} }; | |
}, | |
.ptr_anon_decl_aligned => { | |
const info = ip.extraData(PtrAnonDeclAligned, data); | |
return .{ .ptr = .{ | |
.ty = info.ty, | |
.addr = .{ .anon_decl = .{ | |
.val = info.val, | |
.orig_ty = info.orig_ty, | |
} }, | |
} }; | |
}, | |
.ptr_comptime_field => { | |
const info = ip.extraData(PtrComptimeField, data); | |
return .{ .ptr = .{ | |
.ty = info.ty, | |
.addr = .{ .comptime_field = info.field_val }, | |
} }; | |
}, | |
.ptr_int => { | |
const info = ip.extraData(PtrBase, data); | |
return .{ .ptr = .{ | |
.ty = info.ty, | |
.addr = .{ .int = info.base }, | |
} }; | |
}, | |
.ptr_eu_payload => { | |
const info = ip.extraData(PtrBase, data); | |
return .{ .ptr = .{ | |
.ty = info.ty, | |
.addr = .{ .eu_payload = info.base }, | |
} }; | |
}, | |
.ptr_opt_payload => { | |
const info = ip.extraData(PtrBase, data); | |
return .{ .ptr = .{ | |
.ty = info.ty, | |
.addr = .{ .opt_payload = info.base }, | |
} }; | |
}, | |
.ptr_elem => { | |
// Avoid `indexToKey` recursion by asserting the tag encoding. | |
const info = ip.extraData(PtrBaseIndex, data); | |
const index_item = ip.items.get(@intFromEnum(info.index)); | |
return switch (index_item.tag) { | |
.int_usize => .{ .ptr = .{ | |
.ty = info.ty, | |
.addr = .{ .elem = .{ | |
.base = info.base, | |
.index = index_item.data, | |
} }, | |
} }, | |
.int_positive => @panic("TODO"), // implement along with behavior test coverage | |
else => unreachable, | |
}; | |
}, | |
.ptr_field => { | |
// Avoid `indexToKey` recursion by asserting the tag encoding. | |
const info = ip.extraData(PtrBaseIndex, data); | |
const index_item = ip.items.get(@intFromEnum(info.index)); | |
return switch (index_item.tag) { | |
.int_usize => .{ .ptr = .{ | |
.ty = info.ty, | |
.addr = .{ .field = .{ | |
.base = info.base, | |
.index = index_item.data, | |
} }, | |
} }, | |
.int_positive => @panic("TODO"), // implement along with behavior test coverage | |
else => unreachable, | |
}; | |
}, | |
.ptr_slice => { | |
const info = ip.extraData(PtrSlice, data); | |
return .{ .slice = .{ | |
.ty = info.ty, | |
.ptr = info.ptr, | |
.len = info.len, | |
} }; | |
}, | |
.int_u8 => .{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = data }, | |
} }, | |
.int_u16 => .{ .int = .{ | |
.ty = .u16_type, | |
.storage = .{ .u64 = data }, | |
} }, | |
.int_u32 => .{ .int = .{ | |
.ty = .u32_type, | |
.storage = .{ .u64 = data }, | |
} }, | |
.int_i32 => .{ .int = .{ | |
.ty = .i32_type, | |
.storage = .{ .i64 = @as(i32, @bitCast(data)) }, | |
} }, | |
.int_usize => .{ .int = .{ | |
.ty = .usize_type, | |
.storage = .{ .u64 = data }, | |
} }, | |
.int_comptime_int_u32 => .{ .int = .{ | |
.ty = .comptime_int_type, | |
.storage = .{ .u64 = data }, | |
} }, | |
.int_comptime_int_i32 => .{ .int = .{ | |
.ty = .comptime_int_type, | |
.storage = .{ .i64 = @as(i32, @bitCast(data)) }, | |
} }, | |
.int_positive => ip.indexToKeyBigInt(data, true), | |
.int_negative => ip.indexToKeyBigInt(data, false), | |
.int_small => { | |
const info = ip.extraData(IntSmall, data); | |
return .{ .int = .{ | |
.ty = info.ty, | |
.storage = .{ .u64 = info.value }, | |
} }; | |
}, | |
.int_lazy_align, .int_lazy_size => |tag| { | |
const info = ip.extraData(IntLazy, data); | |
return .{ .int = .{ | |
.ty = info.ty, | |
.storage = switch (tag) { | |
.int_lazy_align => .{ .lazy_align = info.lazy_ty }, | |
.int_lazy_size => .{ .lazy_size = info.lazy_ty }, | |
else => unreachable, | |
}, | |
} }; | |
}, | |
.float_f16 => .{ .float = .{ | |
.ty = .f16_type, | |
.storage = .{ .f16 = @as(f16, @bitCast(@as(u16, @intCast(data)))) }, | |
} }, | |
.float_f32 => .{ .float = .{ | |
.ty = .f32_type, | |
.storage = .{ .f32 = @as(f32, @bitCast(data)) }, | |
} }, | |
.float_f64 => .{ .float = .{ | |
.ty = .f64_type, | |
.storage = .{ .f64 = ip.extraData(Float64, data).get() }, | |
} }, | |
.float_f80 => .{ .float = .{ | |
.ty = .f80_type, | |
.storage = .{ .f80 = ip.extraData(Float80, data).get() }, | |
} }, | |
.float_f128 => .{ .float = .{ | |
.ty = .f128_type, | |
.storage = .{ .f128 = ip.extraData(Float128, data).get() }, | |
} }, | |
.float_c_longdouble_f80 => .{ .float = .{ | |
.ty = .c_longdouble_type, | |
.storage = .{ .f80 = ip.extraData(Float80, data).get() }, | |
} }, | |
.float_c_longdouble_f128 => .{ .float = .{ | |
.ty = .c_longdouble_type, | |
.storage = .{ .f128 = ip.extraData(Float128, data).get() }, | |
} }, | |
.float_comptime_float => .{ .float = .{ | |
.ty = .comptime_float_type, | |
.storage = .{ .f128 = ip.extraData(Float128, data).get() }, | |
} }, | |
.variable => { | |
const extra = ip.extraData(Tag.Variable, data); | |
return .{ .variable = .{ | |
.ty = extra.ty, | |
.init = extra.init, | |
.decl = extra.decl, | |
.lib_name = extra.lib_name, | |
.is_extern = extra.flags.is_extern, | |
.is_const = extra.flags.is_const, | |
.is_threadlocal = extra.flags.is_threadlocal, | |
.is_weak_linkage = extra.flags.is_weak_linkage, | |
} }; | |
}, | |
.extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, | |
.func_instance => .{ .func = ip.extraFuncInstance(data) }, | |
.func_decl => .{ .func = ip.extraFuncDecl(data) }, | |
.func_coerced => .{ .func = ip.extraFuncCoerced(data) }, | |
.only_possible_value => { | |
const ty: Index = @enumFromInt(data); | |
const ty_item = ip.items.get(@intFromEnum(ty)); | |
return switch (ty_item.tag) { | |
.type_array_big => { | |
const sentinel = @as( | |
*const [1]Index, | |
@ptrCast(&ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?]), | |
); | |
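// The element storage is just the sentinel value when one exists | |
// (a one-element slice), and is empty otherwise. | |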
return .{ .aggregate = .{ | |
.ty = ty, | |
.storage = .{ .elems = sentinel[0..@intFromBool(sentinel[0] != .none)] }, | |
} }; | |
}, | |
.type_array_small, | |
.type_vector, | |
.type_struct_ns, | |
.type_struct_packed, | |
=> .{ .aggregate = .{ | |
.ty = ty, | |
.storage = .{ .elems = &.{} }, | |
} }, | |
// There is only one possible value precisely because this field | |
// inits slice is fully populated with comptime-known values. | |
.type_struct => { | |
const info = extraStructType(ip, ty_item.data); | |
return .{ .aggregate = .{ | |
.ty = ty, | |
.storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) }, | |
} }; | |
}, | |
.type_struct_packed_inits => { | |
const info = extraPackedStructType(ip, ty_item.data, true); | |
return .{ .aggregate = .{ | |
.ty = ty, | |
.storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) }, | |
} }; | |
}, | |
// There is only one possible value precisely because this values | |
// slice is fully populated with comptime-known values. | |
.type_struct_anon, .type_tuple_anon => { | |
const type_struct_anon = ip.extraDataTrail(TypeStructAnon, ty_item.data); | |
const fields_len = type_struct_anon.data.fields_len; | |
const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; | |
return .{ .aggregate = .{ | |
.ty = ty, | |
.storage = .{ .elems = @ptrCast(values) }, | |
} }; | |
}, | |
.type_enum_auto, | |
.type_enum_explicit, | |
.type_union, | |
=> .{ .empty_enum_value = ty }, | |
else => unreachable, | |
}; | |
}, | |
.bytes => { | |
const extra = ip.extraData(Bytes, data); | |
const len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(extra.ty)); | |
return .{ .aggregate = .{ | |
.ty = extra.ty, | |
.storage = .{ .bytes = ip.string_bytes.items[@intFromEnum(extra.bytes)..][0..len] }, | |
} }; | |
}, | |
.aggregate => { | |
const extra = ip.extraDataTrail(Tag.Aggregate, data); | |
const len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty)); | |
const fields: []const Index = @ptrCast(ip.extra.items[extra.end..][0..len]); | |
return .{ .aggregate = .{ | |
.ty = extra.data.ty, | |
.storage = .{ .elems = fields }, | |
} }; | |
}, | |
.repeated => { | |
const extra = ip.extraData(Repeated, data); | |
return .{ .aggregate = .{ | |
.ty = extra.ty, | |
.storage = .{ .repeated_elem = extra.elem_val }, | |
} }; | |
}, | |
.union_value => .{ .un = ip.extraData(Key.Union, data) }, | |
.error_set_error => .{ .err = ip.extraData(Key.Error, data) }, | |
.error_union_error => { | |
const extra = ip.extraData(Key.Error, data); | |
return .{ .error_union = .{ | |
.ty = extra.ty, | |
.val = .{ .err_name = extra.name }, | |
} }; | |
}, | |
.error_union_payload => { | |
const extra = ip.extraData(Tag.TypeValue, data); | |
return .{ .error_union = .{ | |
.ty = extra.ty, | |
.val = .{ .payload = extra.val }, | |
} }; | |
}, | |
.enum_literal => .{ .enum_literal = @as(NullTerminatedString, @enumFromInt(data)) }, | |
.enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) }, | |
.memoized_call => { | |
const extra = ip.extraDataTrail(MemoizedCall, data); | |
return .{ .memoized_call = .{ | |
.func = extra.data.func, | |
.arg_values = @as([]const Index, @ptrCast(ip.extra.items[extra.end..][0..extra.data.args_len])), | |
.result = extra.data.result, | |
} }; | |
}, | |
}; | |
} | |
fn extraErrorSet(ip: *const InternPool, extra_index: u32) Key.ErrorSetType { | |
const error_set = ip.extraDataTrail(Tag.ErrorSet, extra_index); | |
return .{ | |
.names = .{ | |
.start = @intCast(error_set.end), | |
.len = error_set.data.names_len, | |
}, | |
.names_map = error_set.data.names_map.toOptional(), | |
}; | |
} | |
fn extraUnionType(ip: *const InternPool, extra_index: u32) Key.UnionType { | |
const type_union = ip.extraData(Tag.TypeUnion, extra_index); | |
return .{ | |
.decl = type_union.decl, | |
.namespace = type_union.namespace, | |
.flags = type_union.flags, | |
.enum_tag_ty = type_union.tag_ty, | |
.zir_index = type_union.zir_index, | |
.extra_index = extra_index, | |
}; | |
} | |
fn extraTypeStructAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType { | |
const type_struct_anon = ip.extraDataTrail(TypeStructAnon, extra_index); | |
const fields_len = type_struct_anon.data.fields_len; | |
return .{ | |
.types = .{ | |
.start = type_struct_anon.end, | |
.len = fields_len, | |
}, | |
.values = .{ | |
.start = type_struct_anon.end + fields_len, | |
.len = fields_len, | |
}, | |
.names = .{ | |
.start = type_struct_anon.end + fields_len + fields_len, | |
.len = fields_len, | |
}, | |
}; | |
} | |
fn extraTypeTupleAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType { | |
const type_struct_anon = ip.extraDataTrail(TypeStructAnon, extra_index); | |
const fields_len = type_struct_anon.data.fields_len; | |
return .{ | |
.types = .{ | |
.start = type_struct_anon.end, | |
.len = fields_len, | |
}, | |
.values = .{ | |
.start = type_struct_anon.end + fields_len, | |
.len = fields_len, | |
}, | |
.names = .{ | |
.start = 0, | |
.len = 0, | |
}, | |
}; | |
} | |
fn extraStructType(ip: *const InternPool, extra_index: u32) Key.StructType { | |
const s = ip.extraDataTrail(Tag.TypeStruct, extra_index); | |
const fields_len = s.data.fields_len; | |
var index = s.end; | |
const field_types = t: { | |
const types: Index.Slice = .{ .start = index, .len = fields_len }; | |
index += fields_len; | |
break :t types; | |
}; | |
const names_map, const field_names: NullTerminatedString.Slice = t: { | |
if (s.data.flags.is_tuple) break :t .{ .none, .{ .start = 0, .len = 0 } }; | |
const names_map: MapIndex = @enumFromInt(ip.extra.items[index]); | |
index += 1; | |
const names: NullTerminatedString.Slice = .{ .start = index, .len = fields_len }; | |
index += fields_len; | |
break :t .{ names_map.toOptional(), names }; | |
}; | |
const field_inits: Index.Slice = t: { | |
if (!s.data.flags.any_default_inits) break :t .{ .start = 0, .len = 0 }; | |
const inits: Index.Slice = .{ .start = index, .len = fields_len }; | |
index += fields_len; | |
break :t inits; | |
}; | |
const namespace = t: { | |
if (!s.data.flags.has_namespace) break :t .none; | |
const namespace: NamespaceIndex = @enumFromInt(ip.extra.items[index]); | |
index += 1; | |
break :t namespace.toOptional(); | |
}; | |
const field_aligns: Alignment.Slice = t: { | |
if (!s.data.flags.any_aligned_fields) break :t .{ .start = 0, .len = 0 }; | |
const aligns: Alignment.Slice = .{ .start = index, .len = fields_len }; | |
index += (fields_len + 3) / 4; | |
break :t aligns; | |
}; | |
const comptime_bits: Key.StructType.ComptimeBits = t: { | |
if (!s.data.flags.any_comptime_fields) break :t .{ .start = 0, .len = 0 }; | |
const comptime_bits: Key.StructType.ComptimeBits = .{ .start = index, .len = fields_len }; | |
index += (fields_len + 31) / 32; | |
break :t comptime_bits; | |
}; | |
const runtime_order: Key.StructType.RuntimeOrder.Slice = t: { | |
if (s.data.flags.is_extern) break :t .{ .start = 0, .len = 0 }; | |
const ro: Key.StructType.RuntimeOrder.Slice = .{ .start = index, .len = fields_len }; | |
index += fields_len; | |
break :t ro; | |
}; | |
const offsets = t: { | |
const offsets: Key.StructType.Offsets = .{ .start = index, .len = fields_len }; | |
index += fields_len; | |
break :t offsets; | |
}; | |
return .{ | |
.extra_index = extra_index, | |
.decl = s.data.decl.toOptional(), | |
.zir_index = s.data.zir_index, | |
.layout = if (s.data.flags.is_extern) .Extern else .Auto, | |
.field_types = field_types, | |
.names_map = names_map, | |
.field_names = field_names, | |
.field_inits = field_inits, | |
.namespace = namespace, | |
.field_aligns = field_aligns, | |
.comptime_bits = comptime_bits, | |
.runtime_order = runtime_order, | |
.offsets = offsets, | |
}; | |
} | |
fn extraPackedStructType(ip: *const InternPool, extra_index: u32, inits: bool) Key.StructType { | |
const type_struct_packed = ip.extraDataTrail(Tag.TypeStructPacked, extra_index); | |
const fields_len = type_struct_packed.data.fields_len; | |
return .{ | |
.extra_index = extra_index, | |
.decl = type_struct_packed.data.decl.toOptional(), | |
.namespace = type_struct_packed.data.namespace, | |
.zir_index = type_struct_packed.data.zir_index, | |
.layout = .Packed, | |
.field_types = .{ | |
.start = type_struct_packed.end, | |
.len = fields_len, | |
}, | |
.field_names = .{ | |
.start = type_struct_packed.end + fields_len, | |
.len = fields_len, | |
}, | |
.field_inits = if (inits) .{ | |
.start = type_struct_packed.end + fields_len * 2, | |
.len = fields_len, | |
} else .{ | |
.start = 0, | |
.len = 0, | |
}, | |
.field_aligns = .{ .start = 0, .len = 0 }, | |
.runtime_order = .{ .start = 0, .len = 0 }, | |
.comptime_bits = .{ .start = 0, .len = 0 }, | |
.offsets = .{ .start = 0, .len = 0 }, | |
.names_map = type_struct_packed.data.names_map.toOptional(), | |
}; | |
} | |
fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType { | |
const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index); | |
var index: usize = type_function.end; | |
const comptime_bits: u32 = if (!type_function.data.flags.has_comptime_bits) 0 else b: { | |
const x = ip.extra.items[index]; | |
index += 1; | |
break :b x; | |
}; | |
const noalias_bits: u32 = if (!type_function.data.flags.has_noalias_bits) 0 else b: { | |
const x = ip.extra.items[index]; | |
index += 1; | |
break :b x; | |
}; | |
return .{ | |
.param_types = .{ | |
.start = @intCast(index), | |
.len = type_function.data.params_len, | |
}, | |
.return_type = type_function.data.return_type, | |
.comptime_bits = comptime_bits, | |
.noalias_bits = noalias_bits, | |
.alignment = type_function.data.flags.alignment, | |
.cc = type_function.data.flags.cc, | |
.is_var_args = type_function.data.flags.is_var_args, | |
.is_noinline = type_function.data.flags.is_noinline, | |
.align_is_generic = type_function.data.flags.align_is_generic, | |
.cc_is_generic = type_function.data.flags.cc_is_generic, | |
.section_is_generic = type_function.data.flags.section_is_generic, | |
.addrspace_is_generic = type_function.data.flags.addrspace_is_generic, | |
.is_generic = type_function.data.flags.is_generic, | |
}; | |
} | |
fn extraFuncDecl(ip: *const InternPool, extra_index: u32) Key.Func { | |
const P = Tag.FuncDecl; | |
const func_decl = ip.extraDataTrail(P, extra_index); | |
return .{ | |
.ty = func_decl.data.ty, | |
.uncoerced_ty = func_decl.data.ty, | |
.analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, | |
.zir_body_inst_extra_index = extra_index + std.meta.fieldIndex(P, "zir_body_inst").?, | |
.resolved_error_set_extra_index = if (func_decl.data.analysis.inferred_error_set) func_decl.end else 0, | |
.branch_quota_extra_index = 0, | |
.owner_decl = func_decl.data.owner_decl, | |
.zir_body_inst = func_decl.data.zir_body_inst, | |
.lbrace_line = func_decl.data.lbrace_line, | |
.rbrace_line = func_decl.data.rbrace_line, | |
.lbrace_column = func_decl.data.lbrace_column, | |
.rbrace_column = func_decl.data.rbrace_column, | |
.generic_owner = .none, | |
.comptime_args = .{ .start = 0, .len = 0 }, | |
}; | |
} | |
fn extraFuncInstance(ip: *const InternPool, extra_index: u32) Key.Func { | |
const P = Tag.FuncInstance; | |
const fi = ip.extraDataTrail(P, extra_index); | |
const func_decl = ip.funcDeclInfo(fi.data.generic_owner); | |
return .{ | |
.ty = fi.data.ty, | |
.uncoerced_ty = fi.data.ty, | |
.analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, | |
.zir_body_inst_extra_index = func_decl.zir_body_inst_extra_index, | |
.resolved_error_set_extra_index = if (fi.data.analysis.inferred_error_set) fi.end else 0, | |
.branch_quota_extra_index = extra_index + std.meta.fieldIndex(P, "branch_quota").?, | |
.owner_decl = fi.data.owner_decl, | |
.zir_body_inst = func_decl.zir_body_inst, | |
.lbrace_line = func_decl.lbrace_line, | |
.rbrace_line = func_decl.rbrace_line, | |
.lbrace_column = func_decl.lbrace_column, | |
.rbrace_column = func_decl.rbrace_column, | |
.generic_owner = fi.data.generic_owner, | |
.comptime_args = .{ | |
.start = fi.end + @intFromBool(fi.data.analysis.inferred_error_set), | |
.len = ip.funcTypeParamsLen(func_decl.ty), | |
}, | |
}; | |
} | |
fn extraFuncCoerced(ip: *const InternPool, extra_index: u32) Key.Func { | |
const func_coerced = ip.extraData(Tag.FuncCoerced, extra_index); | |
const sub_item = ip.items.get(@intFromEnum(func_coerced.func)); | |
var func: Key.Func = switch (sub_item.tag) { | |
.func_instance => ip.extraFuncInstance(sub_item.data), | |
.func_decl => ip.extraFuncDecl(sub_item.data), | |
else => unreachable, | |
}; | |
func.ty = func_coerced.ty; | |
return func; | |
} | |
fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { | |
const enum_explicit = ip.extraDataTrail(EnumExplicit, data); | |
const fields_len = enum_explicit.data.fields_len; | |
return .{ .enum_type = .{ | |
.decl = enum_explicit.data.decl, | |
.namespace = enum_explicit.data.namespace, | |
.tag_ty = enum_explicit.data.int_tag_type, | |
.names = .{ | |
.start = @intCast(enum_explicit.end), | |
.len = fields_len, | |
}, | |
.values = .{ | |
.start = @intCast(enum_explicit.end + fields_len), | |
.len = if (enum_explicit.data.values_map != .none) fields_len else 0, | |
}, | |
.tag_mode = tag_mode, | |
.names_map = enum_explicit.data.names_map.toOptional(), | |
.values_map = enum_explicit.data.values_map, | |
.zir_index = enum_explicit.data.zir_index, | |
} }; | |
} | |
fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key { | |
const int_info = ip.limbData(Int, limb_index); | |
return .{ .int = .{ | |
.ty = int_info.ty, | |
.storage = .{ .big_int = .{ | |
.limbs = ip.limbSlice(Int, limb_index, int_info.limbs_len), | |
.positive = positive, | |
} }, | |
} }; | |
} | |
pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); | |
if (gop.found_existing) return @enumFromInt(gop.index); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
switch (key) { | |
.int_type => |int_type| { | |
const t: Tag = switch (int_type.signedness) { | |
.signed => .type_int_signed, | |
.unsigned => .type_int_unsigned, | |
}; | |
ip.items.appendAssumeCapacity(.{ | |
.tag = t, | |
.data = int_type.bits, | |
}); | |
}, | |
.ptr_type => |ptr_type| { | |
assert(ptr_type.child != .none); | |
assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.child); | |
if (ptr_type.flags.size == .Slice) { | |
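// A slice type is encoded as a `type_slice` item pointing at the | |
// corresponding many-pointer type. Interning that many-pointer | |
// re-enters `get`, so pop the map slot reserved above for the slice | |
// key and re-reserve it afterwards, keeping `map` and `items` | |
// index-aligned. | |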
_ = ip.map.pop(); | |
var new_key = key; | |
new_key.ptr_type.flags.size = .Many; | |
const ptr_type_index = try ip.get(gpa, new_key); | |
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_slice, | |
.data = @intFromEnum(ptr_type_index), | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
var ptr_type_adjusted = ptr_type; | |
if (ptr_type.flags.size == .C) ptr_type_adjusted.flags.is_allowzero = true; | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_pointer, | |
.data = try ip.addExtra(gpa, ptr_type_adjusted), | |
}); | |
}, | |
.array_type => |array_type| { | |
assert(array_type.child != .none); | |
assert(array_type.sentinel == .none or ip.typeOf(array_type.sentinel) == array_type.child); | |
if (std.math.cast(u32, array_type.len)) |len| { | |
if (array_type.sentinel == .none) { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_array_small, | |
.data = try ip.addExtra(gpa, Vector{ | |
.len = len, | |
.child = array_type.child, | |
}), | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
} | |
const length = Array.Length.init(array_type.len); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_array_big, | |
.data = try ip.addExtra(gpa, Array{ | |
.len0 = length.a, | |
.len1 = length.b, | |
.child = array_type.child, | |
.sentinel = array_type.sentinel, | |
}), | |
}); | |
}, | |
.vector_type => |vector_type| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_vector, | |
.data = try ip.addExtra(gpa, Vector{ | |
.len = vector_type.len, | |
.child = vector_type.child, | |
}), | |
}); | |
}, | |
.opt_type => |payload_type| { | |
assert(payload_type != .none); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_optional, | |
.data = @intFromEnum(payload_type), | |
}); | |
}, | |
.anyframe_type => |payload_type| { | |
// payload_type might be none, indicating the type is `anyframe`. | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_anyframe, | |
.data = @intFromEnum(payload_type), | |
}); | |
}, | |
.error_union_type => |error_union_type| { | |
ip.items.appendAssumeCapacity(if (error_union_type.error_set_type == .anyerror_type) .{ | |
.tag = .type_anyerror_union, | |
.data = @intFromEnum(error_union_type.payload_type), | |
} else .{ | |
.tag = .type_error_union, | |
.data = try ip.addExtra(gpa, error_union_type), | |
}); | |
}, | |
.error_set_type => |error_set_type| { | |
assert(error_set_type.names_map == .none); | |
assert(std.sort.isSorted(NullTerminatedString, error_set_type.names.get(ip), {}, NullTerminatedString.indexLessThan)); | |
const names = error_set_type.names.get(ip); | |
const names_map = try ip.addMap(gpa, names.len); | |
addStringsToMap(ip, names_map, names); | |
const names_len = error_set_type.names.len; | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_error_set, | |
.data = ip.addExtraAssumeCapacity(Tag.ErrorSet{ | |
.names_len = names_len, | |
.names_map = names_map, | |
}), | |
}); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(error_set_type.names.get(ip))); | |
}, | |
.inferred_error_set_type => |ies_index| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_inferred_error_set, | |
.data = @intFromEnum(ies_index), | |
}); | |
}, | |
.simple_type => |simple_type| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .simple_type, | |
.data = @intFromEnum(simple_type), | |
}); | |
}, | |
.simple_value => |simple_value| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .simple_value, | |
.data = @intFromEnum(simple_value), | |
}); | |
}, | |
.undef => |ty| { | |
assert(ty != .none); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .undef, | |
.data = @intFromEnum(ty), | |
}); | |
}, | |
.struct_type => unreachable, // use getStructType() instead | |
.anon_struct_type => unreachable, // use getAnonStructType() instead | |
.union_type => unreachable, // use getUnionType() instead | |
.opaque_type => |opaque_type| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_opaque, | |
.data = try ip.addExtra(gpa, opaque_type), | |
}); | |
}, | |
.enum_type => unreachable, // use getEnum() or getIncompleteEnum() instead | |
.func_type => unreachable, // use getFuncType() instead | |
.extern_func => unreachable, // use getExternFunc() instead | |
.func => unreachable, // use getFuncInstance() or getFuncDecl() instead | |
.variable => |variable| { | |
const has_init = variable.init != .none; | |
if (has_init) assert(variable.ty == ip.typeOf(variable.init)); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .variable, | |
.data = try ip.addExtra(gpa, Tag.Variable{ | |
.ty = variable.ty, | |
.init = variable.init, | |
.decl = variable.decl, | |
.lib_name = variable.lib_name, | |
.flags = .{ | |
.is_extern = variable.is_extern, | |
.is_const = variable.is_const, | |
.is_threadlocal = variable.is_threadlocal, | |
.is_weak_linkage = variable.is_weak_linkage, | |
}, | |
}), | |
}); | |
}, | |
.slice => |slice| { | |
assert(ip.indexToKey(slice.ty).ptr_type.flags.size == .Slice); | |
assert(ip.indexToKey(ip.typeOf(slice.ptr)).ptr_type.flags.size == .Many); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .ptr_slice, | |
.data = try ip.addExtra(gpa, PtrSlice{ | |
.ty = slice.ty, | |
.ptr = slice.ptr, | |
.len = slice.len, | |
}), | |
}); | |
}, | |
.ptr => |ptr| { | |
const ptr_type = ip.indexToKey(ptr.ty).ptr_type; | |
assert(ptr_type.flags.size != .Slice); | |
switch (ptr.addr) { | |
.decl => |decl| ip.items.appendAssumeCapacity(.{ | |
.tag = .ptr_decl, | |
.data = try ip.addExtra(gpa, PtrDecl{ | |
.ty = ptr.ty, | |
.decl = decl, | |
}), | |
}), | |
.mut_decl => |mut_decl| ip.items.appendAssumeCapacity(.{ | |
.tag = .ptr_mut_decl, | |
.data = try ip.addExtra(gpa, PtrMutDecl{ | |
.ty = ptr.ty, | |
.decl = mut_decl.decl, | |
.runtime_index = mut_decl.runtime_index, | |
}), | |
}), | |
.anon_decl => |anon_decl| ip.items.appendAssumeCapacity( | |
if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) .{ | |
.tag = .ptr_anon_decl, | |
.data = try ip.addExtra(gpa, PtrAnonDecl{ | |
.ty = ptr.ty, | |
.val = anon_decl.val, | |
}), | |
} else .{ | |
.tag = .ptr_anon_decl_aligned, | |
.data = try ip.addExtra(gpa, PtrAnonDeclAligned{ | |
.ty = ptr.ty, | |
.val = anon_decl.val, | |
.orig_ty = anon_decl.orig_ty, | |
}), | |
}, | |
), | |
.comptime_field => |field_val| { | |
assert(field_val != .none); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .ptr_comptime_field, | |
.data = try ip.addExtra(gpa, PtrComptimeField{ | |
.ty = ptr.ty, | |
.field_val = field_val, | |
}), | |
}); | |
}, | |
.int, .eu_payload, .opt_payload => |base| { | |
switch (ptr.addr) { | |
.int => assert(ip.typeOf(base) == .usize_type), | |
.eu_payload => assert(ip.indexToKey( | |
ip.indexToKey(ip.typeOf(base)).ptr_type.child, | |
) == .error_union_type), | |
.opt_payload => assert(ip.indexToKey( | |
ip.indexToKey(ip.typeOf(base)).ptr_type.child, | |
) == .opt_type), | |
else => unreachable, | |
} | |
ip.items.appendAssumeCapacity(.{ | |
.tag = switch (ptr.addr) { | |
.int => .ptr_int, | |
.eu_payload => .ptr_eu_payload, | |
.opt_payload => .ptr_opt_payload, | |
else => unreachable, | |
}, | |
.data = try ip.addExtra(gpa, PtrBase{ | |
.ty = ptr.ty, | |
.base = base, | |
}), | |
}); | |
}, | |
.elem, .field => |base_index| { | |
const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type; | |
switch (ptr.addr) { | |
.elem => assert(base_ptr_type.flags.size == .Many), | |
.field => { | |
assert(base_ptr_type.flags.size == .One); | |
switch (ip.indexToKey(base_ptr_type.child)) { | |
.anon_struct_type => |anon_struct_type| { | |
assert(ptr.addr == .field); | |
assert(base_index.index < anon_struct_type.types.len); | |
}, | |
.struct_type => |struct_type| { | |
assert(ptr.addr == .field); | |
assert(base_index.index < struct_type.field_types.len); | |
}, | |
.union_type => |union_key| { | |
const union_type = ip.loadUnionType(union_key); | |
assert(ptr.addr == .field); | |
assert(base_index.index < union_type.field_names.len); | |
}, | |
.ptr_type => |slice_type| { | |
assert(ptr.addr == .field); | |
assert(slice_type.flags.size == .Slice); | |
assert(base_index.index < 2); | |
}, | |
else => unreachable, | |
} | |
}, | |
else => unreachable, | |
} | |
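// The enclosing `get` has already speculatively inserted this key into
// `ip.map` (entry N of the map corresponds to entry N of `items`), so pop
// it before the nested `get` below appends its own entry, then re-add it
// and re-reserve item capacity.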
_ = ip.map.pop(); | |
const index_index = try ip.get(gpa, .{ .int = .{ | |
.ty = .usize_type, | |
.storage = .{ .u64 = base_index.index }, | |
} }); | |
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = switch (ptr.addr) { | |
.elem => .ptr_elem, | |
.field => .ptr_field, | |
else => unreachable, | |
}, | |
.data = try ip.addExtra(gpa, PtrBaseIndex{ | |
.ty = ptr.ty, | |
.base = base_index.base, | |
.index = index_index, | |
}), | |
}); | |
}, | |
} | |
}, | |
.opt => |opt| { | |
assert(ip.isOptionalType(opt.ty)); | |
assert(opt.val == .none or ip.indexToKey(opt.ty).opt_type == ip.typeOf(opt.val)); | |
ip.items.appendAssumeCapacity(if (opt.val == .none) .{ | |
.tag = .opt_null, | |
.data = @intFromEnum(opt.ty), | |
} else .{ | |
.tag = .opt_payload, | |
.data = try ip.addExtra(gpa, Tag.TypeValue{ | |
.ty = opt.ty, | |
.val = opt.val, | |
}), | |
}); | |
}, | |
.int => |int| b: { | |
assert(ip.isIntegerType(int.ty)); | |
switch (int.storage) { | |
.u64, .i64, .big_int => {}, | |
.lazy_align, .lazy_size => |lazy_ty| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = switch (int.storage) { | |
else => unreachable, | |
.lazy_align => .int_lazy_align, | |
.lazy_size => .int_lazy_size, | |
}, | |
.data = try ip.addExtra(gpa, IntLazy{ | |
.ty = int.ty, | |
.lazy_ty = lazy_ty, | |
}), | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
}, | |
} | |
switch (int.ty) { | |
.u8_type => switch (int.storage) { | |
.big_int => |big_int| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_u8, | |
.data = big_int.to(u8) catch unreachable, | |
}); | |
break :b; | |
}, | |
inline .u64, .i64 => |x| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_u8, | |
.data = @as(u8, @intCast(x)), | |
}); | |
break :b; | |
}, | |
.lazy_align, .lazy_size => unreachable, | |
}, | |
.u16_type => switch (int.storage) { | |
.big_int => |big_int| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_u16, | |
.data = big_int.to(u16) catch unreachable, | |
}); | |
break :b; | |
}, | |
inline .u64, .i64 => |x| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_u16, | |
.data = @as(u16, @intCast(x)), | |
}); | |
break :b; | |
}, | |
.lazy_align, .lazy_size => unreachable, | |
}, | |
.u32_type => switch (int.storage) { | |
.big_int => |big_int| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_u32, | |
.data = big_int.to(u32) catch unreachable, | |
}); | |
break :b; | |
}, | |
inline .u64, .i64 => |x| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_u32, | |
.data = @as(u32, @intCast(x)), | |
}); | |
break :b; | |
}, | |
.lazy_align, .lazy_size => unreachable, | |
}, | |
.i32_type => switch (int.storage) { | |
.big_int => |big_int| { | |
const casted = big_int.to(i32) catch unreachable; | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_i32, | |
.data = @as(u32, @bitCast(casted)), | |
}); | |
break :b; | |
}, | |
inline .u64, .i64 => |x| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_i32, | |
.data = @as(u32, @bitCast(@as(i32, @intCast(x)))), | |
}); | |
break :b; | |
}, | |
.lazy_align, .lazy_size => unreachable, | |
}, | |
.usize_type => switch (int.storage) { | |
.big_int => |big_int| { | |
if (big_int.to(u32)) |casted| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_usize, | |
.data = casted, | |
}); | |
break :b; | |
} else |_| {} | |
}, | |
inline .u64, .i64 => |x| { | |
if (std.math.cast(u32, x)) |casted| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_usize, | |
.data = casted, | |
}); | |
break :b; | |
} | |
}, | |
.lazy_align, .lazy_size => unreachable, | |
}, | |
.comptime_int_type => switch (int.storage) { | |
.big_int => |big_int| { | |
if (big_int.to(u32)) |casted| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_comptime_int_u32, | |
.data = casted, | |
}); | |
break :b; | |
} else |_| {} | |
if (big_int.to(i32)) |casted| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_comptime_int_i32, | |
.data = @as(u32, @bitCast(casted)), | |
}); | |
break :b; | |
} else |_| {} | |
}, | |
inline .u64, .i64 => |x| { | |
if (std.math.cast(u32, x)) |casted| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_comptime_int_u32, | |
.data = casted, | |
}); | |
break :b; | |
} | |
if (std.math.cast(i32, x)) |casted| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_comptime_int_i32, | |
.data = @as(u32, @bitCast(casted)), | |
}); | |
break :b; | |
} | |
}, | |
.lazy_align, .lazy_size => unreachable, | |
}, | |
else => {}, | |
} | |
switch (int.storage) { | |
.big_int => |big_int| { | |
if (big_int.to(u32)) |casted| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_small, | |
.data = try ip.addExtra(gpa, IntSmall{ | |
.ty = int.ty, | |
.value = casted, | |
}), | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} else |_| {} | |
const tag: Tag = if (big_int.positive) .int_positive else .int_negative; | |
try addInt(ip, gpa, int.ty, tag, big_int.limbs); | |
}, | |
inline .u64, .i64 => |x| { | |
if (std.math.cast(u32, x)) |casted| { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .int_small, | |
.data = try ip.addExtra(gpa, IntSmall{ | |
.ty = int.ty, | |
.value = casted, | |
}), | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
var buf: [2]Limb = undefined; | |
const big_int = BigIntMutable.init(&buf, x).toConst(); | |
const tag: Tag = if (big_int.positive) .int_positive else .int_negative; | |
try addInt(ip, gpa, int.ty, tag, big_int.limbs); | |
}, | |
.lazy_align, .lazy_size => unreachable, | |
} | |
}, | |
.err => |err| { | |
assert(ip.isErrorSetType(err.ty)); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .error_set_error, | |
.data = try ip.addExtra(gpa, err), | |
}); | |
}, | |
.error_union => |error_union| { | |
assert(ip.isErrorUnionType(error_union.ty)); | |
ip.items.appendAssumeCapacity(switch (error_union.val) { | |
.err_name => |err_name| .{ | |
.tag = .error_union_error, | |
.data = try ip.addExtra(gpa, Key.Error{ | |
.ty = error_union.ty, | |
.name = err_name, | |
}), | |
}, | |
.payload => |payload| .{ | |
.tag = .error_union_payload, | |
.data = try ip.addExtra(gpa, Tag.TypeValue{ | |
.ty = error_union.ty, | |
.val = payload, | |
}), | |
}, | |
}); | |
}, | |
.enum_literal => |enum_literal| ip.items.appendAssumeCapacity(.{ | |
.tag = .enum_literal, | |
.data = @intFromEnum(enum_literal), | |
}), | |
.enum_tag => |enum_tag| { | |
assert(ip.isEnumType(enum_tag.ty)); | |
switch (ip.indexToKey(enum_tag.ty)) { | |
.simple_type => assert(ip.isIntegerType(ip.typeOf(enum_tag.int))), | |
.enum_type => |enum_type| assert(ip.typeOf(enum_tag.int) == enum_type.tag_ty), | |
else => unreachable, | |
} | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .enum_tag, | |
.data = try ip.addExtra(gpa, enum_tag), | |
}); | |
}, | |
.empty_enum_value => |enum_or_union_ty| ip.items.appendAssumeCapacity(.{ | |
.tag = .only_possible_value, | |
.data = @intFromEnum(enum_or_union_ty), | |
}), | |
.float => |float| { | |
switch (float.ty) { | |
.f16_type => ip.items.appendAssumeCapacity(.{ | |
.tag = .float_f16, | |
.data = @as(u16, @bitCast(float.storage.f16)), | |
}), | |
.f32_type => ip.items.appendAssumeCapacity(.{ | |
.tag = .float_f32, | |
.data = @as(u32, @bitCast(float.storage.f32)), | |
}), | |
.f64_type => ip.items.appendAssumeCapacity(.{ | |
.tag = .float_f64, | |
.data = try ip.addExtra(gpa, Float64.pack(float.storage.f64)), | |
}), | |
.f80_type => ip.items.appendAssumeCapacity(.{ | |
.tag = .float_f80, | |
.data = try ip.addExtra(gpa, Float80.pack(float.storage.f80)), | |
}), | |
.f128_type => ip.items.appendAssumeCapacity(.{ | |
.tag = .float_f128, | |
.data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), | |
}), | |
.c_longdouble_type => switch (float.storage) { | |
.f80 => |x| ip.items.appendAssumeCapacity(.{ | |
.tag = .float_c_longdouble_f80, | |
.data = try ip.addExtra(gpa, Float80.pack(x)), | |
}), | |
inline .f16, .f32, .f64, .f128 => |x| ip.items.appendAssumeCapacity(.{ | |
.tag = .float_c_longdouble_f128, | |
.data = try ip.addExtra(gpa, Float128.pack(x)), | |
}), | |
}, | |
.comptime_float_type => ip.items.appendAssumeCapacity(.{ | |
.tag = .float_comptime_float, | |
.data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), | |
}), | |
else => unreachable, | |
} | |
}, | |
.aggregate => |aggregate| { | |
const ty_key = ip.indexToKey(aggregate.ty); | |
const len = ip.aggregateTypeLen(aggregate.ty); | |
const child = switch (ty_key) { | |
.array_type => |array_type| array_type.child, | |
.vector_type => |vector_type| vector_type.child, | |
.anon_struct_type, .struct_type => .none, | |
else => unreachable, | |
}; | |
const sentinel = switch (ty_key) { | |
.array_type => |array_type| array_type.sentinel, | |
.vector_type, .anon_struct_type, .struct_type => .none, | |
else => unreachable, | |
}; | |
const len_including_sentinel = len + @intFromBool(sentinel != .none); | |
switch (aggregate.storage) { | |
.bytes => |bytes| { | |
assert(child == .u8_type); | |
if (bytes.len != len) { | |
assert(bytes.len == len_including_sentinel); | |
assert(bytes[@intCast(len)] == ip.indexToKey(sentinel).int.storage.u64); | |
} | |
}, | |
.elems => |elems| { | |
if (elems.len != len) { | |
assert(elems.len == len_including_sentinel); | |
assert(elems[@intCast(len)] == sentinel); | |
} | |
}, | |
.repeated_elem => |elem| { | |
assert(sentinel == .none or elem == sentinel); | |
}, | |
} | |
switch (ty_key) { | |
.array_type, .vector_type => { | |
for (aggregate.storage.values()) |elem| { | |
assert(ip.typeOf(elem) == child); | |
} | |
}, | |
.struct_type => |t| { | |
for (aggregate.storage.values(), t.field_types.get(ip)) |elem, field_ty| { | |
assert(ip.typeOf(elem) == field_ty); | |
} | |
}, | |
.anon_struct_type => |anon_struct_type| { | |
for (aggregate.storage.values(), anon_struct_type.types.get(ip)) |elem, ty| { | |
assert(ip.typeOf(elem) == ty); | |
} | |
}, | |
else => unreachable, | |
} | |
if (len == 0) { | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .only_possible_value, | |
.data = @intFromEnum(aggregate.ty), | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
switch (ty_key) { | |
.anon_struct_type => |anon_struct_type| opv: { | |
switch (aggregate.storage) { | |
.bytes => |bytes| for (anon_struct_type.values.get(ip), bytes) |value, byte| { | |
if (value != ip.getIfExists(.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = byte }, | |
} })) break :opv; | |
}, | |
.elems => |elems| if (!std.mem.eql( | |
Index, | |
anon_struct_type.values.get(ip), | |
elems, | |
)) break :opv, | |
.repeated_elem => |elem| for (anon_struct_type.values.get(ip)) |value| { | |
if (value != elem) break :opv; | |
}, | |
} | |
// This encoding works because, as verified just above, the type itself | |
// already stores the full slice of comptime-known field values, so the | |
// aggregate needs no payload beyond its type. | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .only_possible_value, | |
.data = @intFromEnum(aggregate.ty), | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
}, | |
else => {}, | |
} | |
repeated: { | |
switch (aggregate.storage) { | |
.bytes => |bytes| for (bytes[1..@as(usize, @intCast(len))]) |byte| | |
if (byte != bytes[0]) break :repeated, | |
.elems => |elems| for (elems[1..@as(usize, @intCast(len))]) |elem| | |
if (elem != elems[0]) break :repeated, | |
.repeated_elem => {}, | |
} | |
const elem = switch (aggregate.storage) { | |
.bytes => |bytes| elem: { | |
_ = ip.map.pop(); | |
const elem = try ip.get(gpa, .{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = bytes[0] }, | |
} }); | |
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
break :elem elem; | |
}, | |
.elems => |elems| elems[0], | |
.repeated_elem => |elem| elem, | |
}; | |
try ip.extra.ensureUnusedCapacity( | |
gpa, | |
@typeInfo(Repeated).Struct.fields.len, | |
); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .repeated, | |
.data = ip.addExtraAssumeCapacity(Repeated{ | |
.ty = aggregate.ty, | |
.elem_val = elem, | |
}), | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
if (child == .u8_type) bytes: { | |
const string_bytes_index = ip.string_bytes.items.len; | |
try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(len_including_sentinel + 1)); | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); | |
switch (aggregate.storage) { | |
.bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes[0..@intCast(len)]), | |
.elems => |elems| for (elems[0..@intCast(len)]) |elem| switch (ip.indexToKey(elem)) { | |
.undef => { | |
ip.string_bytes.shrinkRetainingCapacity(string_bytes_index); | |
break :bytes; | |
}, | |
.int => |int| ip.string_bytes.appendAssumeCapacity( | |
@intCast(int.storage.u64), | |
), | |
else => unreachable, | |
}, | |
.repeated_elem => |elem| switch (ip.indexToKey(elem)) { | |
.undef => break :bytes, | |
.int => |int| @memset( | |
ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(len)), | |
@intCast(int.storage.u64), | |
), | |
else => unreachable, | |
}, | |
} | |
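// A byte string with an embedded 0 cannot round-trip through the
// null-terminated string table, so it is referenced by its raw starting
// offset in `string_bytes` and skips deduplication.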
const has_internal_null = | |
std.mem.indexOfScalar(u8, ip.string_bytes.items[string_bytes_index..], 0) != null; | |
if (sentinel != .none) ip.string_bytes.appendAssumeCapacity( | |
@intCast(ip.indexToKey(sentinel).int.storage.u64), | |
); | |
const string: String = if (has_internal_null) | |
@enumFromInt(string_bytes_index) | |
else | |
(try ip.getOrPutTrailingString(gpa, @intCast(len_including_sentinel))).toString(); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .bytes, | |
.data = ip.addExtraAssumeCapacity(Bytes{ | |
.ty = aggregate.ty, | |
.bytes = string, | |
}), | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
try ip.extra.ensureUnusedCapacity( | |
gpa, | |
@typeInfo(Tag.Aggregate).Struct.fields.len + @as(usize, @intCast(len_including_sentinel + 1)), | |
); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .aggregate, | |
.data = ip.addExtraAssumeCapacity(Tag.Aggregate{ | |
.ty = aggregate.ty, | |
}), | |
}); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(aggregate.storage.elems)); | |
if (sentinel != .none) ip.extra.appendAssumeCapacity(@intFromEnum(sentinel)); | |
}, | |
.un => |un| { | |
assert(un.ty != .none); | |
assert(un.val != .none); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .union_value, | |
.data = try ip.addExtra(gpa, un), | |
}); | |
}, | |
.memoized_call => |memoized_call| { | |
for (memoized_call.arg_values) |arg| assert(arg != .none); | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + | |
memoized_call.arg_values.len); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .memoized_call, | |
.data = ip.addExtraAssumeCapacity(MemoizedCall{ | |
.func = memoized_call.func, | |
.args_len = @as(u32, @intCast(memoized_call.arg_values.len)), | |
.result = memoized_call.result, | |
}), | |
}); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(memoized_call.arg_values)); | |
}, | |
} | |
return @enumFromInt(ip.items.len - 1); | |
} | |
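// The `get` function above shows the core encoding invariant: every interned
// value is one (tag, u32) row in `items`, where the u32 either holds the
// payload inline (as in `int_u8`) or indexes a struct serialized into the
// flat `extra` array, and the returned `Index` is `items.len - 1` right
// after the append. A minimal, self-contained sketch of that layout
// (illustrative names only, not part of the original source):
const ExampleEncoding = struct {
    const ExTag = enum(u8) { small_int, pair };
    const ExItem = struct { tag: ExTag, data: u32 };

    items: std.ArrayListUnmanaged(ExItem) = .{},
    extra: std.ArrayListUnmanaged(u32) = .{},

    /// Small payload: stored directly in the 4-byte `data` field.
    fn addSmall(p: *ExampleEncoding, gpa: Allocator, x: u32) !u32 {
        try p.items.append(gpa, .{ .tag = .small_int, .data = x });
        return @intCast(p.items.len - 1);
    }

    /// Large payload: serialized into `extra`, with `data` holding its index.
    fn addPair(p: *ExampleEncoding, gpa: Allocator, a: u32, b: u32) !u32 {
        const extra_index: u32 = @intCast(p.extra.items.len);
        try p.extra.appendSlice(gpa, &.{ a, b });
        try p.items.append(gpa, .{ .tag = .pair, .data = extra_index });
        return @intCast(p.items.len - 1);
    }
};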
pub const UnionTypeInit = struct { | |
flags: Tag.TypeUnion.Flags, | |
decl: DeclIndex, | |
namespace: NamespaceIndex, | |
zir_index: TrackedInst.Index.Optional, | |
fields_len: u32, | |
enum_tag_ty: Index, | |
/// May have length 0, which leaves the values unset until later. | |
field_types: []const Index, | |
/// May have length 0, which leaves the values unset until later. | |
/// The logic for `any_aligned_fields` is asserted to have been run before | |
/// calling this function. | |
field_aligns: []const Alignment, | |
}; | |
pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocator.Error!Index { | |
const prev_extra_len = ip.extra.items.len; | |
const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0; | |
const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4); | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeUnion).Struct.fields.len + | |
ini.fields_len + // field types | |
align_elements_len); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
const union_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeUnion{ | |
.flags = ini.flags, | |
.size = std.math.maxInt(u32), | |
.padding = std.math.maxInt(u32), | |
.decl = ini.decl, | |
.namespace = ini.namespace, | |
.tag_ty = ini.enum_tag_ty, | |
.zir_index = ini.zir_index, | |
}); | |
// field types | |
if (ini.field_types.len > 0) { | |
assert(ini.field_types.len == ini.fields_len); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.field_types)); | |
} else { | |
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); | |
} | |
// field alignments | |
if (ini.flags.any_aligned_fields) { | |
ip.extra.appendNTimesAssumeCapacity(align_element, align_elements_len); | |
if (ini.field_aligns.len > 0) { | |
assert(ini.field_aligns.len == ini.fields_len); | |
@memcpy((Alignment.Slice{ | |
.start = @intCast(ip.extra.items.len - align_elements_len), | |
.len = @intCast(ini.field_aligns.len), | |
}).get(ip), ini.field_aligns); | |
} | |
} else { | |
assert(ini.field_aligns.len == 0); | |
} | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = try ip.map.getOrPutAdapted(gpa, Key{ | |
.union_type = extraUnionType(ip, union_type_extra_index), | |
}, adapter); | |
if (gop.found_existing) { | |
ip.extra.items.len = prev_extra_len; | |
return @enumFromInt(gop.index); | |
} | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_union, | |
.data = union_type_extra_index, | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
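// `getUnionType` above serializes its payload into `extra` first, asks the
// map whether an equal type already exists, and rolls `extra.items.len`
// back on a hit. A stripped-down sketch of that speculative-append idiom,
// with a plain integer-keyed map standing in for the adapted `Key` hashing
// (illustrative only):
fn exampleSpeculativeIntern(
    gpa: Allocator,
    extra: *std.ArrayListUnmanaged(u32),
    map: *std.AutoHashMapUnmanaged(u64, u32),
    a: u32,
    b: u32,
) !u32 {
    const prev_extra_len = extra.items.len;
    // Speculatively serialize the payload...
    try extra.appendSlice(gpa, &.{ a, b });
    // ...then ask the map whether an equal payload was interned before.
    const key = (@as(u64, a) << 32) | b;
    const gop = try map.getOrPut(gpa, key);
    if (gop.found_existing) {
        // Hit: revert the speculative append and reuse the old index.
        extra.items.len = prev_extra_len;
        return gop.value_ptr.*;
    }
    gop.value_ptr.* = @intCast(prev_extra_len);
    return @intCast(prev_extra_len);
}
// In the real code the map key is the deserialized `Key`, compared through
// `KeyAdapter` rather than a packed integer.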
pub const StructTypeInit = struct { | |
decl: DeclIndex, | |
namespace: OptionalNamespaceIndex, | |
layout: std.builtin.Type.ContainerLayout, | |
zir_index: TrackedInst.Index.Optional, | |
fields_len: u32, | |
known_non_opv: bool, | |
requires_comptime: RequiresComptime, | |
is_tuple: bool, | |
any_comptime_fields: bool, | |
any_default_inits: bool, | |
inits_resolved: bool, | |
any_aligned_fields: bool, | |
}; | |
pub fn getStructType( | |
ip: *InternPool, | |
gpa: Allocator, | |
ini: StructTypeInit, | |
) Allocator.Error!Index { | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const key: Key = .{ | |
.struct_type = .{ | |
// Only the decl matters for hashing and equality purposes. | |
.decl = ini.decl.toOptional(), | |
.extra_index = undefined, | |
.namespace = undefined, | |
.zir_index = undefined, | |
.layout = undefined, | |
.field_names = undefined, | |
.field_types = undefined, | |
.field_inits = undefined, | |
.field_aligns = undefined, | |
.runtime_order = undefined, | |
.comptime_bits = undefined, | |
.offsets = undefined, | |
.names_map = undefined, | |
}, | |
}; | |
const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); | |
if (gop.found_existing) return @enumFromInt(gop.index); | |
errdefer _ = ip.map.pop(); | |
const names_map = try ip.addMap(gpa, ini.fields_len); | |
errdefer _ = ip.maps.pop(); | |
const is_extern = switch (ini.layout) { | |
.Auto => false, | |
.Extern => true, | |
.Packed => { | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStructPacked).Struct.fields.len + | |
ini.fields_len + // types | |
ini.fields_len + // names | |
ini.fields_len); // inits | |
try ip.items.append(gpa, .{ | |
.tag = if (ini.any_default_inits) .type_struct_packed_inits else .type_struct_packed, | |
.data = ip.addExtraAssumeCapacity(Tag.TypeStructPacked{ | |
.decl = ini.decl, | |
.zir_index = ini.zir_index, | |
.fields_len = ini.fields_len, | |
.namespace = ini.namespace, | |
.backing_int_ty = .none, | |
.names_map = names_map, | |
.flags = .{ | |
.field_inits_wip = false, | |
.inits_resolved = ini.inits_resolved, | |
}, | |
}), | |
}); | |
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); | |
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(OptionalNullTerminatedString.none), ini.fields_len); | |
if (ini.any_default_inits) { | |
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); | |
} | |
return @enumFromInt(ip.items.len - 1); | |
}, | |
}; | |
const align_elements_len = if (ini.any_aligned_fields) (ini.fields_len + 3) / 4 else 0; | |
const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4); | |
const comptime_elements_len = if (ini.any_comptime_fields) (ini.fields_len + 31) / 32 else 0; | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStruct).Struct.fields.len + | |
(ini.fields_len * 5) + // types, names, inits, runtime order, offsets | |
align_elements_len + comptime_elements_len + | |
2); // names_map + namespace | |
try ip.items.append(gpa, .{ | |
.tag = .type_struct, | |
.data = ip.addExtraAssumeCapacity(Tag.TypeStruct{ | |
.decl = ini.decl, | |
.zir_index = ini.zir_index, | |
.fields_len = ini.fields_len, | |
.size = std.math.maxInt(u32), | |
.flags = .{ | |
.is_extern = is_extern, | |
.known_non_opv = ini.known_non_opv, | |
.requires_comptime = ini.requires_comptime, | |
.is_tuple = ini.is_tuple, | |
.assumed_runtime_bits = false, | |
.assumed_pointer_aligned = false, | |
.has_namespace = ini.namespace != .none, | |
.any_comptime_fields = ini.any_comptime_fields, | |
.any_default_inits = ini.any_default_inits, | |
.any_aligned_fields = ini.any_aligned_fields, | |
.alignment = .none, | |
.alignment_wip = false, | |
.field_types_wip = false, | |
.layout_wip = false, | |
.layout_resolved = false, | |
.field_inits_wip = false, | |
.inits_resolved = ini.inits_resolved, | |
.fully_resolved = false, | |
}, | |
}), | |
}); | |
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); | |
if (!ini.is_tuple) { | |
ip.extra.appendAssumeCapacity(@intFromEnum(names_map)); | |
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(OptionalNullTerminatedString.none), ini.fields_len); | |
} | |
if (ini.any_default_inits) { | |
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); | |
} | |
if (ini.namespace.unwrap()) |namespace| { | |
ip.extra.appendAssumeCapacity(@intFromEnum(namespace)); | |
} | |
if (ini.any_aligned_fields) { | |
ip.extra.appendNTimesAssumeCapacity(align_element, align_elements_len); | |
} | |
if (ini.any_comptime_fields) { | |
ip.extra.appendNTimesAssumeCapacity(0, comptime_elements_len); | |
} | |
if (ini.layout == .Auto) { | |
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Key.StructType.RuntimeOrder.unresolved), ini.fields_len); | |
} | |
ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
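// Field alignments above are stored one byte each, packed four per `u32`
// element of `extra`; `(fields_len + 3) / 4` is the ceiling division that
// sizes the trailer, and `align_element` pre-fills every lane with
// `Alignment.none`. A self-checking sketch of the same packing
// (illustrative values only):
test "example: four alignment bytes per u32 element" {
    const fields_len: u32 = 6;
    const elements_len = (fields_len + 3) / 4;
    try std.testing.expectEqual(@as(u32, 2), elements_len);

    // Fill one element with a repeated sentinel byte, as the code above
    // does with `Alignment.none`, then overwrite and read back one lane.
    var element: u32 = @bitCast([1]u8{0xff} ** 4);
    var bytes: [4]u8 = @bitCast(element);
    bytes[2] = 3; // field #2's alignment
    element = @bitCast(bytes);
    const lanes: [4]u8 = @bitCast(element);
    try std.testing.expectEqual(@as(u8, 3), lanes[2]);
}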
pub const AnonStructTypeInit = struct { | |
types: []const Index, | |
/// This may be empty, indicating this is a tuple. | |
names: []const NullTerminatedString, | |
/// These elements may be `none`, indicating runtime-known. | |
values: []const Index, | |
}; | |
pub fn getAnonStructType(ip: *InternPool, gpa: Allocator, ini: AnonStructTypeInit) Allocator.Error!Index { | |
assert(ini.types.len == ini.values.len); | |
for (ini.types) |elem| assert(elem != .none); | |
const prev_extra_len = ip.extra.items.len; | |
const fields_len: u32 = @intCast(ini.types.len); | |
try ip.extra.ensureUnusedCapacity( | |
gpa, | |
@typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3), | |
); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
const extra_index = ip.addExtraAssumeCapacity(TypeStructAnon{ | |
.fields_len = fields_len, | |
}); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.types)); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values)); | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const key: Key = .{ | |
.anon_struct_type = if (ini.names.len == 0) extraTypeTupleAnon(ip, extra_index) else k: { | |
assert(ini.names.len == ini.types.len); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); | |
break :k extraTypeStructAnon(ip, extra_index); | |
}, | |
}; | |
const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); | |
if (gop.found_existing) { | |
ip.extra.items.len = prev_extra_len; | |
return @enumFromInt(gop.index); | |
} | |
ip.items.appendAssumeCapacity(.{ | |
.tag = if (ini.names.len == 0) .type_tuple_anon else .type_struct_anon, | |
.data = extra_index, | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
/// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`. | |
pub const GetFuncTypeKey = struct { | |
param_types: []const Index, | |
return_type: Index, | |
comptime_bits: u32 = 0, | |
noalias_bits: u32 = 0, | |
/// `null` means generic. | |
alignment: ?Alignment = .none, | |
/// `null` means generic. | |
cc: ?std.builtin.CallingConvention = .Unspecified, | |
is_var_args: bool = false, | |
is_generic: bool = false, | |
is_noinline: bool = false, | |
section_is_generic: bool = false, | |
addrspace_is_generic: bool = false, | |
}; | |
pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocator.Error!Index { | |
// Validate input parameters. | |
assert(key.return_type != .none); | |
for (key.param_types) |param_type| assert(param_type != .none); | |
// The strategy here is to add the function type unconditionally, then to | |
// ask if it already exists, and if so, revert the lengths of the mutated | |
// arrays. This is similar to what `getOrPutTrailingString` does. | |
const prev_extra_len = ip.extra.items.len; | |
const params_len: u32 = @intCast(key.param_types.len); | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeFunction).Struct.fields.len + | |
@intFromBool(key.comptime_bits != 0) + | |
@intFromBool(key.noalias_bits != 0) + | |
params_len); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ | |
.params_len = params_len, | |
.return_type = key.return_type, | |
.flags = .{ | |
.alignment = key.alignment orelse .none, | |
.cc = key.cc orelse .Unspecified, | |
.is_var_args = key.is_var_args, | |
.has_comptime_bits = key.comptime_bits != 0, | |
.has_noalias_bits = key.noalias_bits != 0, | |
.is_generic = key.is_generic, | |
.is_noinline = key.is_noinline, | |
.align_is_generic = key.alignment == null, | |
.cc_is_generic = key.cc == null, | |
.section_is_generic = key.section_is_generic, | |
.addrspace_is_generic = key.addrspace_is_generic, | |
}, | |
}); | |
if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); | |
if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = try ip.map.getOrPutAdapted(gpa, Key{ | |
.func_type = extraFuncType(ip, func_type_extra_index), | |
}, adapter); | |
if (gop.found_existing) { | |
ip.extra.items.len = prev_extra_len; | |
return @enumFromInt(gop.index); | |
} | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_function, | |
.data = func_type_extra_index, | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
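// `Tag.TypeFunction` keeps rarely-used words out of the fixed header:
// `comptime_bits` and `noalias_bits` are appended only when nonzero, and
// the `has_comptime_bits`/`has_noalias_bits` flags tell the decoder which
// trailing words are present. A self-contained sketch of such a
// flag-guarded trailer (illustrative encoding, not the compiler's exact
// layout):
test "example: flag-guarded optional trailer words" {
    const gpa = std.testing.allocator;
    var extra: std.ArrayListUnmanaged(u32) = .{};
    defer extra.deinit(gpa);

    const comptime_bits: u32 = 0b101;
    const noalias_bits: u32 = 0;

    // Encode: one header word holds the presence flags; each trailer word
    // is appended only when its flag is set.
    const header: u32 = (@as(u32, @intFromBool(comptime_bits != 0)) << 1) |
        @intFromBool(noalias_bits != 0);
    try extra.append(gpa, header);
    if (comptime_bits != 0) try extra.append(gpa, comptime_bits);
    if (noalias_bits != 0) try extra.append(gpa, noalias_bits);

    // Decode: walk a cursor past exactly the words the flags promise.
    var cursor: usize = 1;
    var decoded_comptime: u32 = 0;
    var decoded_noalias: u32 = 0;
    if (extra.items[0] & 0b10 != 0) {
        decoded_comptime = extra.items[cursor];
        cursor += 1;
    }
    if (extra.items[0] & 0b01 != 0) {
        decoded_noalias = extra.items[cursor];
        cursor += 1;
    }
    try std.testing.expectEqual(comptime_bits, decoded_comptime);
    try std.testing.expectEqual(noalias_bits, decoded_noalias);
}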
pub fn getExternFunc(ip: *InternPool, gpa: Allocator, key: Key.ExternFunc) Allocator.Error!Index { | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .extern_func = key }, adapter); | |
if (gop.found_existing) return @enumFromInt(gop.index); | |
errdefer _ = ip.map.pop(); | |
const prev_extra_len = ip.extra.items.len; | |
const extra_index = try ip.addExtra(gpa, @as(Tag.ExternFunc, key)); | |
errdefer ip.extra.items.len = prev_extra_len; | |
try ip.items.append(gpa, .{ | |
.tag = .extern_func, | |
.data = extra_index, | |
}); | |
errdefer ip.items.len -= 1; | |
return @enumFromInt(ip.items.len - 1); | |
} | |
pub const GetFuncDeclKey = struct { | |
owner_decl: DeclIndex, | |
ty: Index, | |
zir_body_inst: TrackedInst.Index, | |
lbrace_line: u32, | |
rbrace_line: u32, | |
lbrace_column: u32, | |
rbrace_column: u32, | |
cc: ?std.builtin.CallingConvention, | |
is_noinline: bool, | |
}; | |
pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index { | |
// The strategy here is to add the function decl unconditionally, then to | |
// ask if it already exists, and if so, revert the lengths of the mutated | |
// arrays. This is similar to what `getOrPutTrailingString` does. | |
const prev_extra_len = ip.extra.items.len; | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
try ip.map.ensureUnusedCapacity(gpa, 1); | |
const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ | |
.analysis = .{ | |
.state = if (key.cc == .Inline) .inline_only else .none, | |
.is_cold = false, | |
.is_noinline = key.is_noinline, | |
.calls_or_awaits_errorable_fn = false, | |
.stack_alignment = .none, | |
.inferred_error_set = false, | |
}, | |
.owner_decl = key.owner_decl, | |
.ty = key.ty, | |
.zir_body_inst = key.zir_body_inst, | |
.lbrace_line = key.lbrace_line, | |
.rbrace_line = key.rbrace_line, | |
.lbrace_column = key.lbrace_column, | |
.rbrace_column = key.rbrace_column, | |
}); | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ | |
.func = extraFuncDecl(ip, func_decl_extra_index), | |
}, adapter); | |
if (gop.found_existing) { | |
ip.extra.items.len = prev_extra_len; | |
return @enumFromInt(gop.index); | |
} | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .func_decl, | |
.data = func_decl_extra_index, | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
pub const GetFuncDeclIesKey = struct { | |
owner_decl: DeclIndex, | |
param_types: []Index, | |
noalias_bits: u32, | |
comptime_bits: u32, | |
bare_return_type: Index, | |
/// null means generic. | |
cc: ?std.builtin.CallingConvention, | |
/// null means generic. | |
alignment: ?Alignment, | |
section_is_generic: bool, | |
addrspace_is_generic: bool, | |
is_var_args: bool, | |
is_generic: bool, | |
is_noinline: bool, | |
zir_body_inst: TrackedInst.Index, | |
lbrace_line: u32, | |
rbrace_line: u32, | |
lbrace_column: u32, | |
rbrace_column: u32, | |
}; | |
pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) Allocator.Error!Index { | |
// Validate input parameters. | |
assert(key.bare_return_type != .none); | |
for (key.param_types) |param_type| assert(param_type != .none); | |
// The strategy here is to add the function decl unconditionally, then to | |
// ask if it already exists, and if so, revert the lengths of the mutated | |
// arrays. This is similar to what `getOrPutTrailingString` does. | |
const prev_extra_len = ip.extra.items.len; | |
const params_len: u32 = @intCast(key.param_types.len); | |
try ip.map.ensureUnusedCapacity(gpa, 4); | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len + | |
1 + // inferred_error_set | |
@typeInfo(Tag.ErrorUnionType).Struct.fields.len + | |
@typeInfo(Tag.TypeFunction).Struct.fields.len + | |
@intFromBool(key.comptime_bits != 0) + | |
@intFromBool(key.noalias_bits != 0) + | |
params_len); | |
try ip.items.ensureUnusedCapacity(gpa, 4); | |
const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ | |
.analysis = .{ | |
.state = if (key.cc == .Inline) .inline_only else .none, | |
.is_cold = false, | |
.is_noinline = key.is_noinline, | |
.calls_or_awaits_errorable_fn = false, | |
.stack_alignment = .none, | |
.inferred_error_set = true, | |
}, | |
.owner_decl = key.owner_decl, | |
.ty = @enumFromInt(ip.items.len + 3), | |
.zir_body_inst = key.zir_body_inst, | |
.lbrace_line = key.lbrace_line, | |
.rbrace_line = key.rbrace_line, | |
.lbrace_column = key.lbrace_column, | |
.rbrace_column = key.rbrace_column, | |
}); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .func_decl, | |
.data = func_decl_extra_index, | |
}); | |
ip.extra.appendAssumeCapacity(@intFromEnum(Index.none)); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_error_union, | |
.data = ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ | |
.error_set_type = @enumFromInt(ip.items.len + 1), | |
.payload_type = key.bare_return_type, | |
}), | |
}); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_inferred_error_set, | |
.data = @intCast(ip.items.len - 2), | |
}); | |
const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ | |
.params_len = params_len, | |
.return_type = @enumFromInt(ip.items.len - 2), | |
.flags = .{ | |
.alignment = key.alignment orelse .none, | |
.cc = key.cc orelse .Unspecified, | |
.is_var_args = key.is_var_args, | |
.has_comptime_bits = key.comptime_bits != 0, | |
.has_noalias_bits = key.noalias_bits != 0, | |
.is_generic = key.is_generic, | |
.is_noinline = key.is_noinline, | |
.align_is_generic = key.alignment == null, | |
.cc_is_generic = key.cc == null, | |
.section_is_generic = key.section_is_generic, | |
.addrspace_is_generic = key.addrspace_is_generic, | |
}, | |
}); | |
if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); | |
if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_function, | |
.data = func_type_extra_index, | |
}); | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ | |
.func = extraFuncDecl(ip, func_decl_extra_index), | |
}, adapter); | |
if (!gop.found_existing) { | |
assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{ | |
.error_set_type = @enumFromInt(ip.items.len - 2), | |
.payload_type = key.bare_return_type, | |
} }, adapter).found_existing); | |
assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ | |
.inferred_error_set_type = @enumFromInt(ip.items.len - 4), | |
}, adapter).found_existing); | |
assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ | |
.func_type = extraFuncType(ip, func_type_extra_index), | |
}, adapter).found_existing); | |
return @enumFromInt(ip.items.len - 4); | |
} | |
// An existing function was found; undo the additions to our two arrays. | |
ip.items.len -= 4; | |
ip.extra.items.len = prev_extra_len; | |
return @enumFromInt(gop.index); | |
} | |
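// `getFuncDeclIes` above can write `ty = @enumFromInt(ip.items.len + 3)`
// before any of the four items exist because it appends exactly func_decl,
// error_union, inferred_error_set, and func_type in that order, so every
// cross-reference is computable up front. The same predicted-index pattern
// in miniature (illustrative only):
test "example: predicting indices of a batched append" {
    const gpa = std.testing.allocator;
    var items: std.ArrayListUnmanaged(u32) = .{};
    defer items.deinit(gpa);

    // Reserve capacity first so the three appends below cannot fail midway,
    // mirroring `ensureUnusedCapacity` + `appendAssumeCapacity` above.
    try items.ensureUnusedCapacity(gpa, 3);
    const first: u32 = @intCast(items.items.len);
    // Each element stores the index of the next one, computed before any
    // of them has actually been appended.
    items.appendAssumeCapacity(first + 1);
    items.appendAssumeCapacity(first + 2);
    items.appendAssumeCapacity(first); // the last points back at the first
    try std.testing.expectEqual(first + 1, items.items[first]);
}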
pub fn getErrorSetType( | |
ip: *InternPool, | |
gpa: Allocator, | |
names: []const NullTerminatedString, | |
) Allocator.Error!Index { | |
assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan)); | |
// The strategy here is to add the type unconditionally, then to ask if it | |
// already exists, and if so, revert the lengths of the mutated arrays. | |
// This is similar to what `getOrPutTrailingString` does. | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names.len); | |
const prev_extra_len = ip.extra.items.len; | |
errdefer ip.extra.items.len = prev_extra_len; | |
const predicted_names_map: MapIndex = @enumFromInt(ip.maps.items.len); | |
const error_set_extra_index = ip.addExtraAssumeCapacity(Tag.ErrorSet{ | |
.names_len = @intCast(names.len), | |
.names_map = predicted_names_map, | |
}); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(names)); | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = try ip.map.getOrPutAdapted(gpa, Key{ | |
.error_set_type = extraErrorSet(ip, error_set_extra_index), | |
}, adapter); | |
errdefer _ = ip.map.pop(); | |
if (gop.found_existing) { | |
ip.extra.items.len = prev_extra_len; | |
return @enumFromInt(gop.index); | |
} | |
try ip.items.append(gpa, .{ | |
.tag = .type_error_set, | |
.data = error_set_extra_index, | |
}); | |
errdefer ip.items.len -= 1; | |
const names_map = try ip.addMap(gpa, names.len); | |
assert(names_map == predicted_names_map); | |
errdefer _ = ip.maps.pop(); | |
addStringsToMap(ip, names_map, names); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
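// `getErrorSetType` asserts its `names` are pre-sorted so that the same set
// of error names always interns to the same index, regardless of the order
// the source declared them in. A sketch of that canonicalization with plain
// integers standing in for `NullTerminatedString` handles:
test "example: canonicalize by sorting before interning" {
    var names = [_]u32{ 42, 7, 19 };
    std.mem.sort(u32, &names, {}, std.sort.asc(u32));
    try std.testing.expect(std.sort.isSorted(u32, &names, {}, std.sort.asc(u32)));
    try std.testing.expectEqual(@as(u32, 7), names[0]);
}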
pub const GetFuncInstanceKey = struct { | |
/// Has the length of the instance function's parameter list (may be | |
/// shorter than `comptime_args`). | |
param_types: []Index, | |
/// Has the length of generic_owner's parameter list (may be longer than | |
/// `param_types`). | |
comptime_args: []const Index, | |
noalias_bits: u32, | |
bare_return_type: Index, | |
cc: std.builtin.CallingConvention, | |
alignment: Alignment, | |
section: OptionalNullTerminatedString, | |
is_noinline: bool, | |
generic_owner: Index, | |
inferred_error_set: bool, | |
}; | |
pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) Allocator.Error!Index { | |
if (arg.inferred_error_set) | |
return getFuncInstanceIes(ip, gpa, arg); | |
const func_ty = try ip.getFuncType(gpa, .{ | |
.param_types = arg.param_types, | |
.return_type = arg.bare_return_type, | |
.noalias_bits = arg.noalias_bits, | |
.alignment = arg.alignment, | |
.cc = arg.cc, | |
.is_noinline = arg.is_noinline, | |
}); | |
const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner); | |
assert(arg.comptime_args.len == ip.funcTypeParamsLen(ip.typeOf(generic_owner))); | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncInstance).Struct.fields.len + | |
arg.comptime_args.len); | |
const prev_extra_len = ip.extra.items.len; | |
errdefer ip.extra.items.len = prev_extra_len; | |
const func_extra_index = ip.addExtraAssumeCapacity(Tag.FuncInstance{ | |
.analysis = .{ | |
.state = if (arg.cc == .Inline) .inline_only else .none, | |
.is_cold = false, | |
.is_noinline = arg.is_noinline, | |
.calls_or_awaits_errorable_fn = false, | |
.stack_alignment = .none, | |
.inferred_error_set = false, | |
}, | |
// This is populated after we create the Decl below. It is not read | |
// by equality or hashing functions. | |
.owner_decl = undefined, | |
.ty = func_ty, | |
.branch_quota = 0, | |
.generic_owner = generic_owner, | |
}); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.comptime_args)); | |
const gop = try ip.map.getOrPutAdapted(gpa, Key{ | |
.func = extraFuncInstance(ip, func_extra_index), | |
}, KeyAdapter{ .intern_pool = ip }); | |
errdefer _ = ip.map.pop(); | |
if (gop.found_existing) { | |
ip.extra.items.len = prev_extra_len; | |
return @enumFromInt(gop.index); | |
} | |
const func_index: Index = @enumFromInt(ip.items.len); | |
try ip.items.append(gpa, .{ | |
.tag = .func_instance, | |
.data = func_extra_index, | |
}); | |
errdefer ip.items.len -= 1; | |
return finishFuncInstance( | |
ip, | |
gpa, | |
generic_owner, | |
func_index, | |
func_extra_index, | |
func_ty, | |
arg.section, | |
); | |
} | |
/// This function exists separately from `getFuncInstance` because it needs to | |
/// create 4 new items in the InternPool atomically before it can look for an | |
/// existing item in the map. | |
pub fn getFuncInstanceIes( | |
ip: *InternPool, | |
gpa: Allocator, | |
arg: GetFuncInstanceKey, | |
) Allocator.Error!Index { | |
// Validate input parameters. | |
assert(arg.inferred_error_set); | |
assert(arg.bare_return_type != .none); | |
for (arg.param_types) |param_type| assert(param_type != .none); | |
const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner); | |
// The strategy here is to add the function decl unconditionally, then to | |
// ask if it already exists, and if so, revert the lengths of the mutated | |
// arrays. This is similar to what `getOrPutTrailingString` does. | |
const prev_extra_len = ip.extra.items.len; | |
const params_len: u32 = @intCast(arg.param_types.len); | |
try ip.map.ensureUnusedCapacity(gpa, 4); | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncInstance).Struct.fields.len + | |
1 + // inferred_error_set | |
arg.comptime_args.len + | |
@typeInfo(Tag.ErrorUnionType).Struct.fields.len + | |
@typeInfo(Tag.TypeFunction).Struct.fields.len + | |
@intFromBool(arg.noalias_bits != 0) + | |
params_len); | |
try ip.items.ensureUnusedCapacity(gpa, 4); | |
const func_index: Index = @enumFromInt(ip.items.len); | |
const error_union_type: Index = @enumFromInt(ip.items.len + 1); | |
const error_set_type: Index = @enumFromInt(ip.items.len + 2); | |
const func_ty: Index = @enumFromInt(ip.items.len + 3); | |
const func_extra_index = ip.addExtraAssumeCapacity(Tag.FuncInstance{ | |
.analysis = .{ | |
.state = if (arg.cc == .Inline) .inline_only else .none, | |
.is_cold = false, | |
.is_noinline = arg.is_noinline, | |
.calls_or_awaits_errorable_fn = false, | |
.stack_alignment = .none, | |
.inferred_error_set = true, | |
}, | |
// This is populated after we create the Decl below. It is not read | |
// by equality or hashing functions. | |
.owner_decl = undefined, | |
.ty = func_ty, | |
.branch_quota = 0, | |
.generic_owner = generic_owner, | |
}); | |
ip.extra.appendAssumeCapacity(@intFromEnum(Index.none)); // resolved error set | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.comptime_args)); | |
const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ | |
.params_len = params_len, | |
.return_type = error_union_type, | |
.flags = .{ | |
.alignment = arg.alignment, | |
.cc = arg.cc, | |
.is_var_args = false, | |
.has_comptime_bits = false, | |
.has_noalias_bits = arg.noalias_bits != 0, | |
.is_generic = false, | |
.is_noinline = arg.is_noinline, | |
.align_is_generic = false, | |
.cc_is_generic = false, | |
.section_is_generic = false, | |
.addrspace_is_generic = false, | |
}, | |
}); | |
// no comptime_bits because has_comptime_bits is false | |
if (arg.noalias_bits != 0) ip.extra.appendAssumeCapacity(arg.noalias_bits); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.param_types)); | |
// TODO: add appendSliceAssumeCapacity to MultiArrayList. | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .func_instance, | |
.data = func_extra_index, | |
}); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_error_union, | |
.data = ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ | |
.error_set_type = error_set_type, | |
.payload_type = arg.bare_return_type, | |
}), | |
}); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_inferred_error_set, | |
.data = @intFromEnum(func_index), | |
}); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_function, | |
.data = func_type_extra_index, | |
}); | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ | |
.func = extraFuncInstance(ip, func_extra_index), | |
}, adapter); | |
if (gop.found_existing) { | |
// Hot path: undo the additions to our two arrays. | |
ip.items.len -= 4; | |
ip.extra.items.len = prev_extra_len; | |
return @enumFromInt(gop.index); | |
} | |
// Synchronize the map with items. | |
assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{ | |
.error_set_type = error_set_type, | |
.payload_type = arg.bare_return_type, | |
} }, adapter).found_existing); | |
assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ | |
.inferred_error_set_type = func_index, | |
}, adapter).found_existing); | |
assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ | |
.func_type = extraFuncType(ip, func_type_extra_index), | |
}, adapter).found_existing); | |
return finishFuncInstance( | |
ip, | |
gpa, | |
generic_owner, | |
func_index, | |
func_extra_index, | |
func_ty, | |
arg.section, | |
); | |
} | |
fn finishFuncInstance( | |
ip: *InternPool, | |
gpa: Allocator, | |
generic_owner: Index, | |
func_index: Index, | |
func_extra_index: u32, | |
func_ty: Index, | |
section: OptionalNullTerminatedString, | |
) Allocator.Error!Index { | |
const fn_owner_decl = ip.declPtr(ip.funcDeclOwner(generic_owner)); | |
const decl_index = try ip.createDecl(gpa, .{ | |
.name = undefined, | |
.src_namespace = fn_owner_decl.src_namespace, | |
.src_node = fn_owner_decl.src_node, | |
.src_line = fn_owner_decl.src_line, | |
.has_tv = true, | |
.owns_tv = true, | |
.ty = @import("type.zig").Type.fromInterned(func_ty), | |
.val = @import("Value.zig").fromInterned(func_index), | |
.alignment = .none, | |
.@"linksection" = section, | |
.@"addrspace" = fn_owner_decl.@"addrspace", | |
.analysis = .complete, | |
.zir_decl_index = fn_owner_decl.zir_decl_index, | |
.src_scope = fn_owner_decl.src_scope, | |
.is_pub = fn_owner_decl.is_pub, | |
.is_exported = fn_owner_decl.is_exported, | |
.alive = true, | |
.kind = .anon, | |
}); | |
errdefer ip.destroyDecl(gpa, decl_index); | |
// Populate the owner_decl field which was left undefined until now. | |
ip.extra.items[ | |
func_extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_decl").? | |
] = @intFromEnum(decl_index); | |
// TODO: improve this name | |
const decl = ip.declPtr(decl_index); | |
decl.name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ | |
fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), | |
}); | |
return func_index; | |
} | |
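// The generated decl name is the generic owner's name with an `__anon_<n>`
// suffix, where `<n>` is the new decl index. The same format string via
// `std.fmt`, with a plain string in place of the interned-name formatter
// (illustrative only):
test "example: __anon_ name formatting" {
    const gpa = std.testing.allocator;
    const name = try std.fmt.allocPrint(gpa, "{s}__anon_{d}", .{ "doTheThing", 1234 });
    defer gpa.free(name);
    try std.testing.expectEqualStrings("doTheThing__anon_1234", name);
}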
/// Provides an API for completing an enum type after calling `getIncompleteEnum`. | |
pub const IncompleteEnumType = struct { | |
index: Index, | |
tag_ty_index: u32, | |
names_map: MapIndex, | |
names_start: u32, | |
values_map: OptionalMapIndex, | |
values_start: u32, | |
pub fn setTagType(self: @This(), ip: *InternPool, tag_ty: Index) void { | |
assert(tag_ty == .noreturn_type or ip.isIntegerType(tag_ty)); | |
ip.extra.items[self.tag_ty_index] = @intFromEnum(tag_ty); | |
} | |
/// Returns the already-existing field with the same name, if any. | |
pub fn addFieldName( | |
self: @This(), | |
ip: *InternPool, | |
name: NullTerminatedString, | |
) ?u32 { | |
return ip.addFieldName(self.names_map, self.names_start, name); | |
} | |
/// Returns the already-existing field with the same value, if any. | |
/// The type of the value must be the integer tag type of the enum. | |
pub fn addFieldValue( | |
self: @This(), | |
ip: *InternPool, | |
value: Index, | |
) ?u32 { | |
assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[self.tag_ty_index]))); | |
const map = &ip.maps.items[@intFromEnum(self.values_map.unwrap().?)]; | |
const field_index = map.count(); | |
const indexes = ip.extra.items[self.values_start..][0..field_index]; | |
const adapter: Index.Adapter = .{ .indexes = @ptrCast(indexes) }; | |
const gop = map.getOrPutAssumeCapacityAdapted(value, adapter); | |
if (gop.found_existing) return @intCast(gop.index); | |
ip.extra.items[self.values_start + field_index] = @intFromEnum(value); | |
return null; | |
} | |
}; | |
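// Illustrative two-phase flow (hypothetical call site; `ip`, `gpa`, and the
// key/field arguments are assumed to be in scope):
//
//   var wip = try ip.getIncompleteEnum(gpa, incomplete_enum_key);
//   wip.setTagType(ip, int_tag_ty);
//   assert(wip.addFieldName(ip, field_name) == null); // null: not a duplicate
//   assert(wip.addFieldValue(ip, tag_value) == null); // value must be tag-typed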
/// This is used to create an enum type in the `InternPool`, with the ability | |
/// to update the tag type, field names, and field values later. | |
pub fn getIncompleteEnum( | |
ip: *InternPool, | |
gpa: Allocator, | |
enum_type: Key.IncompleteEnumType, | |
) Allocator.Error!IncompleteEnumType { | |
switch (enum_type.tag_mode) { | |
.auto => return getIncompleteEnumAuto(ip, gpa, enum_type), | |
.explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit), | |
.nonexhaustive => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_nonexhaustive), | |
} | |
} | |
fn getIncompleteEnumAuto( | |
ip: *InternPool, | |
gpa: Allocator, | |
enum_type: Key.IncompleteEnumType, | |
) Allocator.Error!IncompleteEnumType { | |
const int_tag_type = if (enum_type.tag_ty != .none) | |
enum_type.tag_ty | |
else | |
try ip.get(gpa, .{ .int_type = .{ | |
.bits = if (enum_type.fields_len == 0) 0 else std.math.log2_int_ceil(u32, enum_type.fields_len), | |
.signedness = .unsigned, | |
} }); | |
// We must keep the map in sync with `items`. The hash and equality functions | |
// for enum types only look at the decl field, which is present even in | |
// an `IncompleteEnumType`. | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter); | |
assert(!gop.found_existing); | |
const names_map = try ip.addMap(gpa, enum_type.fields_len); | |
const extra_fields_len: u32 = @typeInfo(EnumAuto).Struct.fields.len; | |
try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + enum_type.fields_len); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
const extra_index = ip.addExtraAssumeCapacity(EnumAuto{ | |
.decl = enum_type.decl, | |
.namespace = enum_type.namespace, | |
.int_tag_type = int_tag_type, | |
.names_map = names_map, | |
.fields_len = enum_type.fields_len, | |
.zir_index = enum_type.zir_index, | |
}); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_enum_auto, | |
.data = extra_index, | |
}); | |
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), enum_type.fields_len); | |
return .{ | |
.index = @enumFromInt(ip.items.len - 1), | |
.tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, | |
.names_map = names_map, | |
.names_start = extra_index + extra_fields_len, | |
.values_map = .none, | |
.values_start = undefined, | |
}; | |
} | |
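// An auto enum's integer tag gets the minimal unsigned width for its field
// count: `log2_int_ceil` rounds up, and a zero- or one-field enum gets a
// `u0` tag. Self-checking examples of that width computation:
test "example: minimal tag width for an auto enum" {
    // 5 variants need 3 bits (values 0 through 4); 1 variant needs 0 bits.
    try std.testing.expectEqual(@as(u32, 3), std.math.log2_int_ceil(u32, 5));
    try std.testing.expectEqual(@as(u32, 0), std.math.log2_int_ceil(u32, 1));
    try std.testing.expectEqual(@as(u32, 8), std.math.log2_int_ceil(u32, 256));
}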
fn getIncompleteEnumExplicit( | |
ip: *InternPool, | |
gpa: Allocator, | |
enum_type: Key.IncompleteEnumType, | |
tag: Tag, | |
) Allocator.Error!IncompleteEnumType { | |
// We must keep the map in sync with `items`. The hash and equality functions | |
// for enum types only look at the decl field, which is present even in | |
// an `IncompleteEnumType`. | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter); | |
assert(!gop.found_existing); | |
const names_map = try ip.addMap(gpa, enum_type.fields_len); | |
const values_map: OptionalMapIndex = if (!enum_type.has_values) .none else m: { | |
const values_map = try ip.addMap(gpa, enum_type.fields_len); | |
break :m values_map.toOptional(); | |
}; | |
const reserved_len = enum_type.fields_len + | |
if (enum_type.has_values) enum_type.fields_len else 0; | |
const extra_fields_len: u32 = @typeInfo(EnumExplicit).Struct.fields.len; | |
try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + reserved_len); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{ | |
.decl = enum_type.decl, | |
.namespace = enum_type.namespace, | |
.int_tag_type = enum_type.tag_ty, | |
.fields_len = enum_type.fields_len, | |
.names_map = names_map, | |
.values_map = values_map, | |
.zir_index = enum_type.zir_index, | |
}); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = tag, | |
.data = extra_index, | |
}); | |
// This reserves space for both the field names and the field values (if present). | |
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), reserved_len); | |
return .{ | |
.index = @enumFromInt(ip.items.len - 1), | |
.tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?, | |
.names_map = names_map, | |
.names_start = extra_index + extra_fields_len, | |
.values_map = values_map, | |
.values_start = extra_index + extra_fields_len + enum_type.fields_len, | |
}; | |
} | |
pub const GetEnumInit = struct { | |
decl: DeclIndex, | |
namespace: OptionalNamespaceIndex, | |
tag_ty: Index, | |
names: []const NullTerminatedString, | |
values: []const Index, | |
tag_mode: Key.EnumType.TagMode, | |
zir_index: TrackedInst.Index.Optional, | |
}; | |
pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Error!Index { | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = try ip.map.getOrPutAdapted(gpa, Key{ | |
.enum_type = .{ | |
// Only the decl is used for hashing and equality. | |
.decl = ini.decl, | |
.namespace = undefined, | |
.tag_ty = undefined, | |
.names = undefined, | |
.values = undefined, | |
.tag_mode = undefined, | |
.names_map = undefined, | |
.values_map = undefined, | |
.zir_index = undefined, | |
}, | |
}, adapter); | |
if (gop.found_existing) return @enumFromInt(gop.index); | |
errdefer _ = ip.map.pop(); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
assert(ini.tag_ty == .noreturn_type or ip.isIntegerType(ini.tag_ty)); | |
for (ini.values) |value| assert(ip.typeOf(value) == ini.tag_ty); | |
switch (ini.tag_mode) { | |
.auto => { | |
const names_map = try ip.addMap(gpa, ini.names.len); | |
addStringsToMap(ip, names_map, ini.names); | |
const fields_len: u32 = @intCast(ini.names.len); | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + | |
fields_len); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .type_enum_auto, | |
.data = ip.addExtraAssumeCapacity(EnumAuto{ | |
.decl = ini.decl, | |
.namespace = ini.namespace, | |
.int_tag_type = ini.tag_ty, | |
.names_map = names_map, | |
.fields_len = fields_len, | |
.zir_index = ini.zir_index, | |
}), | |
}); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); | |
return @enumFromInt(ip.items.len - 1); | |
}, | |
.explicit => return finishGetEnum(ip, gpa, ini, .type_enum_explicit), | |
.nonexhaustive => return finishGetEnum(ip, gpa, ini, .type_enum_nonexhaustive), | |
} | |
} | |
pub fn finishGetEnum( | |
ip: *InternPool, | |
gpa: Allocator, | |
ini: GetEnumInit, | |
tag: Tag, | |
) Allocator.Error!Index { | |
const names_map = try ip.addMap(gpa, ini.names.len); | |
addStringsToMap(ip, names_map, ini.names); | |
const values_map: OptionalMapIndex = if (ini.values.len == 0) .none else m: { | |
const values_map = try ip.addMap(gpa, ini.values.len); | |
addIndexesToMap(ip, values_map, ini.values); | |
break :m values_map.toOptional(); | |
}; | |
const fields_len: u32 = @intCast(ini.names.len); | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + | |
fields_len); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = tag, | |
.data = ip.addExtraAssumeCapacity(EnumExplicit{ | |
.decl = ini.decl, | |
.namespace = ini.namespace, | |
.int_tag_type = ini.tag_ty, | |
.fields_len = fields_len, | |
.names_map = names_map, | |
.values_map = values_map, | |
.zir_index = ini.zir_index, | |
}), | |
}); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); | |
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values)); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const index = ip.map.getIndexAdapted(key, adapter) orelse return null; | |
return @enumFromInt(index); | |
} | |
pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { | |
return ip.getIfExists(key).?; | |
} | |
fn addStringsToMap( | |
ip: *InternPool, | |
map_index: MapIndex, | |
strings: []const NullTerminatedString, | |
) void { | |
const map = &ip.maps.items[@intFromEnum(map_index)]; | |
const adapter: NullTerminatedString.Adapter = .{ .strings = strings }; | |
for (strings) |string| { | |
const gop = map.getOrPutAssumeCapacityAdapted(string, adapter); | |
assert(!gop.found_existing); | |
} | |
} | |
fn addIndexesToMap( | |
ip: *InternPool, | |
map_index: MapIndex, | |
indexes: []const Index, | |
) void { | |
const map = &ip.maps.items[@intFromEnum(map_index)]; | |
const adapter: Index.Adapter = .{ .indexes = indexes }; | |
for (indexes) |index| { | |
const gop = map.getOrPutAssumeCapacityAdapted(index, adapter); | |
assert(!gop.found_existing); | |
} | |
} | |
fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex { | |
const ptr = try ip.maps.addOne(gpa); | |
errdefer _ = ip.maps.pop(); | |
ptr.* = .{}; | |
try ptr.ensureTotalCapacity(gpa, cap); | |
return @enumFromInt(ip.maps.items.len - 1); | |
} | |
/// This operation only happens under compile error conditions. | |
/// Leak the index until the next garbage collection. | |
/// TODO: this is a bit problematic to implement, can we get away without it? | |
pub const remove = @compileError("InternPool.remove is not currently a supported operation; put a TODO there instead"); | |
fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { | |
const limbs_len = @as(u32, @intCast(limbs.len)); | |
try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = tag, | |
.data = ip.addLimbsExtraAssumeCapacity(Int{ | |
.ty = ty, | |
.limbs_len = limbs_len, | |
}), | |
}); | |
ip.addLimbsAssumeCapacity(limbs); | |
} | |
fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 { | |
const fields = @typeInfo(@TypeOf(extra)).Struct.fields; | |
try ip.extra.ensureUnusedCapacity(gpa, fields.len); | |
return ip.addExtraAssumeCapacity(extra); | |
} | |
fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { | |
const result = @as(u32, @intCast(ip.extra.items.len)); | |
inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { | |
ip.extra.appendAssumeCapacity(switch (field.type) { | |
Index, | |
DeclIndex, | |
NamespaceIndex, | |
OptionalNamespaceIndex, | |
MapIndex, | |
OptionalMapIndex, | |
RuntimeIndex, | |
String, | |
NullTerminatedString, | |
OptionalNullTerminatedString, | |
Tag.TypePointer.VectorIndex, | |
TrackedInst.Index, | |
TrackedInst.Index.Optional, | |
=> @intFromEnum(@field(extra, field.name)), | |
u32, | |
i32, | |
FuncAnalysis, | |
Tag.TypePointer.Flags, | |
Tag.TypeFunction.Flags, | |
Tag.TypePointer.PackedOffset, | |
Tag.TypeUnion.Flags, | |
Tag.TypeStruct.Flags, | |
Tag.TypeStructPacked.Flags, | |
Tag.Variable.Flags, | |
=> @bitCast(@field(extra, field.name)), | |
else => @compileError("bad field type: " ++ @typeName(field.type)), | |
}); | |
} | |
return result; | |
} | |
fn reserveLimbs(ip: *InternPool, gpa: Allocator, n: usize) !void { | |
switch (@sizeOf(Limb)) { | |
@sizeOf(u32) => try ip.extra.ensureUnusedCapacity(gpa, n), | |
@sizeOf(u64) => try ip.limbs.ensureUnusedCapacity(gpa, n), | |
else => @compileError("unsupported host"), | |
} | |
} | |
fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { | |
switch (@sizeOf(Limb)) { | |
@sizeOf(u32) => return addExtraAssumeCapacity(ip, extra), | |
@sizeOf(u64) => {}, | |
else => @compileError("unsupported host"), | |
} | |
const result = @as(u32, @intCast(ip.limbs.items.len)); | |
inline for (@typeInfo(@TypeOf(extra)).Struct.fields, 0..) |field, i| { | |
const new: u32 = switch (field.type) { | |
u32 => @field(extra, field.name), | |
Index => @intFromEnum(@field(extra, field.name)), | |
else => @compileError("bad field type: " ++ @typeName(field.type)), | |
}; | |
if (i % 2 == 0) { | |
ip.limbs.appendAssumeCapacity(new); | |
} else { | |
ip.limbs.items[ip.limbs.items.len - 1] |= @as(u64, new) << 32; | |
} | |
} | |
return result; | |
} | |
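// When `Limb` is `u64` (64-bit hosts), the loop above packs two 32-bit fields | |
// per limb: even-indexed fields occupy bits 0..32 and odd-indexed fields | |
// occupy bits 32..64. `limbData` below decodes exactly this layout. | |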
fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { | |
switch (@sizeOf(Limb)) { | |
@sizeOf(u32) => ip.extra.appendSliceAssumeCapacity(limbs), | |
@sizeOf(u64) => ip.limbs.appendSliceAssumeCapacity(limbs), | |
else => @compileError("unsupported host"), | |
} | |
} | |
fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: u32 } { | |
var result: T = undefined; | |
const fields = @typeInfo(T).Struct.fields; | |
inline for (fields, 0..) |field, i| { | |
const int32 = ip.extra.items[i + index]; | |
@field(result, field.name) = switch (field.type) { | |
Index, | |
DeclIndex, | |
NamespaceIndex, | |
OptionalNamespaceIndex, | |
MapIndex, | |
OptionalMapIndex, | |
RuntimeIndex, | |
String, | |
NullTerminatedString, | |
OptionalNullTerminatedString, | |
Tag.TypePointer.VectorIndex, | |
TrackedInst.Index, | |
TrackedInst.Index.Optional, | |
=> @enumFromInt(int32), | |
u32, | |
i32, | |
Tag.TypePointer.Flags, | |
Tag.TypeFunction.Flags, | |
Tag.TypePointer.PackedOffset, | |
Tag.TypeUnion.Flags, | |
Tag.TypeStruct.Flags, | |
Tag.TypeStructPacked.Flags, | |
Tag.Variable.Flags, | |
FuncAnalysis, | |
=> @bitCast(int32), | |
else => @compileError("bad field type: " ++ @typeName(field.type)), | |
}; | |
} | |
return .{ | |
.data = result, | |
.end = @intCast(index + fields.len), | |
}; | |
} | |
fn extraData(ip: *const InternPool, comptime T: type, index: usize) T { | |
return extraDataTrail(ip, T, index).data; | |
} | |
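// Illustrative sketch: `addExtra` encodes each 32-bit struct field into the | |
// `extra` array, and `extraData` decodes it back. The payload struct `S` is | |
// hypothetical, used only for this test. | |
test "addExtra and extraData round-trip" { | |
const gpa = std.testing.allocator; | |
var ip: InternPool = .{}; | |
defer ip.deinit(gpa); | |
const S = struct { a: u32, b: i32 }; | |
const index = try ip.addExtra(gpa, S{ .a = 42, .b = -7 }); | |
const decoded = ip.extraData(S, index); | |
try std.testing.expectEqual(@as(u32, 42), decoded.a); | |
try std.testing.expectEqual(@as(i32, -7), decoded.b); | |
} | |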
/// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. | |
fn limbData(ip: *const InternPool, comptime T: type, index: usize) T { | |
switch (@sizeOf(Limb)) { | |
@sizeOf(u32) => return extraData(ip, T, index), | |
@sizeOf(u64) => {}, | |
else => @compileError("unsupported host"), | |
} | |
var result: T = undefined; | |
inline for (@typeInfo(T).Struct.fields, 0..) |field, i| { | |
const host_int = ip.limbs.items[index + i / 2]; | |
const int32 = if (i % 2 == 0) | |
@as(u32, @truncate(host_int)) | |
else | |
@as(u32, @truncate(host_int >> 32)); | |
@field(result, field.name) = switch (field.type) { | |
u32 => int32, | |
Index => @as(Index, @enumFromInt(int32)), | |
else => @compileError("bad field type: " ++ @typeName(field.type)), | |
}; | |
} | |
return result; | |
} | |
/// Returns the Limb slice stored as trailing data after a payload. | |
fn limbSlice(ip: *const InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { | |
const field_count = @typeInfo(S).Struct.fields.len; | |
switch (@sizeOf(Limb)) { | |
@sizeOf(u32) => { | |
const start = limb_index + field_count; | |
return ip.extra.items[start..][0..len]; | |
}, | |
@sizeOf(u64) => { | |
const start = limb_index + @divExact(field_count, 2); | |
return ip.limbs.items[start..][0..len]; | |
}, | |
else => @compileError("unsupported host"), | |
} | |
} | |
const LimbsAsIndexes = struct { | |
start: u32, | |
len: u32, | |
}; | |
fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes { | |
const host_slice = switch (@sizeOf(Limb)) { | |
@sizeOf(u32) => ip.extra.items, | |
@sizeOf(u64) => ip.limbs.items, | |
else => @compileError("unsupported host"), | |
}; | |
// TODO: https://github.com/ziglang/zig/issues/1738 | |
return .{ | |
.start = @as(u32, @intCast(@divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb)))), | |
.len = @as(u32, @intCast(limbs.len)), | |
}; | |
} | |
/// This function converts Limb array indexes to a primitive slice type. | |
fn limbsIndexToSlice(ip: *const InternPool, limbs: LimbsAsIndexes) []const Limb { | |
return switch (@sizeOf(Limb)) { | |
@sizeOf(u32) => ip.extra.items[limbs.start..][0..limbs.len], | |
@sizeOf(u64) => ip.limbs.items[limbs.start..][0..limbs.len], | |
else => @compileError("unsupported host"), | |
}; | |
} | |
test "basic usage" { | |
const gpa = std.testing.allocator; | |
var ip: InternPool = .{}; | |
defer ip.deinit(gpa); | |
const i32_type = try ip.get(gpa, .{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 32, | |
} }); | |
const array_i32 = try ip.get(gpa, .{ .array_type = .{ | |
.len = 10, | |
.child = i32_type, | |
.sentinel = .none, | |
} }); | |
const another_i32_type = try ip.get(gpa, .{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 32, | |
} }); | |
try std.testing.expect(another_i32_type == i32_type); | |
const another_array_i32 = try ip.get(gpa, .{ .array_type = .{ | |
.len = 10, | |
.child = i32_type, | |
.sentinel = .none, | |
} }); | |
try std.testing.expect(another_array_i32 == array_i32); | |
} | |
pub fn childType(ip: *const InternPool, i: Index) Index { | |
return switch (ip.indexToKey(i)) { | |
.ptr_type => |ptr_type| ptr_type.child, | |
.vector_type => |vector_type| vector_type.child, | |
.array_type => |array_type| array_type.child, | |
.opt_type, .anyframe_type => |child| child, | |
else => unreachable, | |
}; | |
} | |
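// A minimal sketch of `childType`, reusing the array pattern from the | |
// "basic usage" test above. | |
test "childType of an array type" { | |
const gpa = std.testing.allocator; | |
var ip: InternPool = .{}; | |
defer ip.deinit(gpa); | |
const i32_type = try ip.get(gpa, .{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 32, | |
} }); | |
const array_i32 = try ip.get(gpa, .{ .array_type = .{ | |
.len = 10, | |
.child = i32_type, | |
.sentinel = .none, | |
} }); | |
try std.testing.expect(ip.childType(array_i32) == i32_type); | |
} | |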
/// Given a slice type, returns the type of the ptr field. | |
pub fn slicePtrType(ip: *const InternPool, i: Index) Index { | |
switch (i) { | |
.slice_const_u8_type => return .manyptr_const_u8_type, | |
.slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, | |
else => {}, | |
} | |
const item = ip.items.get(@intFromEnum(i)); | |
switch (item.tag) { | |
.type_slice => return @enumFromInt(item.data), | |
else => unreachable, // not a slice type | |
} | |
} | |
/// Given a slice value, returns the value of the ptr field. | |
pub fn slicePtr(ip: *const InternPool, i: Index) Index { | |
const item = ip.items.get(@intFromEnum(i)); | |
switch (item.tag) { | |
.ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, | |
else => unreachable, // not a slice value | |
} | |
} | |
/// Given a slice value, returns the value of the len field. | |
pub fn sliceLen(ip: *const InternPool, i: Index) Index { | |
const item = ip.items.get(@intFromEnum(i)); | |
switch (item.tag) { | |
.ptr_slice => return ip.extraData(PtrSlice, item.data).len, | |
else => unreachable, // not a slice value | |
} | |
} | |
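// Sketch: the statically-known slice types resolve to their many-pointer | |
// counterparts without consulting `items`. | |
test "slicePtrType of static slice types" { | |
var ip: InternPool = .{}; | |
defer ip.deinit(std.testing.allocator); | |
try std.testing.expect(ip.slicePtrType(.slice_const_u8_type) == .manyptr_const_u8_type); | |
try std.testing.expect(ip.slicePtrType(.slice_const_u8_sentinel_0_type) == .manyptr_const_u8_sentinel_0_type); | |
} | |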
/// Given an existing value, returns the same value but with the supplied type. | |
/// Only some combinations are allowed: | |
/// * identity coercion | |
/// * undef => any | |
/// * int <=> int | |
/// * int <=> enum | |
/// * enum_literal => enum | |
/// * float <=> float | |
/// * ptr <=> ptr | |
/// * opt ptr <=> ptr | |
/// * opt ptr <=> opt ptr | |
/// * int <=> ptr | |
/// * null_value => opt | |
/// * payload => opt | |
/// * error set <=> error set | |
/// * error union <=> error union | |
/// * error set => error union | |
/// * payload => error union | |
/// * fn <=> fn | |
/// * aggregate <=> aggregate (where children can also be coerced) | |
pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { | |
const old_ty = ip.typeOf(val); | |
if (old_ty == new_ty) return val; | |
const tags = ip.items.items(.tag); | |
switch (val) { | |
.undef => return ip.get(gpa, .{ .undef = new_ty }), | |
.null_value => { | |
if (ip.isOptionalType(new_ty)) return ip.get(gpa, .{ .opt = .{ | |
.ty = new_ty, | |
.val = .none, | |
} }); | |
if (ip.isPointerType(new_ty)) switch (ip.indexToKey(new_ty).ptr_type.flags.size) { | |
.One, .Many, .C => return ip.get(gpa, .{ .ptr = .{ | |
.ty = new_ty, | |
.addr = .{ .int = .zero_usize }, | |
} }), | |
.Slice => return ip.get(gpa, .{ .slice = .{ | |
.ty = new_ty, | |
.ptr = try ip.get(gpa, .{ .ptr = .{ | |
.ty = ip.slicePtrType(new_ty), | |
.addr = .{ .int = .zero_usize }, | |
} }), | |
.len = try ip.get(gpa, .{ .undef = .usize_type }), | |
} }), | |
}; | |
}, | |
else => switch (tags[@intFromEnum(val)]) { | |
.func_decl => return getCoercedFuncDecl(ip, gpa, val, new_ty), | |
.func_instance => return getCoercedFuncInstance(ip, gpa, val, new_ty), | |
.func_coerced => { | |
const extra_index = ip.items.items(.data)[@intFromEnum(val)]; | |
const func: Index = @enumFromInt( | |
ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncCoerced, "func").?], | |
); | |
switch (tags[@intFromEnum(func)]) { | |
.func_decl => return getCoercedFuncDecl(ip, gpa, val, new_ty), | |
.func_instance => return getCoercedFuncInstance(ip, gpa, val, new_ty), | |
else => unreachable, | |
} | |
}, | |
else => {}, | |
}, | |
} | |
switch (ip.indexToKey(val)) { | |
.undef => return ip.get(gpa, .{ .undef = new_ty }), | |
.extern_func => |extern_func| if (ip.isFunctionType(new_ty)) | |
return ip.get(gpa, .{ .extern_func = .{ | |
.ty = new_ty, | |
.decl = extern_func.decl, | |
.lib_name = extern_func.lib_name, | |
} }), | |
.func => unreachable, | |
.int => |int| switch (ip.indexToKey(new_ty)) { | |
.enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{ | |
.ty = new_ty, | |
.int = try ip.getCoerced(gpa, val, enum_type.tag_ty), | |
} }), | |
.ptr_type => return ip.get(gpa, .{ .ptr = .{ | |
.ty = new_ty, | |
.addr = .{ .int = try ip.getCoerced(gpa, val, .usize_type) }, | |
} }), | |
else => if (ip.isIntegerType(new_ty)) | |
return getCoercedInts(ip, gpa, int, new_ty), | |
}, | |
.float => |float| switch (ip.indexToKey(new_ty)) { | |
.simple_type => |simple| switch (simple) { | |
.f16, | |
.f32, | |
.f64, | |
.f80, | |
.f128, | |
.c_longdouble, | |
.comptime_float, | |
=> return ip.get(gpa, .{ .float = .{ | |
.ty = new_ty, | |
.storage = float.storage, | |
} }), | |
else => {}, | |
}, | |
else => {}, | |
}, | |
.enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) | |
return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), | |
.enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { | |
.enum_type => |enum_type| { | |
const index = enum_type.nameIndex(ip, enum_literal).?; | |
return ip.get(gpa, .{ .enum_tag = .{ | |
.ty = new_ty, | |
.int = if (enum_type.values.len != 0) | |
enum_type.values.get(ip)[index] | |
else | |
try ip.get(gpa, .{ .int = .{ | |
.ty = enum_type.tag_ty, | |
.storage = .{ .u64 = index }, | |
} }), | |
} }); | |
}, | |
else => {}, | |
}, | |
.slice => |slice| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size == .Slice) | |
return ip.get(gpa, .{ .slice = .{ | |
.ty = new_ty, | |
.ptr = try ip.getCoerced(gpa, slice.ptr, ip.slicePtrType(new_ty)), | |
.len = slice.len, | |
} }) | |
else if (ip.isIntegerType(new_ty)) | |
return ip.getCoerced(gpa, slice.ptr, new_ty), | |
.ptr => |ptr| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size != .Slice) | |
return ip.get(gpa, .{ .ptr = .{ | |
.ty = new_ty, | |
.addr = ptr.addr, | |
} }) | |
else if (ip.isIntegerType(new_ty)) | |
switch (ptr.addr) { | |
.int => |int| return ip.getCoerced(gpa, int, new_ty), | |
else => {}, | |
}, | |
.opt => |opt| switch (ip.indexToKey(new_ty)) { | |
.ptr_type => |ptr_type| return switch (opt.val) { | |
.none => switch (ptr_type.flags.size) { | |
.One, .Many, .C => try ip.get(gpa, .{ .ptr = .{ | |
.ty = new_ty, | |
.addr = .{ .int = .zero_usize }, | |
} }), | |
.Slice => try ip.get(gpa, .{ .slice = .{ | |
.ty = new_ty, | |
.ptr = try ip.get(gpa, .{ .ptr = .{ | |
.ty = ip.slicePtrType(new_ty), | |
.addr = .{ .int = .zero_usize }, | |
} }), | |
.len = try ip.get(gpa, .{ .undef = .usize_type }), | |
} }), | |
}, | |
else => |payload| try ip.getCoerced(gpa, payload, new_ty), | |
}, | |
.opt_type => |child_type| return try ip.get(gpa, .{ .opt = .{ | |
.ty = new_ty, | |
.val = switch (opt.val) { | |
.none => .none, | |
else => try ip.getCoerced(gpa, opt.val, child_type), | |
}, | |
} }), | |
else => {}, | |
}, | |
.err => |err| if (ip.isErrorSetType(new_ty)) | |
return ip.get(gpa, .{ .err = .{ | |
.ty = new_ty, | |
.name = err.name, | |
} }) | |
else if (ip.isErrorUnionType(new_ty)) | |
return ip.get(gpa, .{ .error_union = .{ | |
.ty = new_ty, | |
.val = .{ .err_name = err.name }, | |
} }), | |
.error_union => |error_union| if (ip.isErrorUnionType(new_ty)) | |
return ip.get(gpa, .{ .error_union = .{ | |
.ty = new_ty, | |
.val = error_union.val, | |
} }), | |
.aggregate => |aggregate| { | |
const new_len = @as(usize, @intCast(ip.aggregateTypeLen(new_ty))); | |
direct: { | |
const old_ty_child = switch (ip.indexToKey(old_ty)) { | |
inline .array_type, .vector_type => |seq_type| seq_type.child, | |
.anon_struct_type, .struct_type => break :direct, | |
else => unreachable, | |
}; | |
const new_ty_child = switch (ip.indexToKey(new_ty)) { | |
inline .array_type, .vector_type => |seq_type| seq_type.child, | |
.anon_struct_type, .struct_type => break :direct, | |
else => unreachable, | |
}; | |
if (old_ty_child != new_ty_child) break :direct; | |
// TODO: write something like getCoercedInts to avoid needing to dupe here | |
switch (aggregate.storage) { | |
.bytes => |bytes| { | |
const bytes_copy = try gpa.dupe(u8, bytes[0..new_len]); | |
defer gpa.free(bytes_copy); | |
return ip.get(gpa, .{ .aggregate = .{ | |
.ty = new_ty, | |
.storage = .{ .bytes = bytes_copy }, | |
} }); | |
}, | |
.elems => |elems| { | |
const elems_copy = try gpa.dupe(Index, elems[0..new_len]); | |
defer gpa.free(elems_copy); | |
return ip.get(gpa, .{ .aggregate = .{ | |
.ty = new_ty, | |
.storage = .{ .elems = elems_copy }, | |
} }); | |
}, | |
.repeated_elem => |elem| { | |
return ip.get(gpa, .{ .aggregate = .{ | |
.ty = new_ty, | |
.storage = .{ .repeated_elem = elem }, | |
} }); | |
}, | |
} | |
} | |
// The direct approach failed; we must recursively coerce the elements. | |
const agg_elems = try gpa.alloc(Index, new_len); | |
defer gpa.free(agg_elems); | |
// First, fill the buffer with the uncoerced elements. This avoids key | |
// lifetime issues: we never reference `aggregate` after we begin interning | |
// elements. | |
switch (aggregate.storage) { | |
.bytes => { | |
// We have to intern each value here, so unfortunately we can't easily avoid | |
// the repeated indexToKey calls. | |
for (agg_elems, 0..) |*elem, i| { | |
const x = ip.indexToKey(val).aggregate.storage.bytes[i]; | |
elem.* = try ip.get(gpa, .{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = x }, | |
} }); | |
} | |
}, | |
.elems => |elems| @memcpy(agg_elems, elems[0..new_len]), | |
.repeated_elem => |elem| @memset(agg_elems, elem), | |
} | |
// Now, coerce each element to its new type. | |
for (agg_elems, 0..) |*elem, i| { | |
const new_elem_ty = switch (ip.indexToKey(new_ty)) { | |
inline .array_type, .vector_type => |seq_type| seq_type.child, | |
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[i], | |
.struct_type => |struct_type| struct_type.field_types.get(ip)[i], | |
else => unreachable, | |
}; | |
elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty); | |
} | |
return ip.get(gpa, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } }); | |
}, | |
else => {}, | |
} | |
switch (ip.indexToKey(new_ty)) { | |
.opt_type => |child_type| switch (val) { | |
.null_value => return ip.get(gpa, .{ .opt = .{ | |
.ty = new_ty, | |
.val = .none, | |
} }), | |
else => return ip.get(gpa, .{ .opt = .{ | |
.ty = new_ty, | |
.val = try ip.getCoerced(gpa, val, child_type), | |
} }), | |
}, | |
.error_union_type => |error_union_type| return ip.get(gpa, .{ .error_union = .{ | |
.ty = new_ty, | |
.val = .{ .payload = try ip.getCoerced(gpa, val, error_union_type.payload_type) }, | |
} }), | |
else => {}, | |
} | |
if (std.debug.runtime_safety) { | |
std.debug.panic("InternPool.getCoerced of {s} not implemented from {s} to {s}", .{ | |
@tagName(ip.indexToKey(val)), | |
@tagName(ip.indexToKey(old_ty)), | |
@tagName(ip.indexToKey(new_ty)), | |
}); | |
} | |
unreachable; | |
} | |
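// A hedged usage sketch of `getCoerced`: widening a comptime-known integer | |
// value from a 32-bit to a 64-bit signed type. Assumes only APIs defined in | |
// this file. | |
test "getCoerced int to wider int" { | |
const gpa = std.testing.allocator; | |
var ip: InternPool = .{}; | |
defer ip.deinit(gpa); | |
const i32_type = try ip.get(gpa, .{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 32, | |
} }); | |
const i64_type = try ip.get(gpa, .{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 64, | |
} }); | |
const forty_two = try ip.get(gpa, .{ .int = .{ | |
.ty = i32_type, | |
.storage = .{ .u64 = 42 }, | |
} }); | |
const coerced = try ip.getCoerced(gpa, forty_two, i64_type); | |
try std.testing.expect(ip.typeOf(coerced) == i64_type); | |
} | |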
fn getCoercedFuncDecl(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { | |
const datas = ip.items.items(.data); | |
const extra_index = datas[@intFromEnum(val)]; | |
const prev_ty: Index = @enumFromInt( | |
ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncDecl, "ty").?], | |
); | |
if (new_ty == prev_ty) return val; | |
return getCoercedFunc(ip, gpa, val, new_ty); | |
} | |
fn getCoercedFuncInstance(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { | |
const datas = ip.items.items(.data); | |
const extra_index = datas[@intFromEnum(val)]; | |
const prev_ty: Index = @enumFromInt( | |
ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?], | |
); | |
if (new_ty == prev_ty) return val; | |
return getCoercedFunc(ip, gpa, val, new_ty); | |
} | |
fn getCoercedFunc(ip: *InternPool, gpa: Allocator, func: Index, ty: Index) Allocator.Error!Index { | |
const prev_extra_len = ip.extra.items.len; | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len); | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
try ip.map.ensureUnusedCapacity(gpa, 1); | |
const extra_index = ip.addExtraAssumeCapacity(Tag.FuncCoerced{ | |
.ty = ty, | |
.func = func, | |
}); | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ | |
.func = extraFuncCoerced(ip, extra_index), | |
}, adapter); | |
if (gop.found_existing) { | |
ip.extra.items.len = prev_extra_len; | |
return @enumFromInt(gop.index); | |
} | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .func_coerced, | |
.data = extra_index, | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
/// Asserts `val` has an integer type. | |
/// Assumes `new_ty` is an integer type. | |
pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index { | |
// The key cannot be passed directly to `get`, otherwise in the case of | |
// big_int storage, the limbs would be invalidated before they are read. | |
// Here we pre-reserve the limbs to ensure that the logic in `addInt` will | |
// not use an invalidated limbs pointer. | |
const new_storage: Key.Int.Storage = switch (int.storage) { | |
.u64, .i64, .lazy_align, .lazy_size => int.storage, | |
.big_int => |big_int| storage: { | |
const positive = big_int.positive; | |
const limbs = ip.limbsSliceToIndex(big_int.limbs); | |
// This line invalidates the limbs slice, but the indexes computed in the | |
// previous line are still correct. | |
try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); | |
break :storage .{ .big_int = .{ | |
.limbs = ip.limbsIndexToSlice(limbs), | |
.positive = positive, | |
} }; | |
}, | |
}; | |
return ip.get(gpa, .{ .int = .{ | |
.ty = new_ty, | |
.storage = new_storage, | |
} }); | |
} | |
pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { | |
assert(val != .none); | |
const tags = ip.items.items(.tag); | |
const datas = ip.items.items(.data); | |
switch (tags[@intFromEnum(val)]) { | |
.type_function => return extraFuncType(ip, datas[@intFromEnum(val)]), | |
else => return null, | |
} | |
} | |
/// includes .comptime_int_type | |
pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { | |
return switch (ty) { | |
.usize_type, | |
.isize_type, | |
.c_char_type, | |
.c_short_type, | |
.c_ushort_type, | |
.c_int_type, | |
.c_uint_type, | |
.c_long_type, | |
.c_ulong_type, | |
.c_longlong_type, | |
.c_ulonglong_type, | |
.comptime_int_type, | |
=> true, | |
else => switch (ip.items.items(.tag)[@intFromEnum(ty)]) { | |
.type_int_signed, | |
.type_int_unsigned, | |
=> true, | |
else => false, | |
}, | |
}; | |
} | |
/// does not include .enum_literal_type | |
pub fn isEnumType(ip: *const InternPool, ty: Index) bool { | |
return switch (ty) { | |
.atomic_order_type, | |
.atomic_rmw_op_type, | |
.calling_convention_type, | |
.address_space_type, | |
.float_mode_type, | |
.reduce_op_type, | |
.call_modifier_type, | |
=> true, | |
else => ip.indexToKey(ty) == .enum_type, | |
}; | |
} | |
pub fn isUnion(ip: *const InternPool, ty: Index) bool { | |
return ip.indexToKey(ty) == .union_type; | |
} | |
pub fn isFunctionType(ip: *const InternPool, ty: Index) bool { | |
return ip.indexToKey(ty) == .func_type; | |
} | |
pub fn isPointerType(ip: *const InternPool, ty: Index) bool { | |
return ip.indexToKey(ty) == .ptr_type; | |
} | |
pub fn isOptionalType(ip: *const InternPool, ty: Index) bool { | |
return ip.indexToKey(ty) == .opt_type; | |
} | |
/// includes .inferred_error_set_type | |
pub fn isErrorSetType(ip: *const InternPool, ty: Index) bool { | |
return switch (ty) { | |
.anyerror_type, .adhoc_inferred_error_set_type => true, | |
else => switch (ip.indexToKey(ty)) { | |
.error_set_type, .inferred_error_set_type => true, | |
else => false, | |
}, | |
}; | |
} | |
pub fn isInferredErrorSetType(ip: *const InternPool, ty: Index) bool { | |
return ty == .adhoc_inferred_error_set_type or ip.indexToKey(ty) == .inferred_error_set_type; | |
} | |
pub fn isErrorUnionType(ip: *const InternPool, ty: Index) bool { | |
return ip.indexToKey(ty) == .error_union_type; | |
} | |
pub fn isAggregateType(ip: *const InternPool, ty: Index) bool { | |
return switch (ip.indexToKey(ty)) { | |
.array_type, .vector_type, .anon_struct_type, .struct_type => true, | |
else => false, | |
}; | |
} | |
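// Sketch of the type predicates: statically-known indexes short-circuit, | |
// while dynamically interned types go through `indexToKey`. | |
test "type predicates" { | |
const gpa = std.testing.allocator; | |
var ip: InternPool = .{}; | |
defer ip.deinit(gpa); | |
const i32_type = try ip.get(gpa, .{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 32, | |
} }); | |
try std.testing.expect(ip.isIntegerType(i32_type)); | |
try std.testing.expect(ip.isIntegerType(.comptime_int_type)); | |
try std.testing.expect(ip.isErrorSetType(.anyerror_type)); | |
try std.testing.expect(!ip.isEnumType(i32_type)); | |
} | |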
pub fn errorUnionSet(ip: *const InternPool, ty: Index) Index { | |
return ip.indexToKey(ty).error_union_type.error_set_type; | |
} | |
pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index { | |
return ip.indexToKey(ty).error_union_type.payload_type; | |
} | |
/// This is only legal because the initializer is not part of the hash. | |
pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { | |
const item = ip.items.get(@intFromEnum(index)); | |
assert(item.tag == .variable); | |
ip.extra.items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?] = @intFromEnum(init_index); | |
} | |
pub fn dump(ip: *const InternPool) void { | |
dumpStatsFallible(ip, std.heap.page_allocator) catch return; | |
dumpAllFallible(ip) catch return; | |
} | |
fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { | |
const items_size = (1 + 4) * ip.items.len; | |
const extra_size = 4 * ip.extra.items.len; | |
const limbs_size = 8 * ip.limbs.items.len; | |
const decls_size = ip.allocated_decls.len * @sizeOf(Module.Decl); | |
// TODO: map overhead size is not taken into account | |
const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + decls_size; | |
std.debug.print( | |
\\InternPool size: {d} bytes | |
\\ {d} items: {d} bytes | |
\\ {d} extra: {d} bytes | |
\\ {d} limbs: {d} bytes | |
\\ {d} decls: {d} bytes | |
\\ | |
, .{ | |
total_size, | |
ip.items.len, | |
items_size, | |
ip.extra.items.len, | |
extra_size, | |
ip.limbs.items.len, | |
limbs_size, | |
ip.allocated_decls.len, | |
decls_size, | |
}); | |
const tags = ip.items.items(.tag); | |
const datas = ip.items.items(.data); | |
const TagStats = struct { | |
count: usize = 0, | |
bytes: usize = 0, | |
}; | |
var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena); | |
for (tags, datas) |tag, data| { | |
const gop = try counts.getOrPut(tag); | |
if (!gop.found_existing) gop.value_ptr.* = .{}; | |
gop.value_ptr.count += 1; | |
gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) { | |
.type_int_signed => 0, | |
.type_int_unsigned => 0, | |
.type_array_small => @sizeOf(Vector), | |
.type_array_big => @sizeOf(Array), | |
.type_vector => @sizeOf(Vector), | |
.type_pointer => @sizeOf(Tag.TypePointer), | |
.type_slice => 0, | |
.type_optional => 0, | |
.type_anyframe => 0, | |
.type_error_union => @sizeOf(Key.ErrorUnionType), | |
.type_anyerror_union => 0, | |
.type_error_set => b: { | |
const info = ip.extraData(Tag.ErrorSet, data); | |
break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len); | |
}, | |
.type_inferred_error_set => 0, | |
.type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit), | |
.type_enum_auto => @sizeOf(EnumAuto), | |
.type_opaque => @sizeOf(Key.OpaqueType), | |
.type_struct => b: { | |
const info = ip.extraData(Tag.TypeStruct, data); | |
var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len; | |
ints += info.fields_len; // types | |
if (!info.flags.is_tuple) { | |
ints += 1; // names_map | |
ints += info.fields_len; // names | |
} | |
if (info.flags.any_default_inits) | |
ints += info.fields_len; // inits | |
ints += @intFromBool(info.flags.has_namespace); // namespace | |
if (info.flags.any_aligned_fields) | |
ints += (info.fields_len + 3) / 4; // aligns | |
if (info.flags.any_comptime_fields) | |
ints += (info.fields_len + 31) / 32; // comptime bits | |
if (!info.flags.is_extern) | |
ints += info.fields_len; // runtime order | |
ints += info.fields_len; // offsets | |
break :b @sizeOf(u32) * ints; | |
}, | |
.type_struct_ns => @sizeOf(Module.Namespace), | |
.type_struct_anon => b: { | |
const info = ip.extraData(TypeStructAnon, data); | |
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); | |
}, | |
.type_struct_packed => b: { | |
const info = ip.extraData(Tag.TypeStructPacked, data); | |
break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + | |
info.fields_len + info.fields_len); | |
}, | |
.type_struct_packed_inits => b: { | |
const info = ip.extraData(Tag.TypeStructPacked, data); | |
break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + | |
info.fields_len + info.fields_len + info.fields_len); | |
}, | |
.type_tuple_anon => b: { | |
const info = ip.extraData(TypeStructAnon, data); | |
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); | |
}, | |
.type_union => b: { | |
const info = ip.extraData(Tag.TypeUnion, data); | |
const enum_info = ip.indexToKey(info.tag_ty).enum_type; | |
const fields_len: u32 = @intCast(enum_info.names.len); | |
const per_field = @sizeOf(u32); // field type | |
// 1 byte per field for alignment, rounded up to the nearest 4 bytes | |
const alignments = if (info.flags.any_aligned_fields) | |
((fields_len + 3) / 4) * 4 | |
else | |
0; | |
break :b @sizeOf(Tag.TypeUnion) + (fields_len * per_field) + alignments; | |
}, | |
.type_function => b: { | |
const info = ip.extraData(Tag.TypeFunction, data); | |
break :b @sizeOf(Tag.TypeFunction) + | |
(@sizeOf(Index) * info.params_len) + | |
(@as(u32, 4) * @intFromBool(info.flags.has_comptime_bits)) + | |
(@as(u32, 4) * @intFromBool(info.flags.has_noalias_bits)); | |
}, | |
.undef => 0, | |
.simple_type => 0, | |
.simple_value => 0, | |
.ptr_decl => @sizeOf(PtrDecl), | |
.ptr_mut_decl => @sizeOf(PtrMutDecl), | |
.ptr_anon_decl => @sizeOf(PtrAnonDecl), | |
.ptr_anon_decl_aligned => @sizeOf(PtrAnonDeclAligned), | |
.ptr_comptime_field => @sizeOf(PtrComptimeField), | |
.ptr_int => @sizeOf(PtrBase), | |
.ptr_eu_payload => @sizeOf(PtrBase), | |
.ptr_opt_payload => @sizeOf(PtrBase), | |
.ptr_elem => @sizeOf(PtrBaseIndex), | |
.ptr_field => @sizeOf(PtrBaseIndex), | |
.ptr_slice => @sizeOf(PtrSlice), | |
.opt_null => 0, | |
.opt_payload => @sizeOf(Tag.TypeValue), | |
.int_u8 => 0, | |
.int_u16 => 0, | |
.int_u32 => 0, | |
.int_i32 => 0, | |
.int_usize => 0, | |
.int_comptime_int_u32 => 0, | |
.int_comptime_int_i32 => 0, | |
.int_small => @sizeOf(IntSmall), | |
.int_positive, | |
.int_negative, | |
=> b: { | |
const int = ip.limbData(Int, data); | |
break :b @sizeOf(Int) + int.limbs_len * 8; | |
}, | |
.int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), | |
.error_set_error, .error_union_error => @sizeOf(Key.Error), | |
.error_union_payload => @sizeOf(Tag.TypeValue), | |
.enum_literal => 0, | |
.enum_tag => @sizeOf(Tag.EnumTag), | |
.bytes => b: { | |
const info = ip.extraData(Bytes, data); | |
const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty))); | |
break :b @sizeOf(Bytes) + len + | |
@intFromBool(ip.string_bytes.items[@intFromEnum(info.bytes) + len - 1] != 0); | |
}, | |
.aggregate => b: { | |
const info = ip.extraData(Tag.Aggregate, data); | |
const fields_len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); | |
break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); | |
}, | |
.repeated => @sizeOf(Repeated), | |
.float_f16 => 0, | |
.float_f32 => 0, | |
.float_f64 => @sizeOf(Float64), | |
.float_f80 => @sizeOf(Float80), | |
.float_f128 => @sizeOf(Float128), | |
.float_c_longdouble_f80 => @sizeOf(Float80), | |
.float_c_longdouble_f128 => @sizeOf(Float128), | |
.float_comptime_float => @sizeOf(Float128), | |
.variable => @sizeOf(Tag.Variable), | |
.extern_func => @sizeOf(Tag.ExternFunc), | |
.func_decl => @sizeOf(Tag.FuncDecl), | |
.func_instance => b: { | |
const info = ip.extraData(Tag.FuncInstance, data); | |
const ty = ip.typeOf(info.generic_owner); | |
const params_len = ip.indexToKey(ty).func_type.param_types.len; | |
break :b @sizeOf(Tag.FuncInstance) + @sizeOf(Index) * params_len; | |
}, | |
.func_coerced => @sizeOf(Tag.FuncCoerced), | |
.only_possible_value => 0, | |
.union_value => @sizeOf(Key.Union), | |
.memoized_call => b: { | |
const info = ip.extraData(MemoizedCall, data); | |
break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); | |
}, | |
}); | |
} | |
const SortContext = struct { | |
map: *std.AutoArrayHashMap(Tag, TagStats), | |
pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { | |
const values = ctx.map.values(); | |
return values[a_index].bytes > values[b_index].bytes; | |
//return values[a_index].count > values[b_index].count; | |
} | |
}; | |
counts.sort(SortContext{ .map = &counts }); | |
const len = @min(50, counts.count()); | |
std.debug.print(" top 50 tags:\n", .{}); | |
for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| { | |
std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{ | |
@tagName(tag), stats.count, stats.bytes, | |
}); | |
} | |
} | |
fn dumpAllFallible(ip: *const InternPool) anyerror!void { | |
const tags = ip.items.items(.tag); | |
const datas = ip.items.items(.data); | |
var bw = std.io.bufferedWriter(std.io.getStdErr().writer()); | |
const w = bw.writer(); | |
for (tags, datas, 0..) |tag, data, i| { | |
try w.print("${d} = {s}(", .{ i, @tagName(tag) }); | |
switch (tag) { | |
.simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}), | |
.simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}), | |
.type_int_signed, | |
.type_int_unsigned, | |
.type_array_small, | |
.type_array_big, | |
.type_vector, | |
.type_pointer, | |
.type_optional, | |
.type_anyframe, | |
.type_error_union, | |
.type_anyerror_union, | |
.type_error_set, | |
.type_inferred_error_set, | |
.type_enum_explicit, | |
.type_enum_nonexhaustive, | |
.type_enum_auto, | |
.type_opaque, | |
.type_struct, | |
.type_struct_ns, | |
.type_struct_anon, | |
.type_struct_packed, | |
.type_struct_packed_inits, | |
.type_tuple_anon, | |
.type_union, | |
.type_function, | |
.undef, | |
.ptr_decl, | |
.ptr_mut_decl, | |
.ptr_anon_decl, | |
.ptr_anon_decl_aligned, | |
.ptr_comptime_field, | |
.ptr_int, | |
.ptr_eu_payload, | |
.ptr_opt_payload, | |
.ptr_elem, | |
.ptr_field, | |
.ptr_slice, | |
.opt_payload, | |
.int_u8, | |
.int_u16, | |
.int_u32, | |
.int_i32, | |
.int_usize, | |
.int_comptime_int_u32, | |
.int_comptime_int_i32, | |
.int_small, | |
.int_positive, | |
.int_negative, | |
.int_lazy_align, | |
.int_lazy_size, | |
.error_set_error, | |
.error_union_error, | |
.error_union_payload, | |
.enum_literal, | |
.enum_tag, | |
.bytes, | |
.aggregate, | |
.repeated, | |
.float_f16, | |
.float_f32, | |
.float_f64, | |
.float_f80, | |
.float_f128, | |
.float_c_longdouble_f80, | |
.float_c_longdouble_f128, | |
.float_comptime_float, | |
.variable, | |
.extern_func, | |
.func_decl, | |
.func_instance, | |
.func_coerced, | |
.union_value, | |
.memoized_call, | |
=> try w.print("{d}", .{data}), | |
.opt_null, | |
.type_slice, | |
.only_possible_value, | |
=> try w.print("${d}", .{data}), | |
} | |
try w.writeAll(")\n"); | |
} | |
try bw.flush(); | |
} | |
pub fn dumpGenericInstances(ip: *const InternPool, allocator: Allocator) void { | |
ip.dumpGenericInstancesFallible(allocator) catch return; | |
} | |
pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) anyerror!void { | |
var arena_allocator = std.heap.ArenaAllocator.init(allocator); | |
defer arena_allocator.deinit(); | |
const arena = arena_allocator.allocator(); | |
var bw = std.io.bufferedWriter(std.io.getStdErr().writer()); | |
const w = bw.writer(); | |
var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Index)) = .{}; | |
const datas = ip.items.items(.data); | |
for (ip.items.items(.tag), 0..) |tag, i| { | |
if (tag != .func_instance) continue; | |
const info = ip.extraData(Tag.FuncInstance, datas[i]); | |
const gop = try instances.getOrPut(arena, info.generic_owner); | |
if (!gop.found_existing) gop.value_ptr.* = .{}; | |
try gop.value_ptr.append(arena, @enumFromInt(i)); | |
} | |
const SortContext = struct { | |
values: []std.ArrayListUnmanaged(Index), | |
pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { | |
return ctx.values[a_index].items.len > ctx.values[b_index].items.len; | |
} | |
}; | |
instances.sort(SortContext{ .values = instances.values() }); | |
var it = instances.iterator(); | |
while (it.next()) |entry| { | |
const generic_fn_owner_decl = ip.declPtrConst(ip.funcDeclOwner(entry.key_ptr.*)); | |
try w.print("{} ({}): \n", .{ generic_fn_owner_decl.name.fmt(ip), entry.value_ptr.items.len }); | |
for (entry.value_ptr.items) |index| { | |
const func = ip.extraFuncInstance(datas[@intFromEnum(index)]); | |
const owner_decl = ip.declPtrConst(func.owner_decl); | |
try w.print(" {}: (", .{owner_decl.name.fmt(ip)}); | |
for (func.comptime_args.get(ip)) |arg| { | |
if (arg != .none) { | |
const key = ip.indexToKey(arg); | |
try w.print(" {} ", .{key}); | |
} | |
} | |
try w.writeAll(")\n"); | |
} | |
} | |
try bw.flush(); | |
} | |
pub fn declPtr(ip: *InternPool, index: DeclIndex) *Module.Decl { | |
return ip.allocated_decls.at(@intFromEnum(index)); | |
} | |
pub fn declPtrConst(ip: *const InternPool, index: DeclIndex) *const Module.Decl { | |
return ip.allocated_decls.at(@intFromEnum(index)); | |
} | |
pub fn namespacePtr(ip: *InternPool, index: NamespaceIndex) *Module.Namespace { | |
return ip.allocated_namespaces.at(@intFromEnum(index)); | |
} | |
pub fn createDecl( | |
ip: *InternPool, | |
gpa: Allocator, | |
initialization: Module.Decl, | |
) Allocator.Error!DeclIndex { | |
if (ip.decls_free_list.popOrNull()) |index| { | |
ip.allocated_decls.at(@intFromEnum(index)).* = initialization; | |
return index; | |
} | |
const ptr = try ip.allocated_decls.addOne(gpa); | |
ptr.* = initialization; | |
return @enumFromInt(ip.allocated_decls.len - 1); | |
} | |
pub fn destroyDecl(ip: *InternPool, gpa: Allocator, index: DeclIndex) void { | |
ip.declPtr(index).* = undefined; | |
ip.decls_free_list.append(gpa, index) catch { | |
// In order to keep `destroyDecl` a non-fallible function, we ignore memory | |
// allocation failures here, instead leaking the Decl until garbage collection. | |
}; | |
} | |
pub fn createNamespace( | |
ip: *InternPool, | |
gpa: Allocator, | |
initialization: Module.Namespace, | |
) Allocator.Error!NamespaceIndex { | |
if (ip.namespaces_free_list.popOrNull()) |index| { | |
ip.allocated_namespaces.at(@intFromEnum(index)).* = initialization; | |
return index; | |
} | |
const ptr = try ip.allocated_namespaces.addOne(gpa); | |
ptr.* = initialization; | |
return @enumFromInt(ip.allocated_namespaces.len - 1); | |
} | |
pub fn destroyNamespace(ip: *InternPool, gpa: Allocator, index: NamespaceIndex) void { | |
ip.namespacePtr(index).* = .{ | |
.parent = undefined, | |
.file_scope = undefined, | |
.decl_index = undefined, | |
}; | |
ip.namespaces_free_list.append(gpa, index) catch { | |
// In order to keep `destroyNamespace` a non-fallible function, we ignore memory | |
// allocation failures here, instead leaking the Namespace until garbage collection. | |
}; | |
} | |
pub fn getOrPutString( | |
ip: *InternPool, | |
gpa: Allocator, | |
s: []const u8, | |
) Allocator.Error!NullTerminatedString { | |
try ip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1); | |
ip.string_bytes.appendSliceAssumeCapacity(s); | |
ip.string_bytes.appendAssumeCapacity(0); | |
return ip.getOrPutTrailingString(gpa, s.len + 1); | |
} | |
pub fn getOrPutStringFmt( | |
ip: *InternPool, | |
gpa: Allocator, | |
comptime format: []const u8, | |
args: anytype, | |
) Allocator.Error!NullTerminatedString { | |
// ensure that references to string_bytes in args do not get invalidated | |
const len: usize = @intCast(std.fmt.count(format, args) + 1); | |
try ip.string_bytes.ensureUnusedCapacity(gpa, len); | |
ip.string_bytes.writer(undefined).print(format, args) catch unreachable; | |
ip.string_bytes.appendAssumeCapacity(0); | |
return ip.getOrPutTrailingString(gpa, len); | |
} | |
pub fn getOrPutStringOpt( | |
ip: *InternPool, | |
gpa: Allocator, | |
optional_string: ?[]const u8, | |
) Allocator.Error!OptionalNullTerminatedString { | |
const s = optional_string orelse return .none; | |
const interned = try getOrPutString(ip, gpa, s); | |
return interned.toOptional(); | |
} | |
/// Uses the last len bytes of ip.string_bytes as the key. | |
pub fn getOrPutTrailingString( | |
ip: *InternPool, | |
gpa: Allocator, | |
len: usize, | |
) Allocator.Error!NullTerminatedString { | |
const string_bytes = &ip.string_bytes; | |
const str_index: u32 = @intCast(string_bytes.items.len - len); | |
if (len > 0 and string_bytes.getLast() == 0) { | |
_ = string_bytes.pop(); | |
} else { | |
try string_bytes.ensureUnusedCapacity(gpa, 1); | |
} | |
const key: []const u8 = string_bytes.items[str_index..]; | |
const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{ | |
.bytes = string_bytes, | |
}, std.hash_map.StringIndexContext{ | |
.bytes = string_bytes, | |
}); | |
if (gop.found_existing) { | |
string_bytes.shrinkRetainingCapacity(str_index); | |
return @enumFromInt(gop.key_ptr.*); | |
} else { | |
gop.key_ptr.* = str_index; | |
string_bytes.appendAssumeCapacity(0); | |
return @enumFromInt(str_index); | |
} | |
} | |
/// Uses the last len bytes of ip.string_bytes as the key. | |
pub fn getTrailingAggregate( | |
ip: *InternPool, | |
gpa: Allocator, | |
ty: Index, | |
len: usize, | |
) Allocator.Error!Index { | |
try ip.items.ensureUnusedCapacity(gpa, 1); | |
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); | |
const str: String = @enumFromInt(ip.string_bytes.items.len - len); | |
const adapter: KeyAdapter = .{ .intern_pool = ip }; | |
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .aggregate = .{ | |
.ty = ty, | |
.storage = .{ .bytes = ip.string_bytes.items[@intFromEnum(str)..] }, | |
} }, adapter); | |
if (gop.found_existing) return @enumFromInt(gop.index); | |
ip.items.appendAssumeCapacity(.{ | |
.tag = .bytes, | |
.data = ip.addExtraAssumeCapacity(Bytes{ | |
.ty = ty, | |
.bytes = str, | |
}), | |
}); | |
return @enumFromInt(ip.items.len - 1); | |
} | |
pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString { | |
if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{ | |
.bytes = &ip.string_bytes, | |
})) |index| { | |
return @as(NullTerminatedString, @enumFromInt(index)).toOptional(); | |
} else { | |
return .none; | |
} | |
} | |
pub fn stringToSlice(ip: *const InternPool, s: NullTerminatedString) [:0]const u8 { | |
const string_bytes = ip.string_bytes.items; | |
const start = @intFromEnum(s); | |
var end: usize = start; | |
while (string_bytes[end] != 0) end += 1; | |
return string_bytes[start..end :0]; | |
} | |
pub fn stringToSliceUnwrap(ip: *const InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { | |
return ip.stringToSlice(s.unwrap() orelse return null); | |
} | |
pub fn stringEqlSlice(ip: *const InternPool, a: NullTerminatedString, b: []const u8) bool { | |
return std.mem.eql(u8, stringToSlice(ip, a), b); | |
} | |
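// Sketch of string interning: identical byte sequences intern to the same | |
// `NullTerminatedString`, and `stringToSlice` recovers the original bytes. | |
test "getOrPutString deduplicates" { | |
const gpa = std.testing.allocator; | |
var ip: InternPool = .{}; | |
defer ip.deinit(gpa); | |
const a = try ip.getOrPutString(gpa, "hello"); | |
const b = try ip.getOrPutString(gpa, "hello"); | |
try std.testing.expect(a == b); | |
try std.testing.expect(ip.stringEqlSlice(a, "hello")); | |
try std.testing.expectEqualStrings("hello", ip.stringToSlice(a)); | |
} | |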
pub fn typeOf(ip: *const InternPool, index: Index) Index { | |
// This optimization of static keys is required so that typeOf can be called | |
// on static keys that haven't been added yet during static key initialization. | |
// An alternative would be to topologically sort the static keys, but this would | |
// mean that the range of type indices would not be dense. | |
return switch (index) { | |
.u0_type, | |
.i0_type, | |
.u1_type, | |
.u8_type, | |
.i8_type, | |
.u16_type, | |
.i16_type, | |
.u29_type, | |
.u32_type, | |
.i32_type, | |
.u64_type, | |
.i64_type, | |
.u80_type, | |
.u128_type, | |
.i128_type, | |
.usize_type, | |
.isize_type, | |
.c_char_type, | |
.c_short_type, | |
.c_ushort_type, | |
.c_int_type, | |
.c_uint_type, | |
.c_long_type, | |
.c_ulong_type, | |
.c_longlong_type, | |
.c_ulonglong_type, | |
.c_longdouble_type, | |
.f16_type, | |
.f32_type, | |
.f64_type, | |
.f80_type, | |
.f128_type, | |
.anyopaque_type, | |
.bool_type, | |
.void_type, | |
.type_type, | |
.anyerror_type, | |
.comptime_int_type, | |
.comptime_float_type, | |
.noreturn_type, | |
.anyframe_type, | |
.null_type, | |
.undefined_type, | |
.enum_literal_type, | |
.atomic_order_type, | |
.atomic_rmw_op_type, | |
.calling_convention_type, | |
.address_space_type, | |
.float_mode_type, | |
.reduce_op_type, | |
.call_modifier_type, | |
.prefetch_options_type, | |
.export_options_type, | |
.extern_options_type, | |
.type_info_type, | |
.manyptr_u8_type, | |
.manyptr_const_u8_type, | |
.manyptr_const_u8_sentinel_0_type, | |
.single_const_pointer_to_comptime_int_type, | |
.slice_const_u8_type, | |
.slice_const_u8_sentinel_0_type, | |
.optional_noreturn_type, | |
.anyerror_void_error_union_type, | |
.adhoc_inferred_error_set_type, | |
.generic_poison_type, | |
.empty_struct_type, | |
=> .type_type, | |
.undef => .undefined_type, | |
.zero, .one, .negative_one => .comptime_int_type, | |
.zero_usize, .one_usize => .usize_type, | |
.zero_u8, .one_u8, .four_u8 => .u8_type, | |
.calling_convention_c, .calling_convention_inline => .calling_convention_type, | |
.void_value => .void_type, | |
.unreachable_value => .noreturn_type, | |
.null_value => .null_type, | |
.bool_true, .bool_false => .bool_type, | |
.empty_struct => .empty_struct_type, | |
.generic_poison => .generic_poison_type, | |
// This optimization on tags is needed so that indexToKey can call | |
// typeOf without being recursive. | |
_ => switch (ip.items.items(.tag)[@intFromEnum(index)]) { | |
.type_int_signed, | |
.type_int_unsigned, | |
.type_array_big, | |
.type_array_small, | |
.type_vector, | |
.type_pointer, | |
.type_slice, | |
.type_optional, | |
.type_anyframe, | |
.type_error_union, | |
.type_anyerror_union, | |
.type_error_set, | |
.type_inferred_error_set, | |
.type_enum_auto, | |
.type_enum_explicit, | |
.type_enum_nonexhaustive, | |
.simple_type, | |
.type_opaque, | |
.type_struct, | |
.type_struct_ns, | |
.type_struct_anon, | |
.type_struct_packed, | |
.type_struct_packed_inits, | |
.type_tuple_anon, | |
.type_union, | |
.type_function, | |
=> .type_type, | |
.undef, | |
.opt_null, | |
.only_possible_value, | |
=> @enumFromInt(ip.items.items(.data)[@intFromEnum(index)]), | |
.simple_value => unreachable, // handled via Index above | |
inline .ptr_decl, | |
.ptr_mut_decl, | |
.ptr_anon_decl, | |
.ptr_anon_decl_aligned, | |
.ptr_comptime_field, | |
.ptr_int, | |
.ptr_eu_payload, | |
.ptr_opt_payload, | |
.ptr_elem, | |
.ptr_field, | |
.ptr_slice, | |
.opt_payload, | |
.error_union_payload, | |
.int_small, | |
.int_lazy_align, | |
.int_lazy_size, | |
.error_set_error, | |
.error_union_error, | |
.enum_tag, | |
.variable, | |
.extern_func, | |
.func_decl, | |
.func_instance, | |
.func_coerced, | |
.union_value, | |
.bytes, | |
.aggregate, | |
.repeated, | |
=> |t| { | |
const extra_index = ip.items.items(.data)[@intFromEnum(index)]; | |
const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; | |
return @enumFromInt(ip.extra.items[extra_index + field_index]); | |
}, | |
.int_u8 => .u8_type, | |
.int_u16 => .u16_type, | |
.int_u32 => .u32_type, | |
.int_i32 => .i32_type, | |
.int_usize => .usize_type, | |
.int_comptime_int_u32, | |
.int_comptime_int_i32, | |
=> .comptime_int_type, | |
// Note these are stored in limbs data, not extra data. | |
.int_positive, | |
.int_negative, | |
=> ip.limbData(Int, ip.items.items(.data)[@intFromEnum(index)]).ty, | |
.enum_literal => .enum_literal_type, | |
.float_f16 => .f16_type, | |
.float_f32 => .f32_type, | |
.float_f64 => .f64_type, | |
.float_f80 => .f80_type, | |
.float_f128 => .f128_type, | |
.float_c_longdouble_f80, | |
.float_c_longdouble_f128, | |
=> .c_longdouble_type, | |
.float_comptime_float => .comptime_float_type, | |
.memoized_call => unreachable, | |
}, | |
.var_args_param_type => unreachable, | |
.none => unreachable, | |
}; | |
} | |
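// Sketch: statically-known indexes resolve through the big switch above | |
// without touching `items`. | |
test "typeOf static values" { | |
var ip: InternPool = .{}; | |
defer ip.deinit(std.testing.allocator); | |
try std.testing.expect(ip.typeOf(.bool_true) == .bool_type); | |
try std.testing.expect(ip.typeOf(.zero_usize) == .usize_type); | |
try std.testing.expect(ip.typeOf(.u8_type) == .type_type); | |
} | |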
/// Assumes that the enum's field indexes equal its value tags. | |
pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E { | |
const int = ip.indexToKey(i).enum_tag.int; | |
return @enumFromInt(ip.indexToKey(int).int.storage.u64); | |
} | |
pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { | |
return switch (ip.indexToKey(ty)) { | |
.struct_type => |struct_type| struct_type.field_types.len, | |
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len, | |
.array_type => |array_type| array_type.len, | |
.vector_type => |vector_type| vector_type.len, | |
else => unreachable, | |
}; | |
} | |
pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { | |
return switch (ip.indexToKey(ty)) { | |
.struct_type => |struct_type| struct_type.field_types.len, | |
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len, | |
.array_type => |array_type| array_type.len + @intFromBool(array_type.sentinel != .none), | |
.vector_type => |vector_type| vector_type.len, | |
else => unreachable, | |
}; | |
} | |
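// Sketch: for arrays, the sentinel (when present) is counted only by the | |
// `IncludingSentinel` variant. | |
test "aggregateTypeLen for arrays" { | |
const gpa = std.testing.allocator; | |
var ip: InternPool = .{}; | |
defer ip.deinit(gpa); | |
const i32_type = try ip.get(gpa, .{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 32, | |
} }); | |
const array_i32 = try ip.get(gpa, .{ .array_type = .{ | |
.len = 10, | |
.child = i32_type, | |
.sentinel = .none, | |
} }); | |
try std.testing.expect(ip.aggregateTypeLen(array_i32) == 10); | |
try std.testing.expect(ip.aggregateTypeLenIncludingSentinel(array_i32) == 10); | |
} | |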
pub fn funcTypeReturnType(ip: *const InternPool, ty: Index) Index { | |
const item = ip.items.get(@intFromEnum(ty)); | |
const child_item = switch (item.tag) { | |
.type_pointer => ip.items.get(ip.extra.items[ | |
item.data + std.meta.fieldIndex(Tag.TypePointer, "child").? | |
]), | |
.type_function => item, | |
else => unreachable, | |
}; | |
assert(child_item.tag == .type_function); | |
return @enumFromInt(ip.extra.items[ | |
child_item.data + std.meta.fieldIndex(Tag.TypeFunction, "return_type").? | |
]); | |
} | |
pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { | |
return switch (ty) { | |
.noreturn_type => true, | |
else => switch (ip.items.items(.tag)[@intFromEnum(ty)]) { | |
.type_error_set => ip.extra.items[ip.items.items(.data)[@intFromEnum(ty)] + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] == 0, | |
else => false, | |
}, | |
}; | |
} | |
pub fn isUndef(ip: *const InternPool, val: Index) bool { | |
return val == .undef or ip.items.items(.tag)[@intFromEnum(val)] == .undef; | |
} | |
pub fn isVariable(ip: *const InternPool, val: Index) bool { | |
return ip.items.items(.tag)[@intFromEnum(val)] == .variable; | |
} | |
pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex { | |
var base = @intFromEnum(val); | |
while (true) { | |
switch (ip.items.items(.tag)[base]) { | |
inline .ptr_decl, | |
.ptr_mut_decl, | |
=> |tag| return @enumFromInt(ip.extra.items[ | |
ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "decl").? | |
]), | |
inline .ptr_eu_payload, | |
.ptr_opt_payload, | |
.ptr_elem, | |
.ptr_field, | |
=> |tag| base = ip.extra.items[ | |
ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "base").? | |
], | |
inline .ptr_slice => |tag| base = ip.extra.items[ | |
ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "ptr").? | |
], | |
else => return .none, | |
} | |
} | |
} | |
pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.Addr.Tag { | |
var base = @intFromEnum(val); | |
while (true) { | |
switch (ip.items.items(.tag)[base]) { | |
.ptr_decl => return .decl, | |
.ptr_mut_decl => return .mut_decl, | |
.ptr_anon_decl, .ptr_anon_decl_aligned => return .anon_decl, | |
.ptr_comptime_field => return .comptime_field, | |
.ptr_int => return .int, | |
inline .ptr_eu_payload, | |
.ptr_opt_payload, | |
.ptr_elem, | |
.ptr_field, | |
=> |tag| base = ip.extra.items[ | |
ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "base").? | |
], | |
inline .ptr_slice => |tag| base = ip.extra.items[ | |
ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "ptr").? | |
], | |
else => return null, | |
} | |
} | |
} | |
/// This is a particularly hot function, so we operate directly on encodings | |
/// rather than the more straightforward implementation of calling `indexToKey`. | |
pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId { | |
return switch (index) { | |
.u0_type, | |
.i0_type, | |
.u1_type, | |
.u8_type, | |
.i8_type, | |
.u16_type, | |
.i16_type, | |
.u29_type, | |
.u32_type, | |
.i32_type, | |
.u64_type, | |
.i64_type, | |
.u80_type, | |
.u128_type, | |
.i128_type, | |
.usize_type, | |
.isize_type, | |
.c_char_type, | |
.c_short_type, | |
.c_ushort_type, | |
.c_int_type, | |
.c_uint_type, | |
.c_long_type, | |
.c_ulong_type, | |
.c_longlong_type, | |
.c_ulonglong_type, | |
=> .Int, | |
.c_longdouble_type, | |
.f16_type, | |
.f32_type, | |
.f64_type, | |
.f80_type, | |
.f128_type, | |
=> .Float, | |
.anyopaque_type => .Opaque, | |
.bool_type => .Bool, | |
.void_type => .Void, | |
.type_type => .Type, | |
.anyerror_type, .adhoc_inferred_error_set_type => .ErrorSet, | |
.comptime_int_type => .ComptimeInt, | |
.comptime_float_type => .ComptimeFloat, | |
.noreturn_type => .NoReturn, | |
.anyframe_type => .AnyFrame, | |
.null_type => .Null, | |
.undefined_type => .Undefined, | |
.enum_literal_type => .EnumLiteral, | |
.atomic_order_type, | |
.atomic_rmw_op_type, | |
.calling_convention_type, | |
.address_space_type, | |
.float_mode_type, | |
.reduce_op_type, | |
.call_modifier_type, | |
=> .Enum, | |
.prefetch_options_type, | |
.export_options_type, | |
.extern_options_type, | |
=> .Struct, | |
.type_info_type => .Union, | |
.manyptr_u8_type, | |
.manyptr_const_u8_type, | |
.manyptr_const_u8_sentinel_0_type, | |
.single_const_pointer_to_comptime_int_type, | |
.slice_const_u8_type, | |
.slice_const_u8_sentinel_0_type, | |
=> .Pointer, | |
.optional_noreturn_type => .Optional, | |
.anyerror_void_error_union_type => .ErrorUnion, | |
.empty_struct_type => .Struct, | |
.generic_poison_type => return error.GenericPoison, | |
// values, not types | |
.undef => unreachable, | |
.zero => unreachable, | |
.zero_usize => unreachable, | |
.zero_u8 => unreachable, | |
.one => unreachable, | |
.one_usize => unreachable, | |
.one_u8 => unreachable, | |
.four_u8 => unreachable, | |
.negative_one => unreachable, | |
.calling_convention_c => unreachable, | |
.calling_convention_inline => unreachable, | |
.void_value => unreachable, | |
.unreachable_value => unreachable, | |
.null_value => unreachable, | |
.bool_true => unreachable, | |
.bool_false => unreachable, | |
.empty_struct => unreachable, | |
.generic_poison => unreachable, | |
.var_args_param_type => unreachable, // special tag | |
_ => switch (ip.items.items(.tag)[@intFromEnum(index)]) { | |
.type_int_signed, | |
.type_int_unsigned, | |
=> .Int, | |
.type_array_big, | |
.type_array_small, | |
=> .Array, | |
.type_vector => .Vector, | |
.type_pointer, | |
.type_slice, | |
=> .Pointer, | |
.type_optional => .Optional, | |
.type_anyframe => .AnyFrame, | |
.type_error_union, | |
.type_anyerror_union, | |
=> .ErrorUnion, | |
.type_error_set, | |
.type_inferred_error_set, | |
=> .ErrorSet, | |
.type_enum_auto, | |
.type_enum_explicit, | |
.type_enum_nonexhaustive, | |
=> .Enum, | |
.simple_type => unreachable, // handled via Index tag above | |
.type_opaque => .Opaque, | |
.type_struct, | |
.type_struct_ns, | |
.type_struct_anon, | |
.type_struct_packed, | |
.type_struct_packed_inits, | |
.type_tuple_anon, | |
=> .Struct, | |
.type_union => .Union, | |
.type_function => .Fn, | |
// values, not types | |
.undef, | |
.simple_value, | |
.ptr_decl, | |
.ptr_mut_decl, | |
.ptr_anon_decl, | |
.ptr_anon_decl_aligned, | |
.ptr_comptime_field, | |
.ptr_int, | |
.ptr_eu_payload, | |
.ptr_opt_payload, | |
.ptr_elem, | |
.ptr_field, | |
.ptr_slice, | |
.opt_payload, | |
.opt_null, | |
.int_u8, | |
.int_u16, | |
.int_u32, | |
.int_i32, | |
.int_usize, | |
.int_comptime_int_u32, | |
.int_comptime_int_i32, | |
.int_small, | |
.int_positive, | |
.int_negative, | |
.int_lazy_align, | |
.int_lazy_size, | |
.error_set_error, | |
.error_union_error, | |
.error_union_payload, | |
.enum_literal, | |
.enum_tag, | |
.float_f16, | |
.float_f32, | |
.float_f64, | |
.float_f80, | |
.float_f128, | |
.float_c_longdouble_f80, | |
.float_c_longdouble_f128, | |
.float_comptime_float, | |
.variable, | |
.extern_func, | |
.func_decl, | |
.func_instance, | |
.func_coerced, | |
.only_possible_value, | |
.union_value, | |
.bytes, | |
.aggregate, | |
.repeated, | |
// memoization, not types | |
.memoized_call, | |
=> unreachable, | |
}, | |
.none => unreachable, // special tag | |
}; | |
} | |
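// Sketch: static type indexes map directly to their `std.builtin.TypeId`, | |
// and the generic poison type surfaces as an error rather than a tag. | |
test "zigTypeTagOrPoison on static types" { | |
var ip: InternPool = .{}; | |
defer ip.deinit(std.testing.allocator); | |
try std.testing.expect((try ip.zigTypeTagOrPoison(.bool_type)) == .Bool); | |
try std.testing.expect((try ip.zigTypeTagOrPoison(.f32_type)) == .Float); | |
try std.testing.expectError(error.GenericPoison, ip.zigTypeTagOrPoison(.generic_poison_type)); | |
} | |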
pub fn isFuncBody(ip: *const InternPool, i: Index) bool { | |
assert(i != .none); | |
return switch (ip.items.items(.tag)[@intFromEnum(i)]) { | |
.func_decl, .func_instance, .func_coerced => true, | |
else => false, | |
}; | |
} | |
pub fn funcAnalysis(ip: *const InternPool, i: Index) *FuncAnalysis { | |
assert(i != .none); | |
const item = ip.items.get(@intFromEnum(i)); | |
const extra_index = switch (item.tag) { | |
.func_decl => item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, | |
.func_instance => item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, | |
.func_coerced => i: { | |
const extra_index = item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?; | |
const func_index: Index = @enumFromInt(ip.extra.items[extra_index]); | |
const sub_item = ip.items.get(@intFromEnum(func_index)); | |
break :i switch (sub_item.tag) { | |
.func_decl => sub_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, | |
.func_instance => sub_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, | |
else => unreachable, | |
}; | |
}, | |
else => unreachable, | |
}; | |
return @ptrCast(&ip.extra.items[extra_index]); | |
} | |
pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool { | |
return funcAnalysis(ip, i).inferred_error_set; | |
} | |
pub fn funcZirBodyInst(ip: *const InternPool, i: Index) TrackedInst.Index { | |
assert(i != .none); | |
const item = ip.items.get(@intFromEnum(i)); | |
const zir_body_inst_field_index = std.meta.fieldIndex(Tag.FuncDecl, "zir_body_inst").?; | |
const extra_index = switch (item.tag) { | |
.func_decl => item.data + zir_body_inst_field_index, | |
.func_instance => b: { | |
const generic_owner_field_index = std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?; | |
const func_decl_index = ip.extra.items[item.data + generic_owner_field_index]; | |
assert(ip.items.items(.tag)[func_decl_index] == .func_decl); | |
break :b ip.items.items(.data)[func_decl_index] + zir_body_inst_field_index; | |
}, | |
.func_coerced => { | |
const datas = ip.items.items(.data); | |
const uncoerced_func_index: Index = @enumFromInt(ip.extra.items[ | |
datas[@intFromEnum(i)] + std.meta.fieldIndex(Tag.FuncCoerced, "func").? | |
]); | |
return ip.funcZirBodyInst(uncoerced_func_index); | |
}, | |
else => unreachable, | |
}; | |
return @enumFromInt(ip.extra.items[extra_index]); | |
} | |
pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index { | |
assert(ies_index != .none); | |
const tags = ip.items.items(.tag); | |
assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set); | |
const func_index = ip.items.items(.data)[@intFromEnum(ies_index)]; | |
switch (tags[func_index]) { | |
.func_decl, .func_instance => {}, | |
else => unreachable, // assertion failed | |
} | |
return @enumFromInt(func_index); | |
} | |
/// Returns a mutable pointer to the resolved error set type of an inferred | |
/// error set function. The returned pointer is invalidated when anything is | |
/// added to `ip`. | |
pub fn iesResolved(ip: *const InternPool, ies_index: Index) *Index { | |
assert(ies_index != .none); | |
const tags = ip.items.items(.tag); | |
const datas = ip.items.items(.data); | |
assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set); | |
const func_index = datas[@intFromEnum(ies_index)]; | |
return funcIesResolved(ip, func_index); | |
} | |
/// Returns a mutable pointer to the resolved error set type of an inferred | |
/// error set function. The returned pointer is invalidated when anything is | |
/// added to `ip`. | |
pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index { | |
const tags = ip.items.items(.tag); | |
const datas = ip.items.items(.data); | |
assert(funcHasInferredErrorSet(ip, func_index)); | |
const func_start = datas[@intFromEnum(func_index)]; | |
const extra_index = switch (tags[@intFromEnum(func_index)]) { | |
.func_decl => func_start + @typeInfo(Tag.FuncDecl).Struct.fields.len, | |
.func_instance => func_start + @typeInfo(Tag.FuncInstance).Struct.fields.len, | |
.func_coerced => i: { | |
const uncoerced_func_index: Index = @enumFromInt(ip.extra.items[ | |
func_start + std.meta.fieldIndex(Tag.FuncCoerced, "func").? | |
]); | |
const uncoerced_func_start = datas[@intFromEnum(uncoerced_func_index)]; | |
break :i switch (tags[@intFromEnum(uncoerced_func_index)]) { | |
.func_decl => uncoerced_func_start + @typeInfo(Tag.FuncDecl).Struct.fields.len, | |
.func_instance => uncoerced_func_start + @typeInfo(Tag.FuncInstance).Struct.fields.len, | |
else => unreachable, | |
}; | |
}, | |
else => unreachable, | |
}; | |
return @ptrCast(&ip.extra.items[extra_index]); | |
} | |
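// Illustrative sketch, not part of the original source: updating a resolved
// inferred error set through the pointer returned above. `ies_index` and
// `new_error_set_ty` are hypothetical. Per the doc comment, the returned
// pointer must not be held across any call that can add entries to `ip`.
fn exampleSetResolvedIes(ip: *InternPool, ies_index: Index, new_error_set_ty: Index) void {
    const resolved = ip.iesResolved(ies_index);
    if (resolved.* == .none) resolved.* = new_error_set_ty;
}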
pub fn funcDeclInfo(ip: *const InternPool, i: Index) Key.Func { | |
const tags = ip.items.items(.tag); | |
const datas = ip.items.items(.data); | |
assert(tags[@intFromEnum(i)] == .func_decl); | |
return extraFuncDecl(ip, datas[@intFromEnum(i)]); | |
} | |
pub fn funcDeclOwner(ip: *const InternPool, i: Index) DeclIndex { | |
return funcDeclInfo(ip, i).owner_decl; | |
} | |
pub fn funcTypeParamsLen(ip: *const InternPool, i: Index) u32 { | |
const tags = ip.items.items(.tag); | |
const datas = ip.items.items(.data); | |
assert(tags[@intFromEnum(i)] == .type_function); | |
const start = datas[@intFromEnum(i)]; | |
return ip.extra.items[start + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; | |
} | |
fn unwrapCoercedFunc(ip: *const InternPool, i: Index) Index { | |
const tags = ip.items.items(.tag); | |
return switch (tags[@intFromEnum(i)]) { | |
.func_coerced => { | |
const datas = ip.items.items(.data); | |
return @enumFromInt(ip.extra.items[ | |
datas[@intFromEnum(i)] + std.meta.fieldIndex(Tag.FuncCoerced, "func").? | |
]); | |
}, | |
.func_instance, .func_decl => i, | |
else => unreachable, | |
}; | |
} | |
/// Having resolved a builtin type to a real struct/union/enum (which is now at `resolved_index`), | |
/// make `want_index` refer to this type instead. This invalidates `resolved_index`, so must be | |
/// called only when it is guaranteed that no reference to `resolved_index` exists. | |
pub fn resolveBuiltinType(ip: *InternPool, want_index: Index, resolved_index: Index) void { | |
assert(@intFromEnum(want_index) >= @intFromEnum(Index.first_type)); | |
assert(@intFromEnum(want_index) <= @intFromEnum(Index.last_type)); | |
// Make sure the type isn't already resolved! | |
assert(ip.indexToKey(want_index) == .simple_type); | |
// Make sure it's the same kind of type | |
assert((ip.zigTypeTagOrPoison(want_index) catch unreachable) == | |
(ip.zigTypeTagOrPoison(resolved_index) catch unreachable)); | |
// Copy the data | |
const item = ip.items.get(@intFromEnum(resolved_index)); | |
ip.items.set(@intFromEnum(want_index), item); | |
if (std.debug.runtime_safety) { | |
// Make the value unreachable - this is a weird value which will make (incorrect) existing | |
// references easier to spot | |
ip.items.set(@intFromEnum(resolved_index), .{ | |
.tag = .simple_value, | |
.data = @intFromEnum(SimpleValue.@"unreachable"), | |
}); | |
} else { | |
// Here we could add the index to a free-list for reuse, but since | |
// there is so little garbage created this way it's not worth it. | |
} | |
} | |
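// Illustrative sketch, not part of the original source: redirecting a
// reserved builtin type slot once the real type exists. Both index values
// are hypothetical; after the call, only `want` remains a valid way to
// refer to the type, since `resolved`'s slot is reused.
fn exampleResolveBuiltin(ip: *InternPool, want: Index, resolved: Index) void {
    ip.resolveBuiltinType(want, resolved);
}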
pub fn anonStructFieldTypes(ip: *const InternPool, i: Index) []const Index { | |
return ip.indexToKey(i).anon_struct_type.types; | |
} | |
pub fn anonStructFieldsLen(ip: *const InternPool, i: Index) u32 { | |
return @intCast(ip.indexToKey(i).anon_struct_type.types.len); | |
} | |
/// Asserts the type is a struct. | |
pub fn structDecl(ip: *const InternPool, i: Index) OptionalDeclIndex { | |
return switch (ip.indexToKey(i)) { | |
.struct_type => |t| t.decl, | |
else => unreachable, | |
}; | |
} | |
/// Returns the already-existing field with the same name, if any. | |
pub fn addFieldName( | |
ip: *InternPool, | |
names_map: MapIndex, | |
names_start: u32, | |
name: NullTerminatedString, | |
) ?u32 { | |
const map = &ip.maps.items[@intFromEnum(names_map)]; | |
const field_index = map.count(); | |
const strings = ip.extra.items[names_start..][0..field_index]; | |
const adapter: NullTerminatedString.Adapter = .{ .strings = @ptrCast(strings) }; | |
const gop = map.getOrPutAssumeCapacityAdapted(name, adapter); | |
if (gop.found_existing) return @intCast(gop.index); | |
ip.extra.items[names_start + field_index] = @intFromEnum(name); | |
return null; | |
} | |
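// Illustrative sketch, not part of the original source: using the return
// value of `addFieldName` to diagnose duplicate field names while building
// a type. All parameter values are hypothetical.
fn exampleDetectDuplicateField(
    ip: *InternPool,
    names_map: MapIndex,
    names_start: u32,
    name: NullTerminatedString,
) bool {
    // Non-null means `name` already exists at the returned field index.
    return ip.addFieldName(names_map, names_start, name) != null;
}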
/// Used only by `get` for pointer values, and mainly intended to use `Tag.ptr_anon_decl` | |
/// encoding instead of `Tag.ptr_anon_decl_aligned` when possible. | |
fn ptrsHaveSameAlignment(ip: *InternPool, a_ty: Index, a_info: Key.PtrType, b_ty: Index) bool { | |
if (a_ty == b_ty) return true; | |
const b_info = ip.indexToKey(b_ty).ptr_type; | |
return a_info.flags.alignment == b_info.flags.alignment and | |
(a_info.child == b_info.child or a_info.flags.alignment != .none); | |
} | |
//! For each AIR instruction, we want to know: | |
//! * Is the instruction unreferenced (e.g. dies immediately)? | |
//! * For each of its operands, does the operand die with this instruction (e.g. is | |
//! this the last reference to it)? | |
//! Some instructions are special, such as: | |
//! * Conditional Branches | |
//! * Switch Branches | |
const std = @import("std"); | |
const log = std.log.scoped(.liveness); | |
const assert = std.debug.assert; | |
const Allocator = std.mem.Allocator; | |
const Log2Int = std.math.Log2Int; | |
const Liveness = @This(); | |
const trace = @import("tracy.zig").trace; | |
const Air = @import("Air.zig"); | |
const InternPool = @import("InternPool.zig"); | |
pub const Verify = @import("Liveness/Verify.zig"); | |
/// This array is split into sets of 4 bits per AIR instruction. | |
/// The MSB (0bX000) is whether the instruction is unreferenced. | |
/// The LSB (0b000X) is the first operand, and so on, up to 3 operands. A set bit means the | |
/// operand dies after this instruction. | |
/// Instructions which need more data to track liveness have special handling via the | |
/// `special` table. | |
tomb_bits: []usize, | |
/// Sparse table of specially handled instructions. The value is an index into the `extra` | |
/// array. The meaning of the data depends on the AIR tag. | |
/// * `cond_br` - points to a `CondBr` in `extra` at this index. | |
/// * `try`, `try_ptr` - points to a `CondBr` in `extra` at this index. The error path (the block | |
///   in the instruction) is considered the "else" path, and the rest of the enclosing block the "then" path. | |
/// * `switch_br` - points to a `SwitchBr` in `extra` at this index. | |
/// * `block` - points to a `Block` in `extra` at this index. | |
/// * `asm`, `call`, `aggregate_init` - the value is a set of bits which are the extra tomb | |
/// bits of operands. | |
///   The main tomb bits are still used; the extra ones start with the LSB of the | |
///   value stored here. | |
special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), | |
/// Auxiliary data. The way this data is interpreted is determined contextually. | |
extra: []const u32, | |
/// Trailing is the set of instructions whose lifetimes end at the start of the then branch, | |
/// followed by the set of instructions whose lifetimes end at the start of the else branch. | |
pub const CondBr = struct { | |
then_death_count: u32, | |
else_death_count: u32, | |
}; | |
/// Trailing is: | |
/// * For each case in the same order as in the AIR: | |
/// - case_death_count: u32 | |
/// - Air.Inst.Index for each `case_death_count`: set of instructions whose lifetimes | |
/// end at the start of this case. | |
/// * Air.Inst.Index for each `else_death_count`: set of instructions whose lifetimes | |
/// end at the start of the else case. | |
pub const SwitchBr = struct { | |
else_death_count: u32, | |
}; | |
/// Trailing is the set of instructions which die in the block. Note that these are not additional | |
/// deaths (they are all recorded as normal within the block), but backends may use this information | |
/// as a more efficient way to track which instructions are still alive after a block. | |
pub const Block = struct { | |
death_count: u32, | |
}; | |
/// Liveness analysis runs in several passes. Each pass iterates backwards over instructions in | |
/// bodies, and recurses into bodies. | |
const LivenessPass = enum { | |
/// In this pass, we perform some basic analysis of loops to gain information the main pass | |
/// needs. In particular, for every `loop`, we track the following information: | |
/// * Every block which the loop body contains a `br` to. | |
/// * Every operand referenced within the loop body but created outside the loop. | |
/// This gives the main analysis pass enough information to determine the full set of | |
/// instructions which need to be alive when a loop repeats. This data is TEMPORARILY stored in | |
/// `a.extra`. It is not re-added to `extra` by the main pass, since it is not useful to | |
/// backends. | |
loop_analysis, | |
/// This pass performs the main liveness analysis, setting up tombs and extra data while | |
/// considering control flow etc. | |
main_analysis, | |
}; | |
/// Each analysis pass may wish to pass data through calls. A pointer to a `LivenessPassData(pass)` | |
/// stored on the stack is passed through calls to `analyzeInst` etc. | |
fn LivenessPassData(comptime pass: LivenessPass) type { | |
return switch (pass) { | |
.loop_analysis => struct { | |
/// The set of blocks which are exited with a `br` instruction at some point within this | |
/// body and which we are currently within. | |
breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}, | |
/// The set of operands for which we have seen at least one usage but not their birth. | |
live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}, | |
fn deinit(self: *@This(), gpa: Allocator) void { | |
self.breaks.deinit(gpa); | |
self.live_set.deinit(gpa); | |
} | |
}, | |
.main_analysis => struct { | |
/// Every `block` currently under analysis. | |
block_scopes: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockScope) = .{}, | |
/// The set of instructions currently alive in the current control | |
/// flow branch. | |
live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}, | |
/// The extra data initialized by the `loop_analysis` pass for this pass to consume. | |
/// Owned by this struct during this pass. | |
old_extra: std.ArrayListUnmanaged(u32) = .{}, | |
const BlockScope = struct { | |
/// The set of instructions which are alive upon a `br` to this block. | |
live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void), | |
}; | |
fn deinit(self: *@This(), gpa: Allocator) void { | |
var it = self.block_scopes.valueIterator(); | |
while (it.next()) |block| { | |
block.live_set.deinit(gpa); | |
} | |
self.block_scopes.deinit(gpa); | |
self.live_set.deinit(gpa); | |
self.old_extra.deinit(gpa); | |
} | |
}, | |
}; | |
} | |
pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness { | |
const tracy = trace(@src()); | |
defer tracy.end(); | |
var a: Analysis = .{ | |
.gpa = gpa, | |
.air = air, | |
.tomb_bits = try gpa.alloc( | |
usize, | |
(air.instructions.len * bpi + @bitSizeOf(usize) - 1) / @bitSizeOf(usize), | |
), | |
.extra = .{}, | |
.special = .{}, | |
.intern_pool = intern_pool, | |
}; | |
errdefer gpa.free(a.tomb_bits); | |
errdefer a.special.deinit(gpa); | |
defer a.extra.deinit(gpa); | |
@memset(a.tomb_bits, 0); | |
const main_body = air.getMainBody(); | |
{ | |
var data: LivenessPassData(.loop_analysis) = .{}; | |
defer data.deinit(gpa); | |
try analyzeBody(&a, .loop_analysis, &data, main_body); | |
} | |
{ | |
var data: LivenessPassData(.main_analysis) = .{}; | |
defer data.deinit(gpa); | |
data.old_extra = a.extra; | |
a.extra = .{}; | |
try analyzeBody(&a, .main_analysis, &data, main_body); | |
assert(data.live_set.count() == 0); | |
} | |
return .{ | |
.tomb_bits = a.tomb_bits, | |
.special = a.special, | |
.extra = try a.extra.toOwnedSlice(gpa), | |
}; | |
} | |
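// Illustrative usage sketch, not part of the original source: the analysis
// runs once per function and the caller owns the result. `gpa`, `air`, and
// `ip` are assumed to come from the surrounding compilation context.
fn exampleAnalyze(gpa: Allocator, air: Air, ip: *const InternPool) Allocator.Error!void {
    var liveness = try analyze(gpa, air, ip);
    defer liveness.deinit(gpa);
    // Assuming the function body is non-empty, instruction 0 is its first
    // instruction (an `arg` for functions that take parameters).
    const first_inst: Air.Inst.Index = @enumFromInt(0);
    _ = liveness.getTombBits(first_inst);
}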
pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi { | |
const usize_index = (@intFromEnum(inst) * bpi) / @bitSizeOf(usize); | |
return @as(Bpi, @truncate(l.tomb_bits[usize_index] >> | |
@as(Log2Int(usize), @intCast((@intFromEnum(inst) % (@bitSizeOf(usize) / bpi)) * bpi)))); | |
} | |
pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { | |
const usize_index = (@intFromEnum(inst) * bpi) / @bitSizeOf(usize); | |
const mask = @as(usize, 1) << | |
@as(Log2Int(usize), @intCast((@intFromEnum(inst) % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1))); | |
return (l.tomb_bits[usize_index] & mask) != 0; | |
} | |
pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool { | |
assert(operand < bpi - 1); | |
const usize_index = (@intFromEnum(inst) * bpi) / @bitSizeOf(usize); | |
const mask = @as(usize, 1) << | |
@as(Log2Int(usize), @intCast((@intFromEnum(inst) % (@bitSizeOf(usize) / bpi)) * bpi + operand)); | |
return (l.tomb_bits[usize_index] & mask) != 0; | |
} | |
pub fn clearOperandDeath(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) void { | |
assert(operand < bpi - 1); | |
const usize_index = (@intFromEnum(inst) * bpi) / @bitSizeOf(usize); | |
const mask = @as(usize, 1) << | |
@as(Log2Int(usize), @intCast((@intFromEnum(inst) % (@bitSizeOf(usize) / bpi)) * bpi + operand)); | |
l.tomb_bits[usize_index] &= ~mask; | |
} | |
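// Illustrative sketch, not part of the original source: the typical way a
// backend consumes the packed tomb bits, assuming `l` is a populated
// `Liveness` and `inst` encodes all of its operands in the small form.
fn exampleReadTombBits(l: Liveness, inst: Air.Inst.Index) void {
    if (l.isUnused(inst)) {
        // The result is never referenced; a side-effect-free instruction
        // could be skipped entirely.
    }
    var op: OperandInt = 0;
    while (op < bpi - 1) : (op += 1) {
        if (l.operandDies(inst, op)) {
            // Whatever backs this operand (register, stack slot) may be
            // reclaimed after `inst` is lowered.
        }
    }
}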
const OperandCategory = enum { | |
/// The operand lives on, but this instruction cannot possibly mutate memory. | |
none, | |
/// The operand lives on and this instruction can mutate memory. | |
write, | |
/// The operand dies at this instruction. | |
tomb, | |
/// The operand lives on, and this instruction is noreturn. | |
noret, | |
/// This instruction is too complicated for analysis; no information is available. | |
complex, | |
}; | |
/// Given an instruction that we are examining, and an operand that we are looking for, | |
/// returns a classification. | |
pub fn categorizeOperand( | |
l: Liveness, | |
air: Air, | |
inst: Air.Inst.Index, | |
operand: Air.Inst.Index, | |
ip: *const InternPool, | |
) OperandCategory { | |
const air_tags = air.instructions.items(.tag); | |
const air_datas = air.instructions.items(.data); | |
const operand_ref = operand.toRef(); | |
switch (air_tags[@intFromEnum(inst)]) { | |
.add, | |
.add_safe, | |
.add_wrap, | |
.add_sat, | |
.add_optimized, | |
.sub, | |
.sub_safe, | |
.sub_wrap, | |
.sub_sat, | |
.sub_optimized, | |
.mul, | |
.mul_safe, | |
.mul_wrap, | |
.mul_sat, | |
.mul_optimized, | |
.div_float, | |
.div_trunc, | |
.div_floor, | |
.div_exact, | |
.rem, | |
.mod, | |
.bit_and, | |
.bit_or, | |
.xor, | |
.cmp_lt, | |
.cmp_lte, | |
.cmp_eq, | |
.cmp_gte, | |
.cmp_gt, | |
.cmp_neq, | |
.bool_and, | |
.bool_or, | |
.array_elem_val, | |
.slice_elem_val, | |
.ptr_elem_val, | |
.shl, | |
.shl_exact, | |
.shl_sat, | |
.shr, | |
.shr_exact, | |
.min, | |
.max, | |
.div_float_optimized, | |
.div_trunc_optimized, | |
.div_floor_optimized, | |
.div_exact_optimized, | |
.rem_optimized, | |
.mod_optimized, | |
.neg_optimized, | |
.cmp_lt_optimized, | |
.cmp_lte_optimized, | |
.cmp_eq_optimized, | |
.cmp_gte_optimized, | |
.cmp_gt_optimized, | |
.cmp_neq_optimized, | |
=> { | |
const o = air_datas[@intFromEnum(inst)].bin_op; | |
if (o.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
if (o.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none); | |
return .none; | |
}, | |
.store, | |
.store_safe, | |
.atomic_store_unordered, | |
.atomic_store_monotonic, | |
.atomic_store_release, | |
.atomic_store_seq_cst, | |
.set_union_tag, | |
.memset, | |
.memset_safe, | |
.memcpy, | |
=> { | |
const o = air_datas[@intFromEnum(inst)].bin_op; | |
if (o.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write); | |
if (o.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write); | |
return .write; | |
}, | |
.vector_store_elem => { | |
const o = air_datas[@intFromEnum(inst)].vector_store_elem; | |
const extra = air.extraData(Air.Bin, o.payload).data; | |
if (o.vector_ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write); | |
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none); | |
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none); | |
return .write; | |
}, | |
.arg, | |
.alloc, | |
.inferred_alloc, | |
.inferred_alloc_comptime, | |
.ret_ptr, | |
.trap, | |
.breakpoint, | |
.dbg_stmt, | |
.unreach, | |
.ret_addr, | |
.frame_addr, | |
.wasm_memory_size, | |
.err_return_trace, | |
.save_err_return_trace_index, | |
.c_va_start, | |
.work_item_id, | |
.work_group_size, | |
.work_group_id, | |
=> return .none, | |
.fence => return .write, | |
.not, | |
.bitcast, | |
.load, | |
.fpext, | |
.fptrunc, | |
.intcast, | |
.trunc, | |
.optional_payload, | |
.optional_payload_ptr, | |
.wrap_optional, | |
.unwrap_errunion_payload, | |
.unwrap_errunion_err, | |
.unwrap_errunion_payload_ptr, | |
.unwrap_errunion_err_ptr, | |
.wrap_errunion_payload, | |
.wrap_errunion_err, | |
.slice_ptr, | |
.slice_len, | |
.ptr_slice_len_ptr, | |
.ptr_slice_ptr_ptr, | |
.struct_field_ptr_index_0, | |
.struct_field_ptr_index_1, | |
.struct_field_ptr_index_2, | |
.struct_field_ptr_index_3, | |
.array_to_slice, | |
.int_from_float, | |
.int_from_float_optimized, | |
.float_from_int, | |
.get_union_tag, | |
.clz, | |
.ctz, | |
.popcount, | |
.byte_swap, | |
.bit_reverse, | |
.splat, | |
.error_set_has_value, | |
.addrspace_cast, | |
.c_va_arg, | |
.c_va_copy, | |
.abs, | |
=> { | |
const o = air_datas[@intFromEnum(inst)].ty_op; | |
if (o.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
return .none; | |
}, | |
.optional_payload_ptr_set, | |
.errunion_payload_ptr_set, | |
=> { | |
const o = air_datas[@intFromEnum(inst)].ty_op; | |
if (o.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write); | |
return .write; | |
}, | |
.is_null, | |
.is_non_null, | |
.is_null_ptr, | |
.is_non_null_ptr, | |
.is_err, | |
.is_non_err, | |
.is_err_ptr, | |
.is_non_err_ptr, | |
.int_from_ptr, | |
.int_from_bool, | |
.is_named_enum_value, | |
.tag_name, | |
.error_name, | |
.sqrt, | |
.sin, | |
.cos, | |
.tan, | |
.exp, | |
.exp2, | |
.log, | |
.log2, | |
.log10, | |
.floor, | |
.ceil, | |
.round, | |
.trunc_float, | |
.neg, | |
.cmp_lt_errors_len, | |
.c_va_end, | |
=> { | |
const o = air_datas[@intFromEnum(inst)].un_op; | |
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
return .none; | |
}, | |
.ret, | |
.ret_safe, | |
.ret_load, | |
=> { | |
const o = air_datas[@intFromEnum(inst)].un_op; | |
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .noret); | |
return .noret; | |
}, | |
.set_err_return_trace => { | |
const o = air_datas[@intFromEnum(inst)].un_op; | |
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write); | |
return .write; | |
}, | |
.add_with_overflow, | |
.sub_with_overflow, | |
.mul_with_overflow, | |
.shl_with_overflow, | |
.ptr_add, | |
.ptr_sub, | |
.ptr_elem_ptr, | |
.slice_elem_ptr, | |
.slice, | |
=> { | |
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl; | |
const extra = air.extraData(Air.Bin, ty_pl.payload).data; | |
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none); | |
return .none; | |
}, | |
.dbg_var_ptr, | |
.dbg_var_val, | |
=> { | |
const o = air_datas[@intFromEnum(inst)].pl_op.operand; | |
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
return .none; | |
}, | |
.prefetch => { | |
const prefetch = air_datas[@intFromEnum(inst)].prefetch; | |
if (prefetch.ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
return .none; | |
}, | |
.call, .call_always_tail, .call_never_tail, .call_never_inline => { | |
const inst_data = air_datas[@intFromEnum(inst)].pl_op; | |
const callee = inst_data.operand; | |
const extra = air.extraData(Air.Call, inst_data.payload); | |
const args = @as([]const Air.Inst.Ref, @ptrCast(air.extra[extra.end..][0..extra.data.args_len])); | |
if (args.len + 1 <= bpi - 1) { | |
if (callee == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write); | |
for (args, 0..) |arg, i| { | |
if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i + 1)), .write); | |
} | |
return .write; | |
} | |
var bt = l.iterateBigTomb(inst); | |
if (bt.feed()) { | |
if (callee == operand_ref) return .tomb; | |
} else { | |
if (callee == operand_ref) return .write; | |
} | |
for (args) |arg| { | |
if (bt.feed()) { | |
if (arg == operand_ref) return .tomb; | |
} else { | |
if (arg == operand_ref) return .write; | |
} | |
} | |
return .write; | |
}, | |
.select => { | |
const pl_op = air_datas[@intFromEnum(inst)].pl_op; | |
const extra = air.extraData(Air.Bin, pl_op.payload).data; | |
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none); | |
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none); | |
return .none; | |
}, | |
.shuffle => { | |
const extra = air.extraData(Air.Shuffle, air_datas[@intFromEnum(inst)].ty_pl.payload).data; | |
if (extra.a == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
if (extra.b == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none); | |
return .none; | |
}, | |
.reduce, .reduce_optimized => { | |
const reduce = air_datas[@intFromEnum(inst)].reduce; | |
if (reduce.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
return .none; | |
}, | |
.cmp_vector, .cmp_vector_optimized => { | |
const extra = air.extraData(Air.VectorCmp, air_datas[@intFromEnum(inst)].ty_pl.payload).data; | |
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none); | |
return .none; | |
}, | |
.aggregate_init => { | |
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl; | |
const aggregate_ty = ty_pl.ty.toType(); | |
const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip))); | |
const elements = @as([]const Air.Inst.Ref, @ptrCast(air.extra[ty_pl.payload..][0..len])); | |
if (elements.len <= bpi - 1) { | |
for (elements, 0..) |elem, i| { | |
if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i)), .none); | |
} | |
return .none; | |
} | |
var bt = l.iterateBigTomb(inst); | |
for (elements) |elem| { | |
if (bt.feed()) { | |
if (elem == operand_ref) return .tomb; | |
} else { | |
if (elem == operand_ref) return .write; | |
} | |
} | |
return .write; | |
}, | |
.union_init => { | |
const extra = air.extraData(Air.UnionInit, air_datas[@intFromEnum(inst)].ty_pl.payload).data; | |
if (extra.init == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
return .none; | |
}, | |
.struct_field_ptr, .struct_field_val => { | |
const extra = air.extraData(Air.StructField, air_datas[@intFromEnum(inst)].ty_pl.payload).data; | |
if (extra.struct_operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
return .none; | |
}, | |
.field_parent_ptr => { | |
const extra = air.extraData(Air.FieldParentPtr, air_datas[@intFromEnum(inst)].ty_pl.payload).data; | |
if (extra.field_ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
return .none; | |
}, | |
.cmpxchg_strong, .cmpxchg_weak => { | |
const extra = air.extraData(Air.Cmpxchg, air_datas[@intFromEnum(inst)].ty_pl.payload).data; | |
if (extra.ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write); | |
if (extra.expected_value == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write); | |
if (extra.new_value == operand_ref) return matchOperandSmallIndex(l, inst, 2, .write); | |
return .write; | |
}, | |
.mul_add => { | |
const pl_op = air_datas[@intFromEnum(inst)].pl_op; | |
const extra = air.extraData(Air.Bin, pl_op.payload).data; | |
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none); | |
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none); | |
return .none; | |
}, | |
.atomic_load => { | |
const ptr = air_datas[@intFromEnum(inst)].atomic_load.ptr; | |
if (ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
return .none; | |
}, | |
.atomic_rmw => { | |
const pl_op = air_datas[@intFromEnum(inst)].pl_op; | |
const extra = air.extraData(Air.AtomicRmw, pl_op.payload).data; | |
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write); | |
if (extra.operand == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write); | |
return .write; | |
}, | |
.br => { | |
const br = air_datas[@intFromEnum(inst)].br; | |
if (br.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .noret); | |
return .noret; | |
}, | |
.assembly => { | |
return .complex; | |
}, | |
.block, .dbg_inline_block => |tag| { | |
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl; | |
const body: []const Air.Inst.Index = @ptrCast(switch (tag) { | |
inline .block, .dbg_inline_block => |comptime_tag| body: { | |
const extra = air.extraData(switch (comptime_tag) { | |
.block => Air.Block, | |
.dbg_inline_block => Air.DbgInlineBlock, | |
else => unreachable, | |
}, ty_pl.payload); | |
break :body air.extra[extra.end..][0..extra.data.body_len]; | |
}, | |
else => unreachable, | |
}); | |
if (body.len == 1 and air_tags[@intFromEnum(body[0])] == .cond_br) { | |
// Peephole optimization for "panic-like" conditionals, which have | |
// one empty branch and another which calls a `noreturn` function. | |
// This allows us to infer that safety checks do not modify memory, | |
// as far as control flow successors are concerned. | |
const inst_data = air_datas[@intFromEnum(body[0])].pl_op; | |
const cond_extra = air.extraData(Air.CondBr, inst_data.payload); | |
if (inst_data.operand == operand_ref and operandDies(l, body[0], 0)) | |
return .tomb; | |
if (cond_extra.data.then_body_len > 2 or cond_extra.data.else_body_len > 2) | |
return .complex; | |
const then_body: []const Air.Inst.Index = @ptrCast(air.extra[cond_extra.end..][0..cond_extra.data.then_body_len]); | |
const else_body: []const Air.Inst.Index = @ptrCast(air.extra[cond_extra.end + cond_extra.data.then_body_len ..][0..cond_extra.data.else_body_len]); | |
if (then_body.len > 1 and air_tags[@intFromEnum(then_body[1])] != .unreach) | |
return .complex; | |
if (else_body.len > 1 and air_tags[@intFromEnum(else_body[1])] != .unreach) | |
return .complex; | |
var operand_live: bool = true; | |
for (&[_]Air.Inst.Index{ then_body[0], else_body[0] }) |cond_inst| { | |
if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb) | |
operand_live = false; | |
switch (air_tags[@intFromEnum(cond_inst)]) { | |
.br => { // Breaks immediately back to block | |
const br = air_datas[@intFromEnum(cond_inst)].br; | |
if (br.block_inst != inst) | |
return .complex; | |
}, | |
.call => {}, // Calls a noreturn function | |
else => return .complex, | |
} | |
} | |
return if (operand_live) .none else .tomb; | |
} | |
return .complex; | |
}, | |
.@"try" => { | |
return .complex; | |
}, | |
.try_ptr => { | |
return .complex; | |
}, | |
.loop => { | |
return .complex; | |
}, | |
.cond_br => { | |
return .complex; | |
}, | |
.switch_br => { | |
return .complex; | |
}, | |
.wasm_memory_grow => { | |
const pl_op = air_datas[@intFromEnum(inst)].pl_op; | |
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); | |
return .none; | |
}, | |
} | |
} | |
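// Illustrative sketch, not part of the original source: one way a backend
// might act on the classification, treating `.write` and `.complex`
// conservatively as potential memory clobbers. `ptr_inst` is hypothetical.
fn exampleMayClobberMemory(
    l: Liveness,
    air: Air,
    inst: Air.Inst.Index,
    ptr_inst: Air.Inst.Index,
    ip: *const InternPool,
) bool {
    return switch (l.categorizeOperand(air, inst, ptr_inst, ip)) {
        .none, .tomb => false,
        .write, .complex => true,
        // A noreturn instruction never falls through, so there is no
        // "afterwards" in which the memory could be observed.
        .noret => false,
    };
}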
fn matchOperandSmallIndex( | |
l: Liveness, | |
inst: Air.Inst.Index, | |
operand: OperandInt, | |
default: OperandCategory, | |
) OperandCategory { | |
if (operandDies(l, inst, operand)) { | |
return .tomb; | |
} else { | |
return default; | |
} | |
} | |
/// Higher level API. | |
pub const CondBrSlices = struct { | |
then_deaths: []const Air.Inst.Index, | |
else_deaths: []const Air.Inst.Index, | |
}; | |
pub fn getCondBr(l: Liveness, inst: Air.Inst.Index) CondBrSlices { | |
var index: usize = l.special.get(inst) orelse return .{ | |
.then_deaths = &.{}, | |
.else_deaths = &.{}, | |
}; | |
const then_death_count = l.extra[index]; | |
index += 1; | |
const else_death_count = l.extra[index]; | |
index += 1; | |
const then_deaths: []const Air.Inst.Index = @ptrCast(l.extra[index..][0..then_death_count]); | |
index += then_death_count; | |
return .{ | |
.then_deaths = then_deaths, | |
.else_deaths = @ptrCast(l.extra[index..][0..else_death_count]), | |
}; | |
} | |
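// Illustrative sketch, not part of the original source: ending the recorded
// lifetimes before lowering each branch of a `cond_br`.
fn exampleCondBrDeaths(l: Liveness, inst: Air.Inst.Index) void {
    const slices = l.getCondBr(inst);
    for (slices.then_deaths) |death| {
        // `death` is no longer live once control enters the "then" body.
        _ = death;
    }
    for (slices.else_deaths) |death| {
        // Likewise for the "else" body.
        _ = death;
    }
}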
/// Indexed by case number as they appear in AIR. | |
/// Else is the last element. | |
pub const SwitchBrTable = struct { | |
deaths: []const []const Air.Inst.Index, | |
}; | |
/// Caller owns the memory. | |
pub fn getSwitchBr(l: Liveness, gpa: Allocator, inst: Air.Inst.Index, cases_len: u32) Allocator.Error!SwitchBrTable { | |
var index: usize = l.special.get(inst) orelse return SwitchBrTable{ | |
.deaths = &.{}, | |
}; | |
const else_death_count = l.extra[index]; | |
index += 1; | |
var deaths = std.ArrayList([]const Air.Inst.Index).init(gpa); | |
defer deaths.deinit(); | |
try deaths.ensureTotalCapacity(cases_len + 1); | |
var case_i: u32 = 0; | |
while (case_i < cases_len - 1) : (case_i += 1) { | |
const case_death_count: u32 = l.extra[index]; | |
index += 1; | |
const case_deaths: []const Air.Inst.Index = @ptrCast(l.extra[index..][0..case_death_count]); | |
index += case_death_count; | |
deaths.appendAssumeCapacity(case_deaths); | |
} | |
{ | |
// Else | |
const else_deaths: []const Air.Inst.Index = @ptrCast(l.extra[index..][0..else_death_count]); | |
deaths.appendAssumeCapacity(else_deaths); | |
} | |
return SwitchBrTable{ | |
.deaths = try deaths.toOwnedSlice(), | |
}; | |
} | |
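// Illustrative sketch, not part of the original source: consuming the death
// table of a `switch_br`. `cases_len` counts the explicit cases plus the
// else case, and the returned outer slice is owned by the caller.
fn exampleSwitchBrDeaths(
    l: Liveness,
    gpa: Allocator,
    inst: Air.Inst.Index,
    cases_len: u32,
) Allocator.Error!void {
    const table = try l.getSwitchBr(gpa, inst, cases_len);
    // Only the outer slice is allocated; the inner slices point into
    // `l.extra`.
    defer gpa.free(table.deaths);
    for (table.deaths) |case_deaths| {
        // The final entry belongs to the else case.
        _ = case_deaths;
    }
}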
/// Note that this information is technically redundant, but is useful for | |
/// backends nonetheless: see `Block`. | |
pub const BlockSlices = struct { | |
deaths: []const Air.Inst.Index, | |
}; | |
pub fn getBlock(l: Liveness, inst: Air.Inst.Index) BlockSlices { | |
const index: usize = l.special.get(inst) orelse return .{ | |
.deaths = &.{}, | |
}; | |
const death_count = l.extra[index]; | |
const deaths: []const Air.Inst.Index = @ptrCast(l.extra[index + 1 ..][0..death_count]); | |
return .{ | |
.deaths = deaths, | |
}; | |
} | |
pub const LoopSlice = struct { | |
deaths: []const Air.Inst.Index, | |
}; | |
pub fn deinit(l: *Liveness, gpa: Allocator) void { | |
gpa.free(l.tomb_bits); | |
gpa.free(l.extra); | |
l.special.deinit(gpa); | |
l.* = undefined; | |
} | |
pub fn iterateBigTomb(l: Liveness, inst: Air.Inst.Index) BigTomb { | |
return .{ | |
.tomb_bits = l.getTombBits(inst), | |
.extra_start = l.special.get(inst) orelse 0, | |
.extra_offset = 0, | |
.extra = l.extra, | |
.bit_index = 0, | |
.reached_end = false, | |
}; | |
} | |
/// How many tomb bits per AIR instruction. | |
pub const bpi = 4; | |
pub const Bpi = std.meta.Int(.unsigned, bpi); | |
pub const OperandInt = std.math.Log2Int(Bpi); | |
/// Useful for decoders of Liveness information. | |
pub const BigTomb = struct { | |
tomb_bits: Liveness.Bpi, | |
bit_index: u32, | |
extra_start: u32, | |
extra_offset: u32, | |
extra: []const u32, | |
reached_end: bool, | |
/// Returns whether the next operand dies. | |
pub fn feed(bt: *BigTomb) bool { | |
if (bt.reached_end) return false; | |
const this_bit_index = bt.bit_index; | |
bt.bit_index += 1; | |
const small_tombs = bpi - 1; | |
if (this_bit_index < small_tombs) { | |
const dies = @as(u1, @truncate(bt.tomb_bits >> @as(Liveness.OperandInt, @intCast(this_bit_index)))) != 0; | |
return dies; | |
} | |
const big_bit_index = this_bit_index - small_tombs; | |
while (big_bit_index - bt.extra_offset * 31 >= 31) { | |
if (@as(u1, @truncate(bt.extra[bt.extra_start + bt.extra_offset] >> 31)) != 0) { | |
bt.reached_end = true; | |
return false; | |
} | |
bt.extra_offset += 1; | |
} | |
const dies = @as(u1, @truncate(bt.extra[bt.extra_start + bt.extra_offset] >> | |
@as(u5, @intCast(big_bit_index - bt.extra_offset * 31)))) != 0; | |
return dies; | |
} | |
}; | |
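// Illustrative sketch, not part of the original source: decoding deaths for
// an instruction with more operands than the small form can hold, e.g. a
// call. The callee is fed first, then each argument, mirroring the order
// used in `categorizeOperand` above. `callee` and `args` are hypothetical.
fn exampleBigTombDeaths(
    l: Liveness,
    inst: Air.Inst.Index,
    callee: Air.Inst.Ref,
    args: []const Air.Inst.Ref,
) void {
    var bt = l.iterateBigTomb(inst);
    if (bt.feed()) {
        // The callee operand dies at this call.
        _ = callee;
    }
    for (args) |arg| {
        if (bt.feed()) {
            // This argument dies at this call.
            _ = arg;
        }
    }
}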
/// In-progress data; on successful analysis converted into `Liveness`. | |
const Analysis = struct { | |
gpa: Allocator, | |
air: Air, | |
intern_pool: *const InternPool, | |
tomb_bits: []usize, | |
special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), | |
extra: std.ArrayListUnmanaged(u32), | |
fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { | |
const usize_index = (@intFromEnum(inst) * bpi) / @bitSizeOf(usize); | |
a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << | |
@as(Log2Int(usize), @intCast((@intFromEnum(inst) % (@bitSizeOf(usize) / bpi)) * bpi)); | |
} | |
fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 { | |
const fields = std.meta.fields(@TypeOf(extra)); | |
try a.extra.ensureUnusedCapacity(a.gpa, fields.len); | |
return addExtraAssumeCapacity(a, extra); | |
} | |
fn addExtraAssumeCapacity(a: *Analysis, extra: anytype) u32 { | |
const fields = std.meta.fields(@TypeOf(extra)); | |
const result = @as(u32, @intCast(a.extra.items.len)); | |
inline for (fields) |field| { | |
a.extra.appendAssumeCapacity(switch (field.type) { | |
u32 => @field(extra, field.name), | |
else => @compileError("bad field type"), | |
}); | |
} | |
return result; | |
} | |
}; | |
fn analyzeBody( | |
a: *Analysis, | |
comptime pass: LivenessPass, | |
data: *LivenessPassData(pass), | |
body: []const Air.Inst.Index, | |
) Allocator.Error!void { | |
var i: usize = body.len; | |
while (i != 0) { | |
i -= 1; | |
const inst = body[i]; | |
try analyzeInst(a, pass, data, inst); | |
} | |
} | |
fn analyzeInst( | |
a: *Analysis, | |
comptime pass: LivenessPass, | |
data: *LivenessPassData(pass), | |
inst: Air.Inst.Index, | |
) Allocator.Error!void { | |
const ip = a.intern_pool; | |
const inst_tags = a.air.instructions.items(.tag); | |
const inst_datas = a.air.instructions.items(.data); | |
switch (inst_tags[@intFromEnum(inst)]) { | |
.add, | |
.add_safe, | |
.add_optimized, | |
.add_wrap, | |
.add_sat, | |
.sub, | |
.sub_safe, | |
.sub_optimized, | |
.sub_wrap, | |
.sub_sat, | |
.mul, | |
.mul_safe, | |
.mul_optimized, | |
.mul_wrap, | |
.mul_sat, | |
.div_float, | |
.div_float_optimized, | |
.div_trunc, | |
.div_trunc_optimized, | |
.div_floor, | |
.div_floor_optimized, | |
.div_exact, | |
.div_exact_optimized, | |
.rem, | |
.rem_optimized, | |
.mod, | |
.mod_optimized, | |
.bit_and, | |
.bit_or, | |
.xor, | |
.cmp_lt, | |
.cmp_lt_optimized, | |
.cmp_lte, | |
.cmp_lte_optimized, | |
.cmp_eq, | |
.cmp_eq_optimized, | |
.cmp_gte, | |
.cmp_gte_optimized, | |
.cmp_gt, | |
.cmp_gt_optimized, | |
.cmp_neq, | |
.cmp_neq_optimized, | |
.bool_and, | |
.bool_or, | |
.store, | |
.store_safe, | |
.array_elem_val, | |
.slice_elem_val, | |
.ptr_elem_val, | |
.shl, | |
.shl_exact, | |
.shl_sat, | |
.shr, | |
.shr_exact, | |
.atomic_store_unordered, | |
.atomic_store_monotonic, | |
.atomic_store_release, | |
.atomic_store_seq_cst, | |
.set_union_tag, | |
.min, | |
.max, | |
.memset, | |
.memset_safe, | |
.memcpy, | |
=> { | |
const o = inst_datas[@intFromEnum(inst)].bin_op; | |
return analyzeOperands(a, pass, data, inst, .{ o.lhs, o.rhs, .none }); | |
}, | |
.vector_store_elem => { | |
const o = inst_datas[@intFromEnum(inst)].vector_store_elem; | |
const extra = a.air.extraData(Air.Bin, o.payload).data; | |
return analyzeOperands(a, pass, data, inst, .{ o.vector_ptr, extra.lhs, extra.rhs }); | |
}, | |
.arg, | |
.alloc, | |
.ret_ptr, | |
.breakpoint, | |
.dbg_stmt, | |
.fence, | |
.ret_addr, | |
.frame_addr, | |
.wasm_memory_size, | |
.err_return_trace, | |
.save_err_return_trace_index, | |
.c_va_start, | |
.work_item_id, | |
.work_group_size, | |
.work_group_id, | |
=> return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }), | |
.inferred_alloc, .inferred_alloc_comptime => unreachable, | |
.trap, | |
.unreach, | |
=> return analyzeFuncEnd(a, pass, data, inst, .{ .none, .none, .none }), | |
.not, | |
.bitcast, | |
.load, | |
.fpext, | |
.fptrunc, | |
.intcast, | |
.trunc, | |
.optional_payload, | |
.optional_payload_ptr, | |
.optional_payload_ptr_set, | |
.errunion_payload_ptr_set, | |
.wrap_optional, | |
.unwrap_errunion_payload, | |
.unwrap_errunion_err, | |
.unwrap_errunion_payload_ptr, | |
.unwrap_errunion_err_ptr, | |
.wrap_errunion_payload, | |
.wrap_errunion_err, | |
.slice_ptr, | |
.slice_len, | |
.ptr_slice_len_ptr, | |
.ptr_slice_ptr_ptr, | |
.struct_field_ptr_index_0, | |
.struct_field_ptr_index_1, | |
.struct_field_ptr_index_2, | |
.struct_field_ptr_index_3, | |
.array_ |