commit before transpiling
parent 20fe6c8f97
commit df53762d92
@@ -5,14 +5,18 @@ pub fn build(b: *std.Build) !void {
all = b.option(bool, "all", "All") orelse false;
const target = b.standardTargetOptions(.{});
const optimization = b.standardOptimizeOption(.{});
const use_llvm = b.option(bool, "use_llvm", "Use LLVM as the backend for generate the compiler binary") orelse true;
const exe = b.addExecutable(.{
.name = "nativity",
.root_source_file = .{ .path = "src/main.zig" },
.target = target,
.optimize = optimization,
.use_llvm = true,
.use_llvm = use_llvm,
.use_lld = false,
});
exe.unwind_tables = false;
exe.omit_frame_pointer = false;

b.installArtifact(exe);
b.installDirectory(.{
.source_dir = std.Build.LazyPath.relative("lib"),
@@ -31,6 +35,8 @@ pub fn build(b: *std.Build) !void {
const debug_command = switch (@import("builtin").os.tag) {
.linux => blk: {
const result = b.addSystemCommand(&.{"gf2"});
result.addArg("-ex=r");
result.addArgs(&.{ "-ex", "up" });
result.addArg("--args");
result.addArtifactArg(exe);
break :blk result;

ci.sh (5)
@@ -2,7 +2,8 @@

echo "Testing Nativity with Zig"
echo "Compiling Nativity with Zig"
zig build
nativity_use_llvm=false
zig build -Duse_llvm=$nativity_use_llvm
failed_test_count=0
passed_test_count=0
test_directory_name=test
@@ -18,7 +19,7 @@ failed_tests=()
for dir in $test_directory
do
MY_TESTNAME=${dir##*/}
zig build run -- $dir/main.nat
zig build run -Duse_llvm=$nativity_use_llvm -- $dir/main.nat -log ir
if [[ "$?" == "0" ]]; then
passed_compilation_count=$(($passed_compilation_count + 1))
if [[ "$OSTYPE" == "linux-gnu"* ]]; then

@@ -53,6 +53,7 @@ fn parseArguments(allocator: Allocator) !Compilation.Module.Descriptor {
var maybe_executable_path: ?[]const u8 = null;
var maybe_main_package_path: ?[]const u8 = null;
var target_triplet: []const u8 = "x86_64-linux-gnu";
var transpile_to_c: ?bool = null;

var i: usize = 0;
while (i < arguments.len) : (i += 1) {
@@ -112,6 +113,21 @@ fn parseArguments(allocator: Allocator) !Compilation.Module.Descriptor {
} else {
reportUnterminatedArgumentError(current_argument);
}
} else if (equal(u8, current_argument, "-transpile_to_c")) {
if (i + 1 != arguments.len) {
i += 1;

const arg = arguments[i];
if (std.mem.eql(u8, arg, "true")) {
transpile_to_c = true;
} else if (std.mem.equal(u8, arg, "false")) {
transpile_to_c = false;
} else {
unreachable;
}
} else {
reportUnterminatedArgumentError(current_argument);
}
} else {
maybe_main_package_path = current_argument;
}
@@ -133,6 +149,7 @@ fn parseArguments(allocator: Allocator) !Compilation.Module.Descriptor {
.main_package_path = main_package_path,
.executable_path = executable_path,
.target = target,
.transpile_to_c = transpile_to_c orelse true,
};
}

@@ -236,7 +253,7 @@ pub const Type = union(enum) {
pub fn getIndex(integer: Integer) Compilation.Type.Index {
return .{
.block = 0,
.index = @ctz(integer.bit_count) - @ctz(@as(u8, 8)) + @as(u6, switch (integer.signedness) {
.element = @ctz(integer.bit_count) - @ctz(@as(u8, 8)) + @as(u6, switch (integer.signedness) {
.signed => Compilation.HardwareSignedIntegerType.offset,
.unsigned => Compilation.HardwareUnsignedIntegerType.offset,
}),
@@ -287,26 +304,6 @@ pub const Type = union(enum) {
};

// Each time an enum is added here, a corresponding insertion in the initialization must be made
pub const Values = enum {
bool_false,
bool_true,
@"unreachable",

pub fn getIndex(value: Values) Value.Index {
const absolute: u32 = @intFromEnum(value);
const foo = @as(Value.Index, undefined);
const ElementT = @TypeOf(@field(foo, "index"));
const BlockT = @TypeOf(@field(foo, "block"));
const divider = std.math.maxInt(ElementT);
const element_index: ElementT = @intCast(absolute % divider);
const block_index: BlockT = @intCast(absolute / divider);
return .{
.index = element_index,
.block = block_index,
};
}
};

pub const Intrinsic = enum {
@"error",
import,
@@ -556,6 +553,7 @@ pub const BinaryOperation = struct {
divide,
shift_left,
shift_right,
compare_equal,
};
};

@@ -563,6 +561,17 @@ pub const CallingConvention = enum {
system_v,
};

pub const Branch = struct {
condition: Value.Index,
true_expression: Value.Index,
false_expression: Value.Index,
reaches_end: bool,

pub const List = BlockList(@This());
pub const Index = List.Index;
pub const Allocation = List.Allocation;
};

pub const Value = union(enum) {
unresolved: Unresolved,
declaration: Declaration.Index,
@@ -572,7 +581,8 @@ pub const Value = union(enum) {
undefined,
@"unreachable",
loop: Loop.Index,
function: Function.Index,
function_definition: Function.Index,
function_declaration: Function.Index,
block: Block.Index,
runtime: Runtime,
assign: Assignment.Index,
@@ -589,6 +599,7 @@ pub const Value = union(enum) {
sign_extend: Cast.Index,
zero_extend: Cast.Index,
binary_operation: BinaryOperation.Index,
branch: Branch.Index,

pub const List = BlockList(@This());
pub const Index = List.Index;
@@ -606,7 +617,7 @@ pub const Value = union(enum) {

pub fn isComptime(value: *Value, module: *Module) bool {
return switch (value.*) {
.bool, .void, .undefined, .function, .type, .enum_field => true,
.bool, .void, .undefined, .function_definition, .type, .enum_field => true,
.integer => |integer| integer.type.eq(Type.comptime_int),
.call => false,
.binary_operation => false,
@@ -623,8 +634,11 @@ pub const Value = union(enum) {
.string_literal => |string_literal_hash| module.string_literal_types.get(@intCast(module.getStringLiteral(string_literal_hash).?.len)).?,
.type => Type.type,
.enum_field => |enum_field_index| module.enums.get(module.enum_fields.get(enum_field_index).parent).type,
.function => |function_index| module.functions.get(function_index).prototype,
.function_definition => |function_index| module.function_definitions.get(function_index).prototype,
.function_declaration => |function_index| module.function_declarations.get(function_index).prototype,
.binary_operation => |binary_operation| module.binary_operations.get(binary_operation).type,
.bool => Type.boolean,
.declaration => Type.void,
else => |t| @panic(@tagName(t)),
};

@@ -703,9 +717,10 @@ pub const Module = struct {
scopes: BlockList(Scope) = .{},
files: BlockList(File) = .{},
values: BlockList(Value) = .{},
functions: BlockList(Function) = .{},
fields: BlockList(Field) = .{},
function_definitions: BlockList(Function) = .{},
function_declarations: BlockList(Function) = .{},
function_prototypes: BlockList(Function.Prototype) = .{},
fields: BlockList(Field) = .{},
types: BlockList(Type) = .{},
blocks: BlockList(Block) = .{},
loops: BlockList(Loop) = .{},
@@ -721,6 +736,7 @@ pub const Module = struct {
arrays: BlockList(Array) = .{},
casts: BlockList(Cast) = .{},
binary_operations: BlockList(BinaryOperation) = .{},
branches: BlockList(Branch) = .{},
string_literal_types: data_structures.AutoArrayHashMap(u32, Type.Index) = .{},
array_types: data_structures.AutoArrayHashMap(Array, Type.Index) = .{},
entry_point: Function.Index = Function.Index.invalid,
@@ -729,6 +745,7 @@ pub const Module = struct {
main_package_path: []const u8,
executable_path: []const u8,
target: std.Target,
transpile_to_c: bool,
};

const ImportFileResult = struct {
@@ -1047,17 +1064,7 @@ pub fn compileModule(compilation: *Compilation, descriptor: Module.Descriptor) !
_ = try module.types.append(compilation.base_allocator, type_data);
}

_ = try module.values.append(compilation.base_allocator, .{
.bool = false,
});

_ = try module.values.append(compilation.base_allocator, .{
.bool = true,
});

_ = try module.values.append(compilation.base_allocator, .{
.@"unreachable" = {},
});
semantic_analyzer.unreachable_index = (try module.values.append(compilation.base_allocator, .@"unreachable")).index;

const value_allocation = try module.values.append(compilation.base_allocator, .{
.unresolved = .{
@@ -1196,13 +1203,14 @@ pub fn log(comptime logger_scope: LoggerScope, logger: getLoggerScopeType(logger
}

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, return_address: ?usize) noreturn {
const print_stack_trace = true;
const print_stack_trace = false;
switch (print_stack_trace) {
true => @call(.always_inline, std.builtin.default_panic, .{ message, stack_trace, return_address }),
false => {
writer.writeAll("\nPANIC: ") catch {};
writer.writeAll(message) catch {};
writer.writeByte('\n') catch {};
@breakpoint();
std.os.abort();
},
}

@@ -9,6 +9,7 @@ const expectEqual = std.testing.expectEqual;
const Compilation = @import("../Compilation.zig");

const ir = @import("intermediate_representation.zig");
const IR = ir.IR;

const data_structures = @import("../data_structures.zig");
const ArrayList = data_structures.ArrayList;
@@ -234,7 +235,7 @@ pub fn get(comptime arch: std.Target.Cpu.Arch) type {
};

return struct {
pub fn initialize(allocator: Allocator, intermediate: *ir.Result, descriptor: Compilation.Module.Descriptor) !void {
pub fn initialize(allocator: Allocator, intermediate: *IR, descriptor: Compilation.Module.Descriptor) !void {
switch (arch) {
.x86_64 => {
var mir = try backend.MIR.selectInstructions(allocator, intermediate, descriptor.target);

File diff suppressed because it is too large
@ -4,6 +4,7 @@ const assert = std.debug.assert;
|
||||
const panic = std.debug.panic;
|
||||
const emit = @import("emit.zig");
|
||||
const ir = @import("intermediate_representation.zig");
|
||||
const IR = ir.IR;
|
||||
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
|
||||
@ -1243,13 +1244,13 @@ const InstructionSelection = struct {
|
||||
|
||||
const instruction = mir.ir.instructions.get(ir_instruction_index);
|
||||
const defer_materialization = switch (instruction.u) {
|
||||
.stack => !instruction_selection.stack_map.contains(ir_instruction_index),
|
||||
.load_integer => false,
|
||||
.stack_slot => !instruction_selection.stack_map.contains(ir_instruction_index),
|
||||
.constant_integer => false,
|
||||
else => true,
|
||||
};
|
||||
|
||||
if (defer_materialization) {
|
||||
const ir_type = getIrType(mir.ir, ir_instruction_index);
|
||||
const ir_type = getIrType(mir.ir, ir_instruction_index) orelse unreachable;
|
||||
const value_type = resolveType(ir_type);
|
||||
const register_class = register_classes.get(value_type);
|
||||
const new_register = try mir.createVirtualRegister(register_class);
|
||||
@ -1257,7 +1258,7 @@ const InstructionSelection = struct {
|
||||
return new_register;
|
||||
} else {
|
||||
const new_register = switch (instruction.u) {
|
||||
.load_integer => try instruction_selection.materializeInteger(mir, ir_instruction_index),
|
||||
.constant_integer => try instruction_selection.materializeInteger(mir, ir_instruction_index),
|
||||
else => unreachable,
|
||||
};
|
||||
try instruction_selection.local_value_map.put(mir.allocator, ir_instruction_index, new_register);
|
||||
@ -1269,8 +1270,8 @@ const InstructionSelection = struct {
|
||||
// Moving an immediate to a register
|
||||
fn materializeInteger(instruction_selection: *InstructionSelection, mir: *MIR, ir_instruction_index: ir.Instruction.Index) !Register {
|
||||
// const destination_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index);
|
||||
const integer = mir.ir.instructions.get(ir_instruction_index).u.load_integer;
|
||||
const value_type = resolveType(integer.type);
|
||||
const integer = mir.ir.instructions.get(ir_instruction_index).u.constant_integer;
|
||||
const value_type = resolveScalarType(integer.type);
|
||||
switch (integer.value.unsigned == 0) {
|
||||
true => {
|
||||
const instruction_id: Instruction.Id = switch (value_type) {
|
||||
@ -1361,7 +1362,7 @@ const InstructionSelection = struct {
|
||||
fn getAddressingModeFromIr(instruction_selection: *InstructionSelection, mir: *MIR, ir_instruction_index: ir.Instruction.Index) AddressingMode {
|
||||
const instruction = mir.ir.instructions.get(ir_instruction_index);
|
||||
switch (instruction.u) {
|
||||
.stack => {
|
||||
.stack_slot => {
|
||||
const frame_index: u32 = @intCast(instruction_selection.stack_map.getIndex(ir_instruction_index).?);
|
||||
return AddressingMode{
|
||||
.base = .{
|
||||
@ -1387,10 +1388,9 @@ const InstructionSelection = struct {
|
||||
}
|
||||
}
|
||||
|
||||
fn lowerArguments(instruction_selection: *InstructionSelection, mir: *MIR, ir_function: *ir.Function) !void {
|
||||
const ir_function_declaration = mir.ir.function_declarations.get(ir_function.declaration);
|
||||
const ir_arguments = ir_function_declaration.arguments.values();
|
||||
const calling_convention = calling_conventions.get(ir_function_declaration.calling_convention);
|
||||
fn lowerArguments(instruction_selection: *InstructionSelection, mir: *MIR, ir_function: *ir.FunctionDefinition) !void {
|
||||
const ir_arguments = ir_function.callable.argument_map.values();
|
||||
const calling_convention = calling_conventions.get(ir_function.callable.calling_convention);
|
||||
|
||||
try instruction_selection.local_value_map.ensureUnusedCapacity(mir.allocator, ir_arguments.len);
|
||||
|
||||
@ -1400,10 +1400,11 @@ const InstructionSelection = struct {
|
||||
const ir_argument_instruction = mir.ir.instructions.get(ir_argument_instruction_index);
|
||||
const ir_argument = mir.ir.arguments.get(ir_argument_instruction.u.argument);
|
||||
switch (ir_argument.type) {
|
||||
.scalar => |scalar_type| switch (scalar_type) {
|
||||
.i8, .i16, .i32, .i64 => gp_count += 1,
|
||||
.void,
|
||||
.noreturn,
|
||||
=> unreachable,
|
||||
else => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
@ -1574,16 +1575,20 @@ const Instruction = struct {
|
||||
add32rr,
|
||||
add32rm,
|
||||
add32mr,
|
||||
and8ri,
|
||||
and32rm,
|
||||
and32mr,
|
||||
and32rr,
|
||||
call64pcrel32,
|
||||
cmp32mi,
|
||||
cmp32mi8,
|
||||
copy,
|
||||
idiv32r,
|
||||
idiv32m,
|
||||
imul32mr,
|
||||
imul32rm,
|
||||
imul32rr,
|
||||
jump_pcrel32,
|
||||
lea64r,
|
||||
mov32r0,
|
||||
mov32rm,
|
||||
@ -1600,6 +1605,7 @@ const Instruction = struct {
|
||||
or32mr,
|
||||
or32rr,
|
||||
ret,
|
||||
seter,
|
||||
shl32mi,
|
||||
shl32ri,
|
||||
shr32mi,
|
||||
@ -1802,6 +1808,7 @@ pub const Operand = struct {
|
||||
unknown,
|
||||
i32mem,
|
||||
i64mem,
|
||||
gp8,
|
||||
gp32,
|
||||
gp64,
|
||||
gp64_nosp,
|
||||
@ -1813,6 +1820,7 @@ pub const Operand = struct {
|
||||
lea64mem,
|
||||
ccr,
|
||||
};
|
||||
|
||||
pub const Type = enum(u1) {
|
||||
use = 0,
|
||||
def = 1,
|
||||
@ -1879,6 +1887,11 @@ pub const Operand = struct {
|
||||
const PCRelative = struct {
|
||||
index: u32,
|
||||
section: u16,
|
||||
kind: enum {
|
||||
function,
|
||||
block,
|
||||
rodata,
|
||||
},
|
||||
};
|
||||
|
||||
const Lea64Mem = struct {
|
||||
@ -1896,6 +1909,7 @@ const register_class_operand_matcher = std.EnumArray(Operand.Id, Register.Class)
|
||||
.i64i32imm_brtarget = .not_a_register,
|
||||
.i32mem = .not_a_register,
|
||||
.i64mem = .not_a_register,
|
||||
.gp8 = .gp8,
|
||||
.gp32 = .gp32,
|
||||
.gp64 = .gp64,
|
||||
.gp64_nosp = .gp64_nosp,
|
||||
@ -1965,6 +1979,25 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri
|
||||
},
|
||||
.implicit_definitions = &.{.eflags},
|
||||
},
|
||||
.and8ri = .{
|
||||
// .format = .mrm_dest_reg, // right?
|
||||
.opcode = 0x21,
|
||||
.operands = &.{
|
||||
.{
|
||||
.id = .gp8,
|
||||
.kind = .dst,
|
||||
},
|
||||
.{
|
||||
.id = .gp8,
|
||||
.kind = .src,
|
||||
},
|
||||
.{
|
||||
.id = .imm8,
|
||||
.kind = .src,
|
||||
},
|
||||
},
|
||||
.implicit_definitions = &.{.eflags},
|
||||
},
|
||||
.and32mr = .{
|
||||
// .format = .mrm_dest_reg, // right?
|
||||
.opcode = 0x21,
|
||||
@ -2034,6 +2067,34 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri
|
||||
.implicit_definitions = &.{},
|
||||
.implicit_uses = &.{},
|
||||
},
|
||||
.cmp32mi8 = .{
|
||||
.opcode = 0x83,
|
||||
.operands = &.{
|
||||
.{
|
||||
.id = .gp32,
|
||||
.kind = .dst,
|
||||
},
|
||||
.{
|
||||
.id = .imm8,
|
||||
.kind = .src,
|
||||
},
|
||||
},
|
||||
.implicit_definitions = &.{.eflags},
|
||||
},
|
||||
.cmp32mi = .{
|
||||
.opcode = 0x81,
|
||||
.operands = &.{
|
||||
.{
|
||||
.id = .gp32,
|
||||
.kind = .dst,
|
||||
},
|
||||
.{
|
||||
.id = .imm32,
|
||||
.kind = .src,
|
||||
},
|
||||
},
|
||||
.implicit_definitions = &.{.eflags},
|
||||
},
|
||||
.copy = .{
|
||||
// .format = .pseudo,
|
||||
.opcode = 0,
|
||||
@ -2129,6 +2190,16 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri
|
||||
},
|
||||
.implicit_definitions = &.{.eflags},
|
||||
},
|
||||
.jump_pcrel32 = .{
|
||||
// .format = .mrm_source_mem,
|
||||
.opcode = 0xe9,
|
||||
.operands = &.{
|
||||
.{
|
||||
.id = .i64i32imm_brtarget,
|
||||
.kind = .src,
|
||||
},
|
||||
},
|
||||
},
|
||||
.lea64r = .{
|
||||
// .format = .mrm_source_mem,
|
||||
.opcode = 0x8d,
|
||||
@ -2359,6 +2430,16 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri
|
||||
},
|
||||
},
|
||||
},
|
||||
.seter = .{
|
||||
.opcode = 0xc3,
|
||||
.operands = &.{
|
||||
.{
|
||||
.id = .gp8,
|
||||
.kind = .dst,
|
||||
},
|
||||
},
|
||||
.implicit_uses = &.{.eflags},
|
||||
},
|
||||
.shl32ri = .{
|
||||
.opcode = 0xc1,
|
||||
.operands = &.{
|
||||
@ -2603,7 +2684,7 @@ const BasicBlock = struct {
|
||||
|
||||
pub const MIR = struct {
|
||||
allocator: Allocator,
|
||||
ir: *ir.Result,
|
||||
ir: *IR,
|
||||
target: std.Target,
|
||||
instructions: BlockList(Instruction) = .{},
|
||||
functions: BlockList(Function) = .{},
|
||||
@ -2611,10 +2692,10 @@ pub const MIR = struct {
|
||||
operands: BlockList(Operand) = .{},
|
||||
instruction_selections: ArrayList(InstructionSelection) = .{},
|
||||
virtual_registers: BlockList(Register.Virtual) = .{},
|
||||
function_declaration_map: std.AutoHashMapUnmanaged(ir.Function.Declaration.Index, Function.Index) = .{},
|
||||
function_definition_map: std.AutoHashMapUnmanaged(ir.FunctionDefinition.Index, Function.Index) = .{},
|
||||
entry_point: u32 = 0,
|
||||
|
||||
pub fn selectInstructions(allocator: Allocator, intermediate: *ir.Result, target: std.Target) !*MIR {
|
||||
pub fn selectInstructions(allocator: Allocator, intermediate: *IR, target: std.Target) !*MIR {
|
||||
logln(.codegen, .instruction_selection_block, "\n[INSTRUCTION SELECTION]\n", .{});
|
||||
const mir = try allocator.create(MIR);
|
||||
mir.* = .{
|
||||
@ -2623,20 +2704,21 @@ pub const MIR = struct {
|
||||
.target = target,
|
||||
};
|
||||
|
||||
try mir.blocks.ensureCapacity(allocator, intermediate.blocks.len);
|
||||
try mir.blocks.ensureCapacity(allocator, intermediate.basic_blocks.len);
|
||||
try mir.functions.ensureCapacity(allocator, intermediate.function_definitions.len);
|
||||
try mir.instruction_selections.ensureUnusedCapacity(allocator, intermediate.function_definitions.len);
|
||||
|
||||
var ir_function_definition_iterator = intermediate.function_definitions.iterator();
|
||||
try mir.function_declaration_map.ensureTotalCapacity(mir.allocator, @intCast(intermediate.function_definitions.len));
|
||||
try mir.function_definition_map.ensureTotalCapacity(mir.allocator, @intCast(intermediate.function_definitions.len));
|
||||
|
||||
while (ir_function_definition_iterator.nextPointer()) |ir_function| {
|
||||
const fn_name = mir.ir.getFunctionName(ir_function.declaration);
|
||||
var entry_point: ?u32 = null;
|
||||
while (ir_function_definition_iterator.nextIndex()) |ir_function_index| {
|
||||
const fn_name = mir.ir.getFunctionName(ir_function_index);
|
||||
|
||||
const instruction_selection = mir.instruction_selections.addOneAssumeCapacity();
|
||||
const function_allocation = try mir.functions.addOne(mir.allocator);
|
||||
const function = function_allocation.ptr;
|
||||
mir.function_declaration_map.putAssumeCapacityNoClobber(ir_function.declaration, function_allocation.index);
|
||||
mir.function_definition_map.putAssumeCapacityNoClobber(ir_function_index, function_allocation.index);
|
||||
function.* = .{
|
||||
.mir = mir,
|
||||
.instruction_selection = instruction_selection,
|
||||
@ -2645,52 +2727,57 @@ pub const MIR = struct {
|
||||
instruction_selection.* = .{
|
||||
.function = function,
|
||||
};
|
||||
|
||||
if (ir_function_index.eq(intermediate.entry_point)) {
|
||||
entry_point = function_allocation.index.uniqueInteger();
|
||||
}
|
||||
}
|
||||
|
||||
var function_iterator = mir.functions.iterator();
|
||||
ir_function_definition_iterator = intermediate.function_definitions.iterator();
|
||||
|
||||
var entry_point: ?u32 = null;
|
||||
var ir_function_index = ir_function_definition_iterator.getCurrentIndex();
|
||||
while (ir_function_definition_iterator.nextPointer()) |ir_function| {
|
||||
const function_index = function_iterator.getCurrentIndex();
|
||||
const function = function_iterator.nextPointer() orelse unreachable;
|
||||
while (ir_function_definition_iterator.nextIndex()) |ir_function_index| {
|
||||
const ir_function = intermediate.function_definitions.get(ir_function_index);
|
||||
const function_index = function_iterator.nextIndex() orelse unreachable;
|
||||
const function = mir.functions.get(function_index);
|
||||
logln(.codegen, .instruction_selection_ir_function, "Selecting instructions for {}", .{ir_function});
|
||||
const instruction_selection = function.instruction_selection;
|
||||
|
||||
if (ir_function_index.eq(intermediate.entry_point)) {
|
||||
entry_point = function_index.uniqueInteger();
|
||||
}
|
||||
const calling_convention = calling_conventions.get(ir_function.callable.calling_convention);
|
||||
|
||||
const ir_function_declaration = mir.ir.function_declarations.get(ir_function.declaration);
|
||||
const calling_convention = calling_conventions.get(ir_function_declaration.calling_convention);
|
||||
const ir_basic_blocks = try ir.findReachableBlocks(.{
|
||||
.allocator = allocator,
|
||||
.ir = mir.ir,
|
||||
.first = ir_function.entry_block,
|
||||
.traverse_functions = false,
|
||||
});
|
||||
|
||||
try instruction_selection.block_map.ensureUnusedCapacity(allocator, @intCast(ir_function.blocks.items.len));
|
||||
try function.blocks.ensureTotalCapacity(allocator, ir_function.blocks.items.len);
|
||||
try instruction_selection.block_map.ensureUnusedCapacity(allocator, @intCast(ir_basic_blocks.items.len));
|
||||
try function.blocks.ensureTotalCapacity(allocator, ir_basic_blocks.items.len);
|
||||
|
||||
for (ir_function.blocks.items) |block| {
|
||||
for (ir_basic_blocks.items) |ir_block_index| {
|
||||
const block_allocation = try mir.blocks.append(allocator, .{});
|
||||
instruction_selection.block_map.putAssumeCapacity(block, block_allocation.index);
|
||||
instruction_selection.block_map.putAssumeCapacity(ir_block_index, block_allocation.index);
|
||||
function.blocks.appendAssumeCapacity(block_allocation.index);
|
||||
}
|
||||
|
||||
for (mir.ir.blocks.get(ir_function.blocks.items[0]).instructions.items) |ir_instruction_index| {
|
||||
const ir_block = mir.ir.basic_blocks.get(ir_block_index);
|
||||
for (ir_block.instructions.items) |ir_instruction_index| {
|
||||
const ir_instruction = mir.ir.instructions.get(ir_instruction_index);
|
||||
|
||||
// TODO: take into account exceptions, dynamic allocas?
|
||||
if (ir_instruction.u == .stack) {
|
||||
const stack = mir.ir.stack_references.get(ir_instruction.u.stack);
|
||||
const ir_type = getIrType(mir.ir, ir_instruction_index);
|
||||
const value_type = resolveType(ir_type);
|
||||
const type_info = value_types.get(value_type);
|
||||
const total_size = type_info.size * stack.count;
|
||||
assert(total_size <= 8);
|
||||
const frame_index = try mir.createStackObject(instruction_selection, total_size, @intCast(stack.alignment), ir_instruction_index, false);
|
||||
if (ir_instruction.u == .stack_slot) {
|
||||
const ir_type = getIrType(mir.ir, ir_instruction_index) orelse unreachable;
|
||||
const size = ir_type.getSize();
|
||||
const alignment = ir_type.getAlignment();
|
||||
|
||||
assert(size <= 8 and alignment <= 8);
|
||||
const frame_index = try mir.createStackObject(instruction_selection, size, alignment, ir_instruction_index, false);
|
||||
try instruction_selection.stack_map.putNoClobber(allocator, ir_instruction_index, frame_index);
|
||||
}
|
||||
|
||||
// TODO: handle stack references outside blocks
|
||||
}
|
||||
}
|
||||
|
||||
instruction_selection.current_block = function.blocks.items[0];
|
||||
|
||||
@ -2701,10 +2788,9 @@ pub const MIR = struct {
|
||||
while (block_i > 0) {
|
||||
block_i -= 1;
|
||||
|
||||
const block_index = function.blocks.items[block_i];
|
||||
_ = block_index;
|
||||
const ir_block_index = ir_function.blocks.items[block_i];
|
||||
const ir_block = mir.ir.blocks.get(ir_block_index);
|
||||
const ir_block_index = ir_basic_blocks.items[block_i];
|
||||
const ir_block = mir.ir.basic_blocks.get(ir_block_index);
|
||||
instruction_selection.current_block = instruction_selection.block_map.get(ir_block_index).?;
|
||||
|
||||
var instruction_i: usize = ir_block.instructions.items.len;
|
||||
|
||||
@ -2721,14 +2807,14 @@ pub const MIR = struct {
|
||||
switch (ir_instruction.u) {
|
||||
.ret => |ir_ret_index| {
|
||||
const ir_ret = mir.ir.returns.get(ir_ret_index);
|
||||
switch (ir_ret.instruction.invalid) {
|
||||
switch (ir_ret.value.invalid) {
|
||||
true => {
|
||||
const ret = try mir.buildInstruction(instruction_selection, .ret, &.{});
|
||||
try instruction_selection.instruction_cache.append(mir.allocator, ret);
|
||||
},
|
||||
false => {
|
||||
const value_type = resolveType(getIrType(mir.ir, ir_ret.instruction));
|
||||
const source_register = try instruction_selection.getRegisterForValue(mir, ir_ret.instruction);
|
||||
const value_type = resolveType(getIrType(mir.ir, ir_ret.value) orelse unreachable);
|
||||
const source_register = try instruction_selection.getRegisterForValue(mir, ir_ret.value);
|
||||
|
||||
const register_class = register_classes.get(value_type);
|
||||
|
||||
@ -2783,7 +2869,7 @@ pub const MIR = struct {
|
||||
},
|
||||
}
|
||||
},
|
||||
.load_integer => unreachable,
|
||||
.constant_integer => {},
|
||||
.@"unreachable" => try instruction_selection.instruction_cache.append(mir.allocator, try mir.buildInstruction(instruction_selection, .ud2, &.{})),
|
||||
.syscall => |ir_syscall_index| {
|
||||
const ir_syscall = mir.ir.syscalls.get(ir_syscall_index);
|
||||
@ -2894,8 +2980,8 @@ pub const MIR = struct {
|
||||
const ir_load_instruction_index = ir_sign_extend.value;
|
||||
const ir_load_instruction = mir.ir.instructions.get(ir_sign_extend.value);
|
||||
const ir_load = mir.ir.loads.get(ir_load_instruction.u.load);
|
||||
const ir_source = ir_load.instruction;
|
||||
const source_type = resolveType(getIrType(mir.ir, ir_source));
|
||||
const ir_source = ir_load.value;
|
||||
const source_type = resolveType(getIrType(mir.ir, ir_source) orelse unreachable);
|
||||
|
||||
if (destination_type != source_type) {
|
||||
try instruction_selection.folded_loads.putNoClobber(mir.allocator, ir_load_instruction_index, {});
|
||||
@ -2953,9 +3039,9 @@ pub const MIR = struct {
|
||||
.load => |ir_load_index| {
|
||||
if (!instruction_selection.folded_loads.swapRemove(ir_instruction_index)) {
|
||||
const ir_load = mir.ir.loads.get(ir_load_index);
|
||||
const ir_source = ir_load.instruction;
|
||||
const ir_source = ir_load.value;
|
||||
const addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_source);
|
||||
const value_type = resolveType(getIrType(mir.ir, ir_source));
|
||||
const value_type = resolveType(getIrType(mir.ir, ir_source) orelse unreachable);
|
||||
|
||||
switch (value_type) {
|
||||
inline .i32,
|
||||
@ -3015,15 +3101,15 @@ pub const MIR = struct {
|
||||
const addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_destination);
|
||||
const ir_source = mir.ir.instructions.get(ir_source_index);
|
||||
|
||||
const value_type = resolveType(getIrType(mir.ir, ir_source_index));
|
||||
const value_type = resolveType(getIrType(mir.ir, ir_source_index) orelse unreachable);
|
||||
|
||||
if (ir_source.u == .load_integer and value_types.get(value_type).getSize() <= @sizeOf(u32)) {
|
||||
if (ir_source.u == .constant_integer and value_types.get(value_type).getSize() <= @sizeOf(u32)) {
|
||||
const instruction_id: Instruction.Id = switch (value_type) {
|
||||
.i32 => .mov32mi,
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
const source_immediate = ir_source.u.load_integer;
|
||||
const source_immediate = ir_source.u.constant_integer;
|
||||
const instruction_descriptor = instruction_descriptors.getPtrConst(instruction_id);
|
||||
|
||||
const source_operand_index = 1;
|
||||
@ -3096,7 +3182,7 @@ pub const MIR = struct {
|
||||
}
|
||||
}
|
||||
},
|
||||
.stack => {
|
||||
.stack_slot => {
|
||||
assert(instruction_selection.stack_map.get(ir_instruction_index) != null);
|
||||
},
|
||||
.call => |ir_call_index| {
|
||||
@ -3153,8 +3239,9 @@ pub const MIR = struct {
|
||||
.id = .i64i32imm_brtarget,
|
||||
.u = .{
|
||||
.pc_relative = .{
|
||||
.index = @bitCast(mir.function_declaration_map.get(ir_call.function).?),
|
||||
.index = @bitCast(mir.function_definition_map.get(ir_call.callable.function_definition).?),
|
||||
.section = @intCast(mir.ir.section_manager.getTextSectionIndex()),
|
||||
.kind = .function,
|
||||
},
|
||||
},
|
||||
.flags = .{},
|
||||
@ -3163,12 +3250,7 @@ pub const MIR = struct {
|
||||
|
||||
try instruction_selection.instruction_cache.append(mir.allocator, call);
|
||||
|
||||
const ir_return_type = mir.ir.function_declarations.get(ir_call.function).return_type;
|
||||
switch (ir_return_type) {
|
||||
.void,
|
||||
.noreturn,
|
||||
=> {},
|
||||
else => {
|
||||
if (mir.ir.function_definitions.get(ir_call.callable.function_definition).callable.return_type) |ir_return_type| {
|
||||
const return_type = resolveType(ir_return_type);
|
||||
switch (return_type) {
|
||||
inline .i64, .i32 => |rt| {
|
||||
@ -3213,10 +3295,9 @@ pub const MIR = struct {
|
||||
},
|
||||
else => |t| @panic(@tagName(t)),
|
||||
}
|
||||
},
|
||||
}
|
||||
},
|
||||
.load_string_literal => |ir_load_string_literal_index| {
|
||||
.constant_string_literal => |ir_load_string_literal_index| {
|
||||
const ir_load_string_literal = mir.ir.string_literals.get(ir_load_string_literal_index);
|
||||
const virtual_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index);
|
||||
const virtual_operand = Operand{
|
||||
@ -3236,6 +3317,7 @@ pub const MIR = struct {
|
||||
.displacement = Operand.PCRelative{
|
||||
.index = ir_load_string_literal.offset,
|
||||
.section = mir.ir.section_manager.rodata orelse unreachable,
|
||||
.kind = .rodata,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -3263,12 +3345,12 @@ pub const MIR = struct {
|
||||
|
||||
const left_kind: BinaryOperandKind = switch (mir.ir.instructions.get(ir_binary_operation.left).u) {
|
||||
.load => .load,
|
||||
.load_integer => .immediate,
|
||||
.constant_integer => .immediate,
|
||||
else => .rest,
|
||||
};
|
||||
const right_kind: BinaryOperandKind = switch (mir.ir.instructions.get(ir_binary_operation.right).u) {
|
||||
.load => .load,
|
||||
.load_integer => .immediate,
|
||||
.constant_integer => .immediate,
|
||||
else => .rest,
|
||||
};
|
||||
|
||||
@ -3324,7 +3406,7 @@ pub const MIR = struct {
|
||||
const instruction_descriptor = instruction_descriptors.get(instruction_id);
|
||||
const right_operand_id = instruction_descriptor.operands[0].id;
|
||||
const ir_load = mir.ir.loads.get(mir.ir.instructions.get(ir_binary_operation.right).u.load);
|
||||
const right_operand_addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_load.instruction);
|
||||
const right_operand_addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_load.value);
|
||||
const right_operand = Operand{
|
||||
.id = right_operand_id,
|
||||
.u = .{
|
||||
@ -3438,6 +3520,7 @@ pub const MIR = struct {
|
||||
.signed_divide => unreachable,
|
||||
.shift_left => unreachable,
|
||||
.shift_right => unreachable,
|
||||
.integer_compare_equal => unreachable,
|
||||
};
|
||||
|
||||
try instruction_selection.folded_loads.putNoClobber(mir.allocator, ir_binary_operation.right, {});
|
||||
@ -3448,7 +3531,7 @@ pub const MIR = struct {
|
||||
const left_operand_id = instruction_descriptor.operands[1].id;
|
||||
const right_operand_id = instruction_descriptor.operands[2].id;
|
||||
const ir_load = mir.ir.loads.get(mir.ir.instructions.get(ir_binary_operation.right).u.load);
|
||||
const right_operand_addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_load.instruction);
|
||||
const right_operand_addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_load.value);
|
||||
|
||||
const destination_operand = Operand{
|
||||
.id = destination_operand_id,
|
||||
@ -3487,6 +3570,8 @@ pub const MIR = struct {
|
||||
try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, destination_register, false);
|
||||
},
|
||||
.immediate => {
|
||||
switch (ir_binary_operation.id) {
|
||||
.shift_left, .shift_right => {
|
||||
const destination_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index);
|
||||
const instruction_id: Instruction.Id = switch (ir_binary_operation.id) {
|
||||
.shift_left => .shl32ri,
|
||||
@ -3517,7 +3602,7 @@ pub const MIR = struct {
|
||||
.flags = .{},
|
||||
};
|
||||
|
||||
const right_immediate = mir.ir.instructions.get(ir_binary_operation.right).u.load_integer;
|
||||
const right_immediate = mir.ir.instructions.get(ir_binary_operation.right).u.constant_integer;
|
||||
const right_value_type: ValueType.Id = switch (right_immediate.type) {
|
||||
.i8 => .i8,
|
||||
else => unreachable,
|
||||
@ -3541,6 +3626,105 @@ pub const MIR = struct {
|
||||
|
||||
try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, destination_register, false);
|
||||
},
|
||||
.integer_compare_equal => {
|
||||
const result = try instruction_selection.getRegisterForValue(mir, ir_instruction_index);
|
||||
const right_immediate = mir.ir.instructions.get(ir_binary_operation.right).u.constant_integer;
|
||||
const instruction_id: Instruction.Id = switch (ir_binary_operation.id) {
|
||||
.integer_compare_equal => blk: {
|
||||
const instruction_id: Instruction.Id = switch (right_immediate.type) {
|
||||
.i32 => if (std.math.cast(i8, right_immediate.value.signed)) |_| .cmp32mi8 else .cmp32mi,
|
||||
else => |t| @panic(@tagName(t)),
|
||||
};
|
||||
break :blk instruction_id;
|
||||
},
|
||||
else => unreachable,
|
||||
};
|
||||
try instruction_selection.folded_loads.putNoClobber(mir.allocator, ir_binary_operation.left, {});
|
||||
//
|
||||
const instruction_descriptor = instruction_descriptors.get(instruction_id);
|
||||
// const left_register = try instruction_selection.getRegisterForValue(mir, ir_binary_operation.left);
|
||||
// const destination_operand_id = instruction_descriptor.operands[0].id;
|
||||
const right_operand_id = instruction_descriptor.operands[1].id;
|
||||
const left_operand_id = instruction_descriptor.operands[0].id;
|
||||
const ir_load = mir.ir.loads.get(mir.ir.instructions.get(ir_binary_operation.left).u.load);
|
||||
const left_operand_addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_load.value);
|
||||
const left_operand = Operand{
|
||||
.id = left_operand_id,
|
||||
.u = .{
|
||||
.memory = .{ .addressing_mode = left_operand_addressing_mode },
|
||||
},
|
||||
.flags = .{},
|
||||
};
|
||||
const right_operand = Operand{
|
||||
.id = right_operand_id,
|
||||
.u = .{
|
||||
.immediate = right_immediate.value.unsigned,
|
||||
},
|
||||
.flags = .{},
|
||||
};
|
||||
const binary_op_instruction = try mir.buildInstruction(instruction_selection, instruction_id, &.{
|
||||
left_operand,
|
||||
right_operand,
|
||||
});
|
||||
try instruction_selection.instruction_cache.append(mir.allocator, binary_op_instruction);
|
||||
|
||||
const set_instruction: Instruction.Id = switch (ir_binary_operation.id) {
|
||||
.integer_compare_equal => .seter,
|
||||
else => unreachable,
|
||||
};
|
||||
const flag1_register = try mir.createVirtualRegister(.gp8);
|
||||
const flag1_operand = Operand{
|
||||
.id = .gp8,
|
||||
.u = .{
|
||||
.register = flag1_register,
|
||||
},
|
||||
.flags = .{
|
||||
.type = .def,
|
||||
},
|
||||
};
|
||||
const setcc = try mir.buildInstruction(instruction_selection, set_instruction, &.{
|
||||
flag1_operand,
|
||||
});
|
||||
try instruction_selection.instruction_cache.append(mir.allocator, setcc);
|
||||
// TODO: parity?
|
||||
|
||||
const select_instruction: Instruction.Id = switch (ir_binary_operation.id) {
|
||||
.integer_compare_equal => .and8ri,
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
const result_operand = Operand{
|
||||
.id = .gp8,
|
||||
.u = .{
|
||||
.register = result,
|
||||
},
|
||||
.flags = .{
|
||||
.type = .def,
|
||||
},
|
||||
};
|
||||
const flag1_src_operand = Operand{
|
||||
.id = flag1_operand.id,
|
||||
.u = flag1_operand.u,
|
||||
.flags = .{},
|
||||
};
|
||||
const select = try mir.buildInstruction(instruction_selection, select_instruction, &.{
|
||||
result_operand,
|
||||
flag1_src_operand,
|
||||
Operand{
|
||||
.id = .imm8,
|
||||
.u = .{
|
||||
.immediate = 0x01,
|
||||
},
|
||||
.flags = .{},
|
||||
},
|
||||
});
|
||||
|
||||
try instruction_selection.instruction_cache.append(mir.allocator, select);
|
||||
try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, result, false);
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
},
|
||||
.rest => {
|
||||
const destination_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index);
|
||||
const instruction_id: Instruction.Id = switch (ir_binary_operation.id) {
|
||||
@ -3571,6 +3755,7 @@ pub const MIR = struct {
|
||||
.signed_divide => unreachable,
|
||||
.shift_left => unreachable,
|
||||
.shift_right => unreachable,
|
||||
.integer_compare_equal => unreachable,
|
||||
};
|
||||
|
||||
const instruction_descriptor = instruction_descriptors.get(instruction_id);
|
||||
@ -3651,6 +3836,7 @@ pub const MIR = struct {
|
||||
.signed_divide => unreachable,
|
||||
.shift_left => unreachable,
|
||||
.shift_right => unreachable,
|
||||
.integer_compare_equal => unreachable,
|
||||
};
|
||||
|
||||
const instruction_descriptor = instruction_descriptors.get(instruction_id);
|
||||
@ -3706,88 +3892,38 @@ pub const MIR = struct {
|
||||
.rest => unreachable,
|
||||
},
|
||||
}
|
||||
// if (!is_left_load and is_right_load) {
|
||||
// unreachable;
|
||||
// } else if (is_left_load and !is_right_load) {
|
||||
// try instruction_selection.folded_loads.putNoClobber(mir.allocator, ir_binary_operation.left, {});
|
||||
// const instruction_id: Instruction.Id = switch (ir_binary_operation.id) {
|
||||
// .add => switch (value_type) {
|
||||
// .i32 => .add32mr,
|
||||
// else => unreachable,
|
||||
// },
|
||||
// .sub => switch (value_type) {
|
||||
// .i32 => .sub32mr,
|
||||
// else => unreachable,
|
||||
// },
|
||||
// .logical_and => switch (value_type) {
|
||||
// .i32 => .and32mr,
|
||||
// else => unreachable,
|
||||
// },
|
||||
// .logical_xor => switch (value_type) {
|
||||
// .i32 => .xor32mr,
|
||||
// else => unreachable,
|
||||
// },
|
||||
// .logical_or => switch (value_type) {
|
||||
// .i32 => .or32mr,
|
||||
// else => unreachable,
|
||||
// },
|
||||
// .signed_multiply => switch (value_type) {
|
||||
// .i32 => .imul32mr,
|
||||
// else => unreachable,
|
||||
// },
|
||||
// .signed_divide => unreachable,
|
||||
// .shift_left => unreachable,
|
||||
// .shift_right => unreachable,
|
||||
// };
|
||||
//
|
||||
// const instruction_descriptor = instruction_descriptors.get(instruction_id);
|
||||
// // const left_register = try instruction_selection.getRegisterForValue(mir, ir_binary_operation.left);
|
||||
// const destination_operand_id = instruction_descriptor.operands[0].id;
|
||||
// const left_operand_id = instruction_descriptor.operands[1].id;
|
||||
// const right_operand_id = instruction_descriptor.operands[2].id;
|
||||
// // const ir_load = mir.ir.loads.get(mir.ir.instructions.get(ir_binary_operation.right).u.load);
|
||||
// // const right_operand_addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_load.instruction);
|
||||
// const ir_load = mir.ir.loads.get(mir.ir.instructions.get(ir_binary_operation.left).u.load);
|
||||
//
|
||||
// const right_register = try instruction_selection.getRegisterForValue(mir, ir_binary_operation.right);
|
||||
// const right_operand = Operand{
|
||||
// .id = right_operand_id,
|
||||
// .u = .{
|
||||
// .register = right_register,
|
||||
// },
|
||||
// .flags = .{},
|
||||
// };
|
||||
//
|
||||
// const left_operand_addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_load.instruction);
|
||||
// const destination_operand = Operand{
|
||||
// .id = destination_operand_id,
|
||||
// .u = .{
|
||||
// .memory = .{ .addressing_mode = left_operand_addressing_mode },
|
||||
// },
|
||||
// .flags = .{},
|
||||
// };
|
||||
//
|
||||
// const left_operand = Operand{
|
||||
// .id = left_operand_id,
|
||||
// .u = .{
|
||||
// .memory = .{ .addressing_mode = left_operand_addressing_mode },
|
||||
// },
|
||||
// .flags = .{},
|
||||
// };
|
||||
//
|
||||
// const binary_op_instruction = try mir.buildInstruction(instruction_selection, instruction_id, &.{
|
||||
// destination_operand,
|
||||
// left_operand,
|
||||
// right_operand,
|
||||
// });
|
||||
//
|
||||
// try instruction_selection.instruction_cache.append(mir.allocator, binary_op_instruction);
|
||||
// } else if (!is_left_load and !is_right_load) {
|
||||
// } else {
|
||||
// }
|
||||
},
|
||||
}
|
||||
},
|
||||
.phi => |ir_phi_index| {
|
||||
const ir_phi = mir.ir.phis.get(ir_phi_index);
|
||||
|
||||
// TODO: check if we should something else here
|
||||
const virtual_register = instruction_selection.value_map.get(ir_instruction_index).?;
|
||||
for (ir_phi.operands.items) |phi_operand| {
|
||||
try instruction_selection.updateValueMap(mir.allocator, phi_operand.value, virtual_register, false);
|
||||
}
|
||||
},
|
||||
.jump => |ir_jump_index| {
|
||||
const ir_jump = mir.ir.jumps.get(ir_jump_index);
|
||||
const ir_target_block = ir_jump.target;
|
||||
const target_block = instruction_selection.block_map.get(ir_target_block).?;
|
||||
|
||||
const jump = try mir.buildInstruction(instruction_selection, .jump_pcrel32, &.{
|
||||
Operand{
|
||||
.id = .i64i32imm_brtarget,
|
||||
.u = .{
|
||||
.pc_relative = .{
|
||||
.index = @bitCast(target_block),
|
||||
.section = @intCast(mir.ir.section_manager.getTextSectionIndex()),
|
||||
.kind = .block,
|
||||
},
|
||||
},
|
||||
.flags = .{},
|
||||
},
|
||||
});
|
||||
try instruction_selection.instruction_cache.append(mir.allocator, jump);
|
||||
},
|
||||
else => |t| @panic(@tagName(t)),
|
||||
}
|
||||
|
||||
@ -3810,8 +3946,6 @@ pub const MIR = struct {
|
||||
try instruction_selection.emitLiveInCopies(mir, function.blocks.items[0]);
|
||||
|
||||
logln(.codegen, .instruction_selection_ir_function, "Selected instructions for {}", .{function});
|
||||
|
||||
ir_function_index = ir_function_definition_iterator.getCurrentIndex();
|
||||
}
|
||||
|
||||
mir.entry_point = entry_point orelse unreachable;
|
||||
@ -4202,7 +4336,7 @@ pub const MIR = struct {
|
||||
.virtual => |virtual_register_index| blk: {
|
||||
const stack_slot = register_allocator.stack_slots.get(virtual_register_index) != null;
|
||||
const live_out = register_allocator.live_virtual_registers.get(virtual_register_index).?.live_out;
|
||||
log(.codegen, .register_allocation_problematic_hint, "Register {s} has stack slot: {}. Live out: {}", .{ @tagName(physical_register), stack_slot, live_out });
|
||||
logln(.codegen, .register_allocation_problematic_hint, "Register {s} has stack slot: {}. Live out: {}", .{ @tagName(physical_register), stack_slot, live_out });
|
||||
const sure_spill = stack_slot or live_out;
|
||||
break :blk if (sure_spill) SpillCost.clean else SpillCost.dirty;
|
||||
},
|
||||
@ -4862,18 +4996,17 @@ pub const MIR = struct {
|
||||
|
||||
fn clearVirtualRegisters(mir: *MIR) void {
|
||||
var vr_it = mir.virtual_registers.iterator();
|
||||
var vr_index = vr_it.getCurrentIndex();
|
||||
var verified_virtual_register_count: usize = 0;
|
||||
var skipped: usize = 0;
|
||||
while (vr_it.nextPointer()) |vr| {
|
||||
while (vr_it.nextIndex()) |virtual_register_index| {
|
||||
const virtual_register = mir.virtual_registers.get(virtual_register_index);
|
||||
verified_virtual_register_count += 1;
|
||||
if (vr.use_def_list_head.invalid) {
|
||||
if (virtual_register.use_def_list_head.invalid) {
|
||||
skipped += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
// mir.verifyUseList(vr.use_def_list_head, vr_index);
|
||||
vr_index = vr_it.getCurrentIndex();
|
||||
mir.verifyUseList(virtual_register.use_def_list_head, virtual_register_index);
|
||||
}
|
||||
|
||||
logln(.codegen, .register_allocation_operand_list_verification, "Verified {} virtual registers ({} skipped)", .{ verified_virtual_register_count, skipped });
|
||||
@ -4954,8 +5087,8 @@ pub const MIR = struct {
|
||||
try function_offsets.ensureTotalCapacity(mir.allocator, mir.functions.len);
|
||||
try image.section_manager.getTextSection().symbol_table.ensureTotalCapacity(mir.allocator, mir.functions.len);
|
||||
|
||||
while (function_iterator.nextPointer()) |function| {
|
||||
const function_index = mir.functions.indexOf(function);
|
||||
while (function_iterator.nextIndex()) |function_index| {
|
||||
const function = mir.functions.get(function_index);
|
||||
logln(.codegen, .encoding, "\n{s}:", .{function.name});
|
||||
|
||||
const function_offset: u32 = @intCast(image.section_manager.getCodeOffset());
|
||||
@ -5550,6 +5683,65 @@ pub const MIR = struct {
|
||||
|
||||
try image.section_manager.appendCodeByte(source_immediate);
|
||||
},
|
||||
.cmp32mi8 => {
|
||||
const left_operand_index = instruction.operands.items[0];
|
||||
const left_operand = mir.operands.get(left_operand_index);
|
||||
|
||||
switch (left_operand.u) {
|
||||
.memory => |memory| switch (memory.addressing_mode.base) {
|
||||
.register_base => unreachable,
|
||||
.frame_index => |frame_index| {
|
||||
const modrm = ModRm{
|
||||
.rm = @intFromEnum(Encoding.GP64.bp),
|
||||
.reg = switch (instruction.id) {
|
||||
.cmp32mi8 => 7,
|
||||
else => unreachable,
|
||||
},
|
||||
.mod = @as(u2, @intFromBool(false)) << 1 | @intFromBool(true),
|
||||
};
|
||||
try image.section_manager.appendCodeByte(@bitCast(modrm));
|
||||
|
||||
const stack_offset = computeStackOffset(function.instruction_selection.stack_objects.items[0 .. frame_index + 1]);
|
||||
const displacement_bytes: u3 = if (std.math.cast(i8, stack_offset)) |_| @sizeOf(i8) else if (std.math.cast(i32, stack_offset)) |_| @sizeOf(i32) else unreachable;
|
||||
|
||||
const stack_bytes = std.mem.asBytes(&stack_offset)[0..displacement_bytes];
|
||||
try image.section_manager.appendCode(stack_bytes);
|
||||
},
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
|
||||
const source_operand_index = instruction.operands.items[1];
|
||||
const source_operand = mir.operands.get(source_operand_index);
|
||||
assert(source_operand.id == .imm8);
|
||||
const source_immediate: u8 = @intCast(source_operand.u.immediate);
|
||||
|
||||
try image.section_manager.appendCodeByte(source_immediate);
|
||||
},
|
||||
.seter => {
|
||||
const operand_index = instruction.operands.items[0];
|
||||
const operand = mir.operands.get(operand_index);
|
||||
assert(operand.u == .register);
|
||||
const instruction_descriptor = instruction_descriptors.get(instruction.id);
|
||||
const opcode = &[_]u8{ 0x0f, @intCast(instruction_descriptor.opcode) };
|
||||
try image.section_manager.appendCode(opcode);
|
||||
const modrm = ModRm{
|
||||
.rm = @intFromEnum(Encoding.GP64.bp),
|
||||
.reg = 0,
|
||||
.mod = @as(u2, @intFromBool(false)) << 1 | @intFromBool(true),
|
||||
};
|
||||
try image.section_manager.appendCodeByte(@bitCast(modrm));
|
||||
},
|
||||
.and8ri => {
|
||||
const destination_register_index = instruction.operands.items[0];
|
||||
const destination_register_operand = mir.operands.get(destination_register_index);
|
||||
switch (destination_register_operand.u.register.index.physical) {
|
||||
.al => {
|
||||
unreachable;
|
||||
},
|
||||
else => |t| @panic(@tagName(t)),
|
||||
}
|
||||
},
|
||||
else => |t| @panic(@tagName(t)),
|
||||
}
|
||||
|
||||
@ -5741,6 +5933,7 @@ pub const MIR = struct {
|
||||
.id = switch (implicitly_used_register) {
|
||||
.eax => .gp32,
|
||||
.edx => .gp32,
|
||||
.eflags => .ccr,
|
||||
else => |t| @panic(@tagName(t)),
|
||||
},
|
||||
.u = .{
|
||||
@ -5877,33 +6070,44 @@ const ModRm = packed struct(u8) {
|
||||
mod: u2,
|
||||
};
|
||||
|
||||
fn getIrType(intermediate: *ir.Result, ir_instruction_index: ir.Instruction.Index) ir.Type {
|
||||
fn getIrType(intermediate: *IR, ir_instruction_index: ir.Instruction.Index) ?ir.Type {
|
||||
const ir_instruction = intermediate.instructions.get(ir_instruction_index);
|
||||
return switch (ir_instruction.u) {
|
||||
.argument => |argument_index| intermediate.arguments.get(argument_index).type,
|
||||
.stack => |stack_index| intermediate.stack_references.get(stack_index).type,
|
||||
.load => |load_index| getIrType(intermediate, intermediate.loads.get(load_index).instruction),
|
||||
.syscall => |_| .i64,
|
||||
.load_integer => |integer| integer.type,
|
||||
.load_string_literal => .i64,
|
||||
.call => |call_index| intermediate.function_declarations.get(intermediate.calls.get(call_index).function).return_type,
|
||||
.stack_slot => |stack_index| intermediate.stack_slots.get(stack_index).type,
|
||||
.load => |load_index| getIrType(intermediate, intermediate.loads.get(load_index).value),
|
||||
.syscall => |_| .{
|
||||
.scalar = .i64,
|
||||
},
|
||||
.constant_integer => |integer| .{
|
||||
.scalar = integer.type,
|
||||
},
|
||||
.constant_string_literal => .{
|
||||
.scalar = .i64,
|
||||
},
|
||||
.call => |call_index| intermediate.function_definitions.get(intermediate.calls.get(call_index).callable.function_definition).callable.return_type,
|
||||
.sign_extend => |cast_index| intermediate.casts.get(cast_index).type,
|
||||
.binary_operation => |binary_operation_index| intermediate.binary_operations.get(binary_operation_index).type,
|
||||
.phi => |phi_index| getIrType(intermediate, intermediate.phis.get(phi_index).operands.items[0].value),
|
||||
else => |t| @panic(@tagName(t)),
|
||||
};
|
||||
}
|
||||
|
||||
fn resolveScalarType(ir_scalar_type: ir.Type.Scalar) ValueType.Id {
|
||||
return switch (ir_scalar_type) {
|
||||
.i1 => unreachable,
|
||||
inline .i8,
|
||||
.i16,
|
||||
.i32,
|
||||
.i64,
|
||||
=> |ir_type_ct| @field(ValueType.Id, @typeInfo(ir.Type.Scalar).Enum.fields[@intFromEnum(ir_type_ct)].name),
|
||||
};
|
||||
}
|
||||
|
||||
fn resolveType(ir_type: ir.Type) ValueType.Id {
|
||||
return switch (ir_type) {
|
||||
inline //.i8,
|
||||
//.i16,
|
||||
.i32,
|
||||
.i64,
|
||||
=> |ir_type_ct| @field(ValueType.Id, @typeInfo(ir.Type).Enum.fields[@intFromEnum(ir_type_ct)].name),
|
||||
.i8, .i16 => unreachable,
|
||||
.void,
|
||||
.noreturn,
|
||||
=> unreachable,
|
||||
.scalar => |ir_scalar_type| resolveScalarType(ir_scalar_type),
|
||||
else => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -31,6 +31,7 @@ pub fn BlockList(comptime T: type) type {
|
||||
};
|
||||
|
||||
return struct {
|
||||
// TODO: make this not reallocate the whole block. Instead, use a pointer to the block as the ArrayList item
blocks: ArrayList(Block) = .{},
len: usize = 0,
first_block: u32 = 0,
@ -38,14 +39,14 @@ pub fn BlockList(comptime T: type) type {
const List = @This();

pub const Index = packed struct(u32) {
index: u6,
element: u6,
block: u24,
_reserved: bool = false,
invalid: bool = false,

pub const invalid = Index{
.invalid = true,
.index = 0,
.element = 0,
.block = 0,
};

@ -63,42 +64,50 @@ pub fn BlockList(comptime T: type) type {
const block: u24 = @intCast(index / item_count);
const i: u6 = @intCast(index % item_count);
return .{
.index = i,
.element = i,
.block = block,
};
}
};

pub const Iterator = struct {
block_index: u24,
element_index: u6,
list: *const List,
index: Index,
list: *List,

pub fn getCurrentIndex(i: *const Iterator) Index {
return .{
.block = i.block_index,
.index = @intCast(i.element_index),
pub const Pair = struct {
index: Index,
};
}

pub fn next(i: *Iterator) ?T {
return if (i.nextPointer()) |ptr| ptr.* else null;
}
pub fn nextIndex(i: *Iterator) ?Index {
// TODO: optimize with ctz and masking out already iterated indices in the bitmask
for (i.index.block..i.list.blocks.items.len) |block_index| {
for (@as(u8, i.index.element)..item_count) |element_index| {
if (i.list.blocks.items[block_index].bitset.isSet(element_index)) {
const index = Index{
.element = @intCast(element_index),
.block = @intCast(block_index),
};

pub fn nextPointer(i: *Iterator) ?*T {
for (i.block_index..i.list.blocks.items.len) |block_index| {
for (@as(u8, i.element_index)..item_count) |element_index| {
if (i.list.blocks.items[i.block_index].bitset.isSet(element_index)) {
i.element_index = @intCast(element_index);
i.element_index +%= 1;
i.block_index = @as(u24, @intCast(block_index)) + @intFromBool(i.element_index < element_index);
return &i.list.blocks.items[block_index].items[element_index];
i.index = index;
i.index.element +%= 1;
i.index.block = @as(u24, @intCast(block_index)) + @intFromBool(i.index.element < element_index);

return index;
}
}
}

return null;
}

pub fn nextPointer(i: *Iterator) ?*T {
if (i.nextIndex()) |index| {
const result = i.list.get(index);
return result;
} else {
return null;
}
}
};

pub const Allocation = struct {
@ -106,17 +115,19 @@ pub fn BlockList(comptime T: type) type {
index: Index,
};

pub fn iterator(list: *const List) Iterator {
pub fn iterator(list: *List) Iterator {
return .{
.block_index = 0,
.element_index = 0,
.index = Index{
.element = 0,
.block = 0,
},
.list = list,
};
}

pub fn get(list: *List, index: Index) *T {
assert(!index.invalid);
return &list.blocks.items[index.block].items[index.index];
return &list.blocks.items[index.block].items[index.element];
}

pub fn append(list: *List, allocator: Allocator, element: T) !Allocation {
@ -131,12 +142,12 @@ pub fn BlockList(comptime T: type) type {
const result = switch (list.len < max_allocation) {
true => blk: {
const block = &list.blocks.items[list.first_block];
if (block.allocateIndex()) |index| {
const ptr = &block.items[index];
if (block.allocateIndex()) |element_index| {
const ptr = &block.items[element_index];
break :blk Allocation{
.ptr = ptr,
.index = .{
.index = index,
.element = element_index,
.block = @intCast(list.first_block),
},
};
@ -148,13 +159,13 @@ pub fn BlockList(comptime T: type) type {
const block_index = list.blocks.items.len;
const new_block = list.blocks.addOneAssumeCapacity();
new_block.* = .{};
const index = new_block.allocateIndex() catch unreachable;
const ptr = &new_block.items[index];
const element_index = new_block.allocateIndex() catch unreachable;
const ptr = &new_block.items[element_index];
list.first_block += @intFromBool(block_index != 0);
break :blk Allocation{
.ptr = ptr,
.index = .{
.index = index,
.element = element_index,
.block = @intCast(block_index),
},
};
@ -174,7 +185,7 @@ pub fn BlockList(comptime T: type) type {
}
}

pub fn indexOf(list: *const List, elem: *const T) Index {
pub fn indexOf(list: *List, elem: *const T) Index {
const address = @intFromPtr(elem);
for (list.blocks.items, 0..) |*block, block_index| {
const base = @intFromPtr(&block.items[0]);
@ -182,7 +193,7 @@ pub fn BlockList(comptime T: type) type {
if (address >= base and address < top) {
return .{
.block = @intCast(block_index),
.index = @intCast(@divExact(address - base, @sizeOf(T))),
.element = @intCast(@divExact(address - base, @sizeOf(T))),
};
}
}
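A minimal usage sketch (not part of the commit) of the reworked BlockList iterator: Index now addresses an element by block/element instead of block/index, and nextPointer is a thin wrapper over nextIndex plus get. Assuming the 64-element blocks implied by the u6 element field, a flat position maps as block = position / 64, element = position % 64 (e.g. position 70 -> block 1, element 6). The element type u32 and the function name below are illustrative only.

// Illustrative only: walking a BlockList(u32) with the new Index-based iterator.
fn sumAll(list: *BlockList(u32)) u32 {
    var sum: u32 = 0;
    var it = list.iterator();
    // nextIndex() scans each block's bitset and yields the packed Index of
    // every live element; get() resolves that Index to a pointer.
    while (it.nextIndex()) |index| {
        sum += list.get(index).*;
    }
    return sum;
}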
@ -146,6 +146,7 @@ const Analyzer = struct {
var reaches_end = true;
const block_node = analyzer.getScopeNode(scope_index, node_index);
var statement_nodes = ArrayList(Node.Index){};

switch (block_node.id) {
.block_one, .comptime_block_one => {
try statement_nodes.append(analyzer.allocator, block_node.left);
@ -175,7 +176,7 @@ const Analyzer = struct {
}

const statement_node = analyzer.getScopeNode(scope_index, statement_node_index);
const statement_value = switch (statement_node.id) {
const statement_value_index = switch (statement_node.id) {
.assign => (try analyzer.module.values.append(analyzer.allocator, try analyzer.processAssignment(scope_index, statement_node_index))).index,
.simple_while => blk: {
const loop_allocation = try analyzer.module.loops.append(analyzer.allocator, .{
@ -196,7 +197,7 @@ const Analyzer = struct {
},
.@"unreachable" => blk: {
reaches_end = false;
break :blk Compilation.Values.@"unreachable".getIndex();
break :blk unreachable_index;
},
.simple_symbol_declaration => blk: {
const declaration_index = try analyzer.symbolDeclaration(scope_index, statement_node_index, .local);
@ -216,18 +217,43 @@ const Analyzer = struct {
.@"return" => blk: {
reaches_end = false;

const return_value_allocation = try analyzer.module.values.append(analyzer.allocator, try analyzer.processReturn(scope_index, expect_type, statement_node_index));
const return_expresssion = try analyzer.processReturn(scope_index, expect_type, statement_node_index);
const return_value_allocation = try analyzer.module.values.append(analyzer.allocator, return_expresssion);

break :blk return_value_allocation.index;
},
.call_two, .call => (try analyzer.module.values.append(analyzer.allocator, .{
.call = try analyzer.processCall(scope_index, statement_node_index),
})).index,
.@"switch" => (try analyzer.module.values.append(analyzer.allocator, try analyzer.processSwitch(scope_index, statement_node_index))).index,
.call_two, .call => blk: {
const call_index = try analyzer.processCall(scope_index, statement_node_index);
const call_statement = try analyzer.module.values.append(analyzer.allocator, .{
.call = call_index,
});
if (call_statement.ptr.getType(analyzer.module).eq(Type.noreturn)) {
reaches_end = false;
}
break :blk call_statement.index;
},
// TODO: reaches end switch statement
.@"switch" => blk: {
const switch_value = try analyzer.processSwitch(scope_index, expect_type, statement_node_index);
switch (switch_value) {
.@"return" => reaches_end = false,
else => {},
}
const switch_value_allocation = try analyzer.module.values.append(analyzer.allocator, switch_value);

break :blk switch_value_allocation.index;
},
.if_else => blk: {
const if_else_value = try analyzer.processIfElse(scope_index, expect_type, statement_node_index);
const branch = analyzer.module.branches.get(if_else_value.branch);
reaches_end = branch.reaches_end;
const branch_statement = try analyzer.module.values.append(analyzer.allocator, if_else_value);
break :blk branch_statement.index;
},
else => |t| @panic(@tagName(t)),
};

try statements.append(analyzer.allocator, statement_value);
try statements.append(analyzer.allocator, statement_value_index);
}

const block_allocation = try analyzer.module.blocks.append(analyzer.allocator, .{
@ -254,7 +280,7 @@ const Analyzer = struct {

const left_type = switch (left_value_index.invalid) {
false => switch (analyzer.module.values.get(left_value_index).*) {
.function => |function_index| analyzer.module.function_prototypes.get(analyzer.module.types.get(analyzer.module.functions.get(function_index).prototype).function).return_type,
.function_definition => |function_index| analyzer.module.function_prototypes.get(analyzer.module.types.get(analyzer.module.function_definitions.get(function_index).prototype).function).return_type,
else => |t| @panic(@tagName(t)),
},
true => Type.Index.invalid,
@ -270,9 +296,9 @@ const Analyzer = struct {
};

switch (analyzer.module.values.get(left_value_index).*) {
.function => |function_index| {
const function = analyzer.module.functions.get(function_index);
const function_prototype = analyzer.module.function_prototypes.get(analyzer.module.types.get(function.prototype).function);
.function_definition => |function_index| {
const function_definition = analyzer.module.function_definitions.get(function_index);
const function_prototype = analyzer.module.function_prototypes.get(analyzer.module.types.get(function_definition.prototype).function);
const argument_declarations = function_prototype.arguments.?;
logln(.sema, .call, "Argument declaration count: {}. Argument node list count: {}\n", .{ argument_declarations.len, call_argument_node_list.len });
var argument_array = ArrayList(Value.Index){};
@ -340,7 +366,8 @@ const Analyzer = struct {
}
}

fn processSwitch(analyzer: *Analyzer, scope_index: Scope.Index, node_index: Node.Index) !Value {
fn processSwitch(analyzer: *Analyzer, scope_index: Scope.Index, expect_type: ExpectType, node_index: Node.Index) !Value {
_ = expect_type;
const node = analyzer.getScopeNode(scope_index, node_index);
assert(node.id == .@"switch");

@ -458,6 +485,44 @@ const Analyzer = struct {
unreachable;
}

fn processIfElse(analyzer: *Analyzer, scope_index: Scope.Index, expect_type: ExpectType, node_index: Node.Index) !Value {
const node = analyzer.getScopeNode(scope_index, node_index);
assert(node.id == .if_else);
assert(!node.left.invalid);
assert(!node.right.invalid);

const if_branch_node = analyzer.getScopeNode(scope_index, node.left);
const if_condition = try analyzer.unresolvedAllocate(scope_index, ExpectType.boolean, if_branch_node.left);
switch (if_condition.ptr.*) {
.declaration_reference => {
const true_expression = try analyzer.unresolvedAllocate(scope_index, expect_type, if_branch_node.right);
const true_reaches_end = switch (true_expression.ptr.*) {
.block => |block_index| analyzer.module.blocks.get(block_index).reaches_end,
else => |t| @panic(@tagName(t)),
};
const false_expression = try analyzer.unresolvedAllocate(scope_index, expect_type, node.right);
const false_reaches_end = switch (true_expression.ptr.*) {
.block => |block_index| analyzer.module.blocks.get(block_index).reaches_end,
else => |t| @panic(@tagName(t)),
};
const reaches_end = true_reaches_end and false_reaches_end;

const branch = try analyzer.module.branches.append(analyzer.allocator, .{
.condition = if_condition.index,
.true_expression = true_expression.index,
.false_expression = false_expression.index,
.reaches_end = reaches_end,
});

return Value{
.branch = branch.index,
};
},
.bool => unreachable,
else => |t| @panic(@tagName(t)),
}
}

fn processAssignment(analyzer: *Analyzer, scope_index: Scope.Index, node_index: Node.Index) !Value {
const node = analyzer.getScopeNode(scope_index, node_index);
assert(node.id == .assign);
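To make the new reaches_end bookkeeping concrete, here is a small illustrative Zig test (not part of the commit) showing how the two branch results compose for an if/else whose arms both end in a return, which is exactly the shape of the test added at the bottom of this commit:

test "branch reaches_end composition (illustrative)" {
    // Each arm ends in `return`, so neither arm falls through to the code
    // after the branch; the branch as a whole is therefore terminal.
    const true_reaches_end = false; // then-block ends in `return 1;`
    const false_reaches_end = false; // else-block ends in `return 0;`
    const branch_reaches_end = true_reaches_end and false_reaches_end;
    try @import("std").testing.expect(!branch_reaches_end);
}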
@ -536,9 +601,15 @@ const Analyzer = struct {
.divide => .divide,
.shift_left => .shift_left,
.shift_right => .shift_right,
.compare_equal => .compare_equal,
else => |t| @panic(@tagName(t)),
};
const left_expect_type: ExpectType = switch (binary_operation_id) {
.compare_equal => ExpectType.none,
else => expect_type,
};

const left_allocation = try analyzer.unresolvedAllocate(scope_index, left_expect_type, node.left);
const right_expect_type: ExpectType = switch (binary_operation_id) {
.add,
.sub,
@ -553,18 +624,26 @@ const Analyzer = struct {
=> ExpectType{
.type_index = Type.u8,
},
.compare_equal => ExpectType{
.type_index = left_allocation.ptr.getType(analyzer.module),
},
};

const left_allocation = try analyzer.unresolvedAllocate(scope_index, expect_type, node.left);
const right_allocation = try analyzer.unresolvedAllocate(scope_index, right_expect_type, node.right);
const left_type = left_allocation.ptr.getType(analyzer.module);
const right_type = right_allocation.ptr.getType(analyzer.module);
_ = right_type;
// const right_type = right_allocation.ptr.getType(analyzer.module);
// _ = right_type;

const binary_operation = try analyzer.module.binary_operations.append(analyzer.allocator, .{
.left = left_allocation.index,
.right = right_allocation.index,
.type = left_type,
.type = switch (expect_type) {
.none => switch (binary_operation_id) {
.logical_and => left_type,
else => |t| @panic(@tagName(t)),
},
.type_index => |type_index| type_index,
else => |t| @panic(@tagName(t)),
},
.id = binary_operation_id,
});
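A hedged reading of the new compare_equal handling above: the left operand is resolved with no expected type, the right operand is then expected to match the left operand's type, and the comparison's own type comes from the caller's expect_type (normally bool). A tiny illustrative Zig test (not part of the commit) of that typing rule:

test "compare_equal expectation rule (illustrative)" {
    const left: u8 = 1; // resolved first, with no expected type
    const right: @TypeOf(left) = 2; // right side adopts the left side's type
    const result: bool = left == right; // the comparison itself is a bool
    try @import("std").testing.expect(result == false);
}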
@ -615,7 +694,7 @@ const Analyzer = struct {
try analyzer.resolveNode(init_value, lookup.scope, expect_type, init_value.unresolved.node_index);
declaration.type = init_value.getType(analyzer.module);
switch (init_value.*) {
.function => |function_index| {
.function_definition => |function_index| {
try analyzer.module.function_name_map.put(analyzer.allocator, function_index, declaration.name);
},
else => {},
@ -721,7 +800,7 @@ const Analyzer = struct {
const value_ref = analyzer.module.values.get(value_index);
break :blk value_ref.*;
},
.keyword_true => {
.keyword_true, .keyword_false => blk: {
switch (expect_type) {
.none => {},
.type_index => |expected_type| {
@ -732,10 +811,13 @@ const Analyzer = struct {
else => unreachable,
}

// TODO
unreachable;

// break :blk Values.getIndex(.bool_true);
break :blk .{
.bool = switch (node.id) {
.keyword_true => true,
.keyword_false => false,
else => unreachable,
},
};
},
.compiler_intrinsic_one, .compiler_intrinsic_two, .compiler_intrinsic => blk: {
const intrinsic_name = analyzer.tokenIdentifier(scope_index, node.token + 1);
@ -826,6 +908,8 @@ const Analyzer = struct {
});

const function_prototype_index = try analyzer.functionPrototype(function_scope_allocation.index, node.left);
const function_prototype = analyzer.module.function_prototypes.get(function_prototype_index);
assert(!function_prototype.attributes.@"extern");

const function_body = try analyzer.block(function_scope_allocation.index, .{
.type_index = analyzer.functionPrototypeReturnType(function_prototype_index),
@ -835,14 +919,14 @@ const Analyzer = struct {
.function = function_prototype_index,
});

const function_allocation = try analyzer.module.functions.append(analyzer.allocator, .{
const function_allocation = try analyzer.module.function_definitions.append(analyzer.allocator, .{
.prototype = prototype_type.index,
.body = function_body,
.scope = function_scope_allocation.index,
});

break :blk .{
.function = function_allocation.index,
.function_definition = function_allocation.index,
};
},
.function_prototype => blk: {
@ -854,13 +938,13 @@ const Analyzer = struct {
const prototype_type = try analyzer.module.types.append(analyzer.allocator, .{
.function = function_prototype_index,
});
const function_allocation = try analyzer.module.functions.append(analyzer.allocator, .{
const function_declaration = try analyzer.module.function_declarations.append(analyzer.allocator, .{
.prototype = prototype_type.index,
.body = Block.Index.invalid,
.scope = Scope.Index.invalid,
});
break :b .{
.function = function_allocation.index,
.function_declaration = function_declaration.index,
};
},
false => unreachable,
@ -919,7 +1003,7 @@ const Analyzer = struct {
const right_index = try analyzer.doIdentifier(struct_type.scope, ExpectType.none, node.right.value, scope_index);
const right_value = analyzer.module.values.get(right_index);
switch (right_value.*) {
.function, .type, .enum_field => break :blk right_value.*,
.function_definition, .type, .enum_field => break :blk right_value.*,
.declaration_reference => |declaration_reference| {
const declaration = analyzer.module.declarations.get(declaration_reference.value);
const declaration_name = analyzer.module.getName(declaration.name).?;
@ -983,7 +1067,7 @@ const Analyzer = struct {
.string_literal => .{
.string_literal = try analyzer.processStringLiteral(scope_index, node_index),
},
.@"switch" => try analyzer.processSwitch(scope_index, node_index),
.@"switch" => try analyzer.processSwitch(scope_index, expect_type, node_index),
.enum_type => blk: {
const list_node = analyzer.getScopeNode(scope_index, node.left);
const field_node_list = switch (list_node.id) {
@ -1038,6 +1122,7 @@ const Analyzer = struct {
.divide,
.shift_left,
.shift_right,
.compare_equal,
=> try analyzer.processBinaryOperation(scope_index, expect_type, node_index),
.expression_group => return try analyzer.resolveNode(value, scope_index, expect_type, node.left), //unreachable,
else => |t| @panic(@tagName(t)),
@ -1146,6 +1231,7 @@ const Analyzer = struct {
.void_type => Type.void,
.ssize_type => Type.ssize,
.usize_type => Type.usize,
.bool_type => Type.boolean,
else => |t| @panic(@tagName(t)),
};
return type_index;
@ -1525,6 +1611,9 @@ const Analyzer = struct {
},
else => |t| @panic(@tagName(t)),
},
.bool => switch (source_type.*) {
else => |t| @panic(@tagName(t)),
},
else => |t| @panic(@tagName(t)),
}
},
@ -1584,6 +1673,8 @@ const ExpectType = union(enum) {
};
};

pub var unreachable_index = Value.Index.invalid;

pub fn initialize(compilation: *Compilation, module: *Module, package: *Package, main_value: *Value) !void {
_ = try analyzeExistingPackage(main_value, compilation, module, package);

@ -1593,7 +1684,7 @@ pub fn initialize(compilation: *Compilation, module: *Module, package: *Package,
if (equal(u8, declaration_name, "_start")) {
const value = module.values.get(decl.init_value);
module.entry_point = switch (value.*) {
.function => |function_index| function_index,
.function_definition => |function_index| function_index,
.unresolved => panic("Unresolved declaration: {s}\n", .{declaration_name}),
else => |t| @panic(@tagName(t)),
};
@ -162,6 +162,7 @@ pub const Node = packed struct(u128) {
divide = 68,
shift_left = 69,
shift_right = 70,
bool_type = 71,
};
};

@ -611,7 +612,7 @@ const Analyzer = struct {
analyzer.token_i += 1;

_ = try analyzer.expectToken(.left_parenthesis);
const if_expression = try analyzer.expression();
const if_condition = try analyzer.expression();
_ = try analyzer.expectToken(.right_parenthesis);

const if_block = try analyzer.block(.{ .is_comptime = false });
@ -619,7 +620,7 @@ const Analyzer = struct {
const if_node = try analyzer.addNode(.{
.id = .@"if",
.token = if_token,
.left = if_expression,
.left = if_condition,
.right = if_block,
});

@ -782,7 +783,6 @@ const Analyzer = struct {
const token = analyzer.tokens[analyzer.token_i];
// logln("Looping in expression precedence with token {}\n", .{token});
const operator: PrecedenceOperator = switch (token.id) {
.equal,
.semicolon,
.right_parenthesis,
.right_brace,
@ -1180,6 +1180,15 @@ const Analyzer = struct {
.right = Node.Index.invalid,
}),
.hash => analyzer.compilerIntrinsic(),
.fixed_keyword_bool => analyzer.addNode(.{
.id = .bool_type,
.token = blk: {
analyzer.token_i += 1;
break :blk token_i;
},
.left = Node.Index.invalid,
.right = Node.Index.invalid,
}),
.keyword_unsigned_integer, .keyword_signed_integer => |signedness| analyzer.addNode(.{
.id = switch (signedness) {
.keyword_unsigned_integer => .unsigned_integer_type,
@ -6,6 +6,7 @@ pub const panic = Compilation.panic;

pub fn main() !void {
const allocator = std.heap.page_allocator;

try Compilation.init(allocator);
}
8
test/simple_bool/main.nat
Normal file
8
test/simple_bool/main.nat
Normal file
@ -0,0 +1,8 @@
const main = fn () s32 {
var false_boolean: bool = false;
if (false_boolean) {
return 1;
} else {
return 0;
}
}
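For reference, a Zig rendering (illustrative, not part of the commit) of the control flow this new Nativity test exercises; the program is expected to exit with 0 because the condition is false:

test "simple_bool expected behavior (illustrative)" {
    var false_boolean: bool = false;
    false_boolean = false; // mirror the mutable declaration in main.nat
    const exit_code: i32 = if (false_boolean) 1 else 0;
    try @import("std").testing.expect(exit_code == 0);
}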