From aefabd6108299620f90ae32127987bf847319d59 Mon Sep 17 00:00:00 2001 From: David Gonzalez Martin Date: Tue, 7 Nov 2023 15:00:48 -0600 Subject: [PATCH] almost working register allocator --- src/backend/intermediate_representation.zig | 58 +- src/backend/x86_64.zig | 755 ++++++++++++++++---- src/data_structures.zig | 8 +- src/frontend/semantic_analyzer.zig | 70 +- src/frontend/syntactic_analyzer.zig | 16 +- 5 files changed, 676 insertions(+), 231 deletions(-) diff --git a/src/backend/intermediate_representation.zig b/src/backend/intermediate_representation.zig index 2b4fd0a..bc91dbf 100644 --- a/src/backend/intermediate_representation.zig +++ b/src/backend/intermediate_representation.zig @@ -307,12 +307,12 @@ pub const Function = struct { .phi => {}, .ret => |ret_index| { const ret = function.ir.returns.get(ret_index); - switch (ret.instruction.valid) { - true => { + switch (ret.instruction.invalid) { + false => { const ret_value = function.ir.instructions.get(ret.instruction).*; try writer.print(" {s}", .{@tagName(ret_value)}); }, - false => try writer.writeAll(" void"), + true => try writer.writeAll(" void"), } }, // .load => |load_index| { @@ -518,13 +518,13 @@ pub const Builder = struct { const function_declaration_index = ir_call.function; const function_declaration = builder.ir.function_declarations.get(function_declaration_index); const function_definition_index = function_declaration.definition; - switch (function_definition_index.valid) { - true => { + switch (function_definition_index.invalid) { + false => { const function = builder.ir.function_definitions.get(function_definition_index); const first_block = function.blocks.items[0]; break :blk first_block; }, - false => continue, + true => continue, } }, .@"unreachable", .ret, .store => continue, @@ -625,10 +625,10 @@ pub const Builder = struct { .phi => blk: { var did_something = false; var head = &instruction.phi; - next: while (head.valid) { + next: while (!head.invalid) { const phi = builder.ir.phis.get(head.*); const phi_jump = builder.ir.jumps.get(phi.jump); - assert(phi_jump.source.valid); + assert(!phi_jump.source.invalid); for (reachable_blocks) |block_index| { if (phi_jump.source.eq(block_index)) { @@ -655,12 +655,12 @@ pub const Builder = struct { var only_value = Instruction.Index.invalid; var it = phi_index; - while (it.valid) { + while (!it.invalid) { const phi = builder.ir.phis.get(it); const phi_value = builder.ir.instructions.get(phi.instruction); if (phi_value.* == .phi) unreachable; // TODO: undefined - if (only_value.valid) { + if (!only_value.invalid) { if (!only_value.eq(phi.instruction)) { break :trivial_blk null; } @@ -675,7 +675,7 @@ pub const Builder = struct { }; if (trivial_phi) |trivial_value| { - if (trivial_value.valid) { + if (!trivial_value.invalid) { // Option to delete const delete = false; if (delete) { @@ -740,8 +740,8 @@ pub const Builder = struct { }; for (operands) |operand_instruction_index_pointer| { - switch (operand_instruction_index_pointer.valid) { - true => { + switch (operand_instruction_index_pointer.invalid) { + false => { const operand_value = builder.ir.instructions.get(operand_instruction_index_pointer.*); switch (operand_value.*) { .copy => |copy_value| { @@ -759,7 +759,7 @@ pub const Builder = struct { else => |t| @panic(@tagName(t)), } }, - false => {}, + true => {}, } } @@ -847,13 +847,13 @@ pub const Builder = struct { var arguments = try ArrayList(Instruction.Index).initCapacity(builder.allocator, sema_syscall.argument_count + 1); const sema_syscall_number = 
sema_syscall.number; - assert(sema_syscall_number.valid); + assert(!sema_syscall_number.invalid); const number_value_index = try builder.emitSyscallArgument(sema_syscall_number); arguments.appendAssumeCapacity(number_value_index); for (sema_syscall.getArguments()) |sema_syscall_argument| { - assert(sema_syscall_argument.valid); + assert(!sema_syscall_argument.invalid); const argument_value_index = try builder.emitSyscallArgument(sema_syscall_argument); arguments.appendAssumeCapacity(argument_value_index); } @@ -890,7 +890,7 @@ pub const Builder = struct { const loop_body_block = try builder.newBlock(); const loop_prologue_block = if (options.emit_exit_block) try builder.newBlock() else BasicBlock.Index.invalid; - const loop_head_block = switch (condition.valid) { + const loop_head_block = switch (!condition.invalid) { false => loop_body_block, true => unreachable, }; @@ -902,7 +902,7 @@ pub const Builder = struct { const sema_body_block = builder.module.blocks.get(sema_loop_body.block); builder.currentFunction().current_basic_block = try builder.blockInsideBasicBlock(sema_body_block, loop_body_block); - if (loop_prologue_block.valid) { + if (!loop_prologue_block.invalid) { builder.ir.blocks.get(loop_prologue_block).seal(); } @@ -921,7 +921,7 @@ pub const Builder = struct { unreachable; } - if (loop_prologue_block.valid) { + if (!loop_prologue_block.invalid) { builder.currentFunction().current_basic_block = loop_prologue_block; } }, @@ -933,13 +933,13 @@ pub const Builder = struct { const sema_ret = builder.module.returns.get(sema_ret_index); const return_value = try builder.emitReturnValue(sema_ret.value); const phi_instruction = builder.ir.instructions.get(builder.currentFunction().return_phi_node); - const phi = switch (phi_instruction.phi.valid) { - true => unreachable, - false => (try builder.ir.phis.append(builder.allocator, std.mem.zeroes(Phi))).ptr, + const phi = switch (phi_instruction.phi.invalid) { + false => unreachable, + true => (try builder.ir.phis.append(builder.allocator, std.mem.zeroes(Phi))).ptr, }; //builder.ir.phis.get(phi_instruction.phi); const exit_jump = try builder.jump(.{ .source = builder.currentFunction().current_basic_block, - .destination = switch (phi_instruction.phi.valid) { + .destination = switch (!phi_instruction.phi.invalid) { true => phi.block, false => builder.currentFunction().return_phi_block, }, @@ -1056,8 +1056,8 @@ pub const Builder = struct { fn processCall(builder: *Builder, sema_call_index: Compilation.Call.Index) anyerror!Instruction.Index { const sema_call = builder.module.calls.get(sema_call_index); const sema_argument_list_index = sema_call.arguments; - const argument_list: []const Instruction.Index = switch (sema_argument_list_index.valid) { - true => blk: { + const argument_list: []const Instruction.Index = switch (sema_argument_list_index.invalid) { + false => blk: { var argument_list = ArrayList(Instruction.Index){}; const sema_argument_list = builder.module.argument_lists.get(sema_argument_list_index); try argument_list.ensureTotalCapacity(builder.allocator, sema_argument_list.array.items.len); @@ -1067,7 +1067,7 @@ pub const Builder = struct { } break :blk argument_list.items; }, - false => &.{}, + true => &.{}, }; const call_index = try builder.call(.{ @@ -1172,15 +1172,15 @@ pub const Builder = struct { fn jump(builder: *Builder, descriptor: Jump) !Jump.Index { const destination_block = builder.ir.blocks.get(descriptor.destination); assert(!destination_block.sealed); - assert(descriptor.source.valid); + 
assert(!descriptor.source.invalid); const jump_allocation = try builder.ir.jumps.append(builder.allocator, descriptor); return jump_allocation.index; } fn append(builder: *Builder, instruction: Instruction) !Instruction.Index { - assert(builder.current_function_index.valid); + assert(!builder.current_function_index.invalid); const current_function = builder.currentFunction(); - assert(current_function.current_basic_block.valid); + assert(!current_function.current_basic_block.invalid); return builder.appendToBlock(current_function.current_basic_block, instruction); } diff --git a/src/backend/x86_64.zig b/src/backend/x86_64.zig index d15d8e4..a114761 100644 --- a/src/backend/x86_64.zig +++ b/src/backend/x86_64.zig @@ -3,7 +3,7 @@ const Allocator = std.mem.Allocator; const assert = std.debug.assert; const print = std.debug.print; const emit = @import("emit.zig"); -const ir = @import("./intermediate_representation.zig"); +const ir = @import("intermediate_representation.zig"); const Compilation = @import("../Compilation.zig"); @@ -18,6 +18,19 @@ const Register = struct { list: List = .{}, index: Index, + const invalid = Register{ + .index = .{ + .physical = .no_register, + }, + }; + + fn isValid(register: Register) bool { + return switch (register.index) { + .physical => |physical| physical != .no_register, + .virtual => true, + }; + } + const Index = union(enum) { physical: Register.Physical, virtual: Register.Virtual.Index, @@ -929,6 +942,7 @@ const StackObject = struct { const InstructionSelection = struct { local_value_map: data_structures.AutoArrayHashMap(ir.Instruction.Index, Register) = .{}, + value_map: data_structures.AutoArrayHashMap(ir.Instruction.Index, Register) = .{}, block_map: data_structures.AutoHashMap(ir.BasicBlock.Index, BasicBlock.Index) = .{}, liveins: data_structures.AutoArrayHashMap(Register.Physical, Register.Virtual.Index) = .{}, memory_map: data_structures.AutoArrayHashMap(ir.Instruction.Index, Memory.Index) = .{}, @@ -994,6 +1008,7 @@ const InstructionSelection = struct { fn loadRegisterFromStackSlot(instruction_selection: *InstructionSelection, mir: *MIR, insert_before_instruction_index: usize, destination_register: Register.Physical, frame_index: u32, register_class: Register.Class, virtual_register: Register.Virtual.Index) !void { _ = virtual_register; const stack_object = instruction_selection.stack_objects.items[frame_index]; + print("Stack object size: {}\n", .{stack_object.size}); switch (@divExact(stack_object.size, 8)) { @sizeOf(u64) => { switch (register_class) { @@ -1034,31 +1049,74 @@ const InstructionSelection = struct { else => |t| @panic(@tagName(t)), } }, + @sizeOf(u32) => switch (register_class) { + .gp32 => { + const instruction_id = Instruction.Id.mov32rm; + const instruction_descriptor = comptime instruction_descriptors.get(instruction_id); + const source_operand_id = instruction_descriptor.operands[1].id; + const addressing_mode = AddressingMode{ + .base = .{ + .frame_index = frame_index, + }, + }; + const source_operand = Operand{ + .id = source_operand_id, + .u = .{ + .memory = .{ .addressing_mode = addressing_mode }, + }, + .flags = .{}, + }; + const destination_operand = Operand{ + .id = .gp32, + .u = .{ + .register = .{ + .index = .{ + .physical = destination_register, + }, + }, + }, + .flags = .{ .type = .def }, + }; + const instruction_index = try mir.buildInstruction(instruction_selection, instruction_id, &.{ + destination_operand, + source_operand, + }); + print("Inserting instruction at index {}\n", .{insert_before_instruction_index}); 
+ try mir.blocks.get(instruction_selection.current_block).instructions.insert(mir.allocator, insert_before_instruction_index, instruction_index); + }, + else => |t| @panic(@tagName(t)), + }, else => std.debug.panic("Stack object size: {}\n", .{stack_object.size}), } } // TODO: add value map on top of local value map? - fn lookupRegisterForValue(instruction_selection: *InstructionSelection, ir_instruction_index: ir.Instruction.Index) ?Register { - if (instruction_selection.local_value_map.get(ir_instruction_index)) |register| { + fn lookupRegisterForValue(instruction_selection: *InstructionSelection, mir: *MIR, ir_instruction_index: ir.Instruction.Index) !Register { + if (instruction_selection.value_map.get(ir_instruction_index)) |register| { return register; } - return null; + const gop = try instruction_selection.local_value_map.getOrPutValue(mir.allocator, ir_instruction_index, Register.invalid); + return gop.value_ptr.*; } fn getRegisterForValue(instruction_selection: *InstructionSelection, mir: *MIR, ir_instruction_index: ir.Instruction.Index) !Register { - if (instruction_selection.lookupRegisterForValue(ir_instruction_index)) |register| { + const register = try instruction_selection.lookupRegisterForValue(mir, ir_instruction_index); + if (register.isValid()) { return register; } - const ir_type = getIrType(mir.ir, ir_instruction_index); - const value_type = resolveType(ir_type); + const instruction = mir.ir.instructions.get(ir_instruction_index); + if (instruction.* != .stack or !instruction_selection.stack_map.contains(ir_instruction_index)) { + const ir_type = getIrType(mir.ir, ir_instruction_index); + const value_type = resolveType(ir_type); + const register_class = register_classes.get(value_type); + const new_register = try mir.createVirtualRegister(register_class); + try instruction_selection.value_map.putNoClobber(mir.allocator, ir_instruction_index, new_register); + return new_register; + } - const register_class = register_classes.get(value_type); - const virtual_register = try mir.createVirtualRegister(register_class); - try instruction_selection.local_value_map.putNoClobber(mir.allocator, ir_instruction_index, virtual_register); - return virtual_register; + unreachable; } // Moving an immediate to a register @@ -1187,18 +1245,28 @@ const InstructionSelection = struct { } } - fn updateValueMap(instruction_selection: *InstructionSelection, allocator: Allocator, ir_instruction_index: ir.Instruction.Index, register: Register) !void { - const gop = try instruction_selection.local_value_map.getOrPut(allocator, ir_instruction_index); - if (gop.found_existing) { - const stored_register = gop.value_ptr.*; - if (std.meta.eql(stored_register, register)) { - unreachable; - } else { - std.debug.panic("Register mismatch: Stored: {} Got: {}\n", .{ stored_register, register }); - } + fn updateValueMap(instruction_selection: *InstructionSelection, allocator: Allocator, ir_instruction_index: ir.Instruction.Index, register: Register, local: bool) !void { + if (local) { + try instruction_selection.local_value_map.putNoClobber(allocator, ir_instruction_index, register); } else { - gop.value_ptr.* = register; + const gop = try instruction_selection.value_map.getOrPutValue(allocator, ir_instruction_index, Register.invalid); + if (!gop.value_ptr.isValid()) { + gop.value_ptr.* = register; + } else if (!std.meta.eql(gop.value_ptr.index, register.index)) { + unreachable; + } } + // const gop = try instruction_selection.local_value_map.getOrPut(allocator, ir_instruction_index); + // if 
(gop.found_existing) { + // const stored_register = gop.value_ptr.*; + // if (std.meta.eql(stored_register, register)) { + // unreachable; + // } else { + // std.debug.panic("Register mismatch: Stored: {} Got: {}\n", .{ stored_register, register }); + // } + // } else { + // gop.value_ptr.* = register; + // } } fn lowerArguments(instruction_selection: *InstructionSelection, mir: *MIR, ir_function: *ir.Function) !void { @@ -1253,18 +1321,16 @@ const InstructionSelection = struct { // const operand_register_class = register_class_operand_matcher.get(operand_reference.id); const virtual_register_index = try instruction_selection.createLiveIn(mir, physical_register, register_class); + const result_register = try mir.createVirtualRegister(register_class); try mir.append(instruction_selection, .copy, &.{ Operand{ .id = operand_id, .u = .{ - .register = .{ - .index = .{ - .virtual = virtual_register_index, - }, - }, + .register = result_register, }, .flags = .{ .dead_or_kill = true, + .type = .def, }, }, Operand{ @@ -1272,7 +1338,7 @@ const InstructionSelection = struct { .u = .{ .register = .{ .index = .{ - .physical = physical_register, + .virtual = virtual_register_index, }, }, }, @@ -1284,11 +1350,8 @@ const InstructionSelection = struct { mir.blocks.get(instruction_selection.current_block).current_stack_index += 1; - try instruction_selection.local_value_map.putNoClobber(mir.allocator, ir_argument_instruction_index, Register{ - .index = .{ - .virtual = virtual_register_index, - }, - }); + try instruction_selection.updateValueMap(mir.allocator, ir_argument_instruction_index, result_register, true); + try instruction_selection.value_map.putNoClobber(mir.allocator, ir_argument_instruction_index, result_register); } } @@ -1314,8 +1377,57 @@ const InstructionSelection = struct { return virtual_register_index; } + + fn emitLiveInCopies(instruction_selection: *InstructionSelection, mir: *MIR, entry_block_index: BasicBlock.Index) !void { + const entry_block = mir.blocks.get(entry_block_index); + for (instruction_selection.liveins.keys(), instruction_selection.liveins.values()) |livein_physical_register, livein_virtual_register| { + const vr = mir.virtual_registers.get(livein_virtual_register); + const destination_operand = Operand{ + .id = switch (vr.register_class) { + .gp32 => .gp32, + .gp64 => .gp64, + else => |t| @panic(@tagName(t)), + }, + .u = .{ + .register = .{ + .index = .{ + .virtual = livein_virtual_register, + }, + }, + }, + .flags = .{ + .type = .def, + }, + }; + const source_operand = Operand{ + .id = destination_operand.id, + .u = .{ + .register = .{ + .index = .{ + .physical = livein_physical_register, + }, + }, + }, + .flags = .{}, + }; + + const instruction_index = try mir.buildInstruction(instruction_selection, .copy, &.{ + destination_operand, + source_operand, + }); + + try entry_block.instructions.insert(mir.allocator, 0, instruction_index); + + // TODO: addLiveIn MachineBasicBlock ? 
unreachable; + } + print("After livein: {}\n", .{instruction_selection.function}); + } }; +fn getRegisterClass(register: Register.Physical) Register.Class { + _ = register; +} + const Instruction = struct { id: Id, operands: ArrayList(Operand.Index), @@ -1332,6 +1444,7 @@ const Instruction = struct { mov64mr, mov32ri, mov32ri64, + movsx64rm32, movsx64rr32, ret, syscall, @@ -1385,7 +1498,7 @@ const Instruction = struct { .mir = mir, }; - if (index.valid) { + if (!index.invalid) { const operand = mir.operands.get(index); if ((!arguments.use and operand.flags.type == .use) or (!arguments.def and operand.flags.type == .def)) { it.advance(); @@ -1395,39 +1508,54 @@ const Instruction = struct { return it; } - fn next(it: *I) switch (arguments.element) { - .instruction => ?*Instruction, - .operand => ?*Operand, - } { - if (it.index.valid) { - var operand = it.mir.operands.get(it.index); - switch (arguments.element) { + const ReturnValue = switch (arguments.element) { + .instruction => Instruction, + .operand => Operand, + }; + + fn next(it: *I) ?ReturnValue.Index { + const original_operand_index = it.index; + switch (it.index.invalid) { + false => switch (arguments.element) { .instruction => { - const instruction = operand.parent; - const i_desc = it.mir.instructions.get(instruction); - print("Instruction: {}\n", .{i_desc.id}); + const original_operand = it.mir.operands.get(original_operand_index); + const instruction = original_operand.parent; + // const i_desc = it.mir.instructions.get(instruction); + // print("Instruction: {}\n", .{i_desc.id}); while (true) { it.advance(); - if (!it.index.valid) return null; - operand = it.mir.operands.get(it.index); - if (!operand.parent.eq(instruction)) break; + if (it.index.invalid) break; + const it_operand = it.mir.operands.get(it.index); + if (!it_operand.parent.eq(instruction)) break; } - return it.mir.instructions.get(operand.parent); + return instruction; }, - .operand => return operand, - } - } else { - return null; + .operand => { + it.advance(); + return original_operand_index; + }, + }, + true => return null, } } + fn nextPointer(it: *I) ?*ReturnValue { + if (it.next()) |next_index| { + const result = switch (arguments.element) { + .instruction => it.mir.instructions.get(next_index), + .operand => it.mir.operands.get(next_index), + }; + return result; + } else return null; + } + fn advance(it: *I) void { - assert(it.index.valid); + assert(!it.index.invalid); it.advanceRaw(); if (!arguments.use) { - if (it.index.valid) { + if (!it.index.invalid) { const operand = it.mir.operands.get(it.index); if (operand.flags.type == .use) { it.index = Operand.Index.invalid; @@ -1436,7 +1564,7 @@ const Instruction = struct { } } } else { - while (it.index.valid) { + while (!it.index.invalid) { const operand = it.mir.operands.get(it.index); if (!arguments.def and operand.flags.type == .def) { it.advanceRaw(); @@ -1448,7 +1576,7 @@ const Instruction = struct { } fn advanceRaw(it: *I) void { - assert(it.index.valid); + assert(!it.index.invalid); const current_operand = it.mir.operands.get(it.index); assert(current_operand.u == .register); const next_index = current_operand.u.register.list.next; @@ -1480,7 +1608,7 @@ pub const Operand = struct { fn isOnRegisterUseList(operand: *const Operand) bool { assert(operand.u == .register); - return operand.u.register.list.previous.valid; + return !operand.u.register.list.previous.invalid; } const Id = enum { @@ -1764,6 +1892,22 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri .implicit_def = 
false, }, }, + .movsx64rm32 = .{ + .format = .mrm_source_reg, + .operands = &.{ + .{ + .id = .gp64, + .kind = .dst, + }, + .{ + .id = .i32mem, + .kind = .src, + }, + }, + .flags = .{ + .implicit_def = false, + }, + }, .movsx64rr32 = .{ .format = .mrm_source_reg, .operands = &.{ @@ -1935,12 +2079,16 @@ pub const MIR = struct { var instruction_i: usize = ir_block.instructions.items.len; print("Instruction count: {}\n", .{instruction_i}); + var folded_load = false; + while (instruction_i > 0) { instruction_i -= 1; const ir_instruction_index = ir_block.instructions.items[instruction_i]; const ir_instruction = mir.ir.instructions.get(ir_instruction_index); + instruction_selection.local_value_map.clearRetainingCapacity(); + print("Instruction #{}\n", .{instruction_i}); switch (ir_instruction.*) { @@ -1972,7 +2120,9 @@ pub const MIR = struct { .u = .{ .register = physical_register, }, - .flags = .{}, + .flags = .{ + .type = .def, + }, }, Operand{ .id = operand_id, @@ -2040,73 +2190,134 @@ pub const MIR = struct { const syscall = try mir.buildInstruction(instruction_selection, .syscall, &.{}); try instruction_selection.instruction_cache.append(mir.allocator, syscall); - const physical_return_register = Register{ - .index = .{ - .physical = .rax, + const produce_syscall_return_value = switch (instruction_i == ir_block.instructions.items.len - 2) { + true => blk: { + const last_block_instruction = mir.ir.instructions.get(ir_block.instructions.items[ir_block.instructions.items.len - 1]); + break :blk switch (last_block_instruction.*) { + .@"unreachable" => false, + else => |t| @panic(@tagName(t)), + }; }, - }; - const physical_return_operand = Operand{ - .id = .gp64, - .u = .{ - .register = physical_return_register, - }, - .flags = .{ .type = .def }, + false => true, }; - const virtual_return_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index); - const virtual_return_operand = Operand{ - .id = .gp64, - .u = .{ - .register = virtual_return_register, - }, - .flags = .{ .type = .def }, - }; + if (produce_syscall_return_value) { + const physical_return_register = Register{ + .index = .{ + .physical = .rax, + }, + }; + const physical_return_operand = Operand{ + .id = .gp64, + .u = .{ + .register = physical_return_register, + }, + .flags = .{ .type = .def }, + }; - const syscall_result_copy = try mir.buildInstruction(instruction_selection, .copy, &.{ - virtual_return_operand, - physical_return_operand, - }); - try instruction_selection.instruction_cache.append(mir.allocator, syscall_result_copy); + const virtual_return_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index); + const virtual_return_operand = Operand{ + .id = .gp64, + .u = .{ + .register = virtual_return_register, + }, + .flags = .{ .type = .def }, + }; + + const syscall_result_copy = try mir.buildInstruction(instruction_selection, .copy, &.{ + virtual_return_operand, + physical_return_operand, + }); + try instruction_selection.instruction_cache.append(mir.allocator, syscall_result_copy); + } }, .sign_extend => |ir_cast_index| { const ir_sign_extend = mir.ir.casts.get(ir_cast_index); - const ir_source_instruction = ir_sign_extend.value; + assert(!folded_load); + const ir_source_instruction = blk: { + var source = ir_sign_extend.value; + const source_instruction = mir.ir.instructions.get(source); + const result = switch (source_instruction.*) { + .load => b: { + const load = mir.ir.loads.get(source_instruction.load); + folded_load = true; + break :b load.instruction; + }, + else => 
|t| @panic(@tagName(t)), + }; + break :blk result; + }; const destination_type = resolveType(ir_sign_extend.type); - const source_register = try instruction_selection.getRegisterForValue(mir, ir_source_instruction); const source_type = resolveType(getIrType(mir.ir, ir_source_instruction)); if (destination_type != source_type) { const instruction_id: Instruction.Id = switch (source_type) { .i32 => switch (destination_type) { - .i64 => .movsx64rr32, + .i64 => switch (folded_load) { + true => .movsx64rm32, + false => .movsx64rr32, + }, else => unreachable, }, else => |t| @panic(@tagName(t)), }; + const instruction_descriptor = instruction_descriptors.getPtrConst(instruction_id); assert(instruction_descriptor.operands.len == 2); const destination_operand_index = 0; - const source_operand_index = 1; - const source_operand = mir.constrainOperandRegisterClass(instruction_descriptor, source_register, source_operand_index, .{}); const destination_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index); const destination_operand = mir.constrainOperandRegisterClass(instruction_descriptor, destination_register, destination_operand_index, .{ .type = .def }); + const source_operand_index = 1; + + const source_operand = switch (folded_load) { + true => blk: { + const addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_source_instruction); + const memory_id: Operand.Id = switch (source_type) { + .i32 => .i32mem, + .i64 => .i64mem, + else => |t| @panic(@tagName(t)), + }; + const operand = Operand{ + .id = memory_id, + .u = .{ + .memory = .{ + .addressing_mode = addressing_mode, + }, + }, + .flags = .{}, + }; + break :blk operand; + }, + false => blk: { + const source_register = try instruction_selection.getRegisterForValue(mir, ir_source_instruction); + break :blk mir.constrainOperandRegisterClass(instruction_descriptor, source_register, source_operand_index, .{}); + }, + }; const sign_extend = try mir.buildInstruction(instruction_selection, instruction_id, &.{ destination_operand, source_operand, }); + try instruction_selection.instruction_cache.append(mir.allocator, sign_extend); + + try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, destination_register, false); } else { unreachable; } }, .load => |ir_load_index| { + if (folded_load) { + folded_load = false; + continue; + } + const ir_load = mir.ir.loads.get(ir_load_index); - const ir_destination = ir_load.instruction; - const addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_destination); - const value_type = resolveType(getIrType(mir.ir, ir_destination)); + const ir_source = ir_load.instruction; + const addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_source); + const value_type = resolveType(getIrType(mir.ir, ir_source)); switch (value_type) { inline .i32, @@ -2151,6 +2362,8 @@ pub const MIR = struct { source_operand, }); try instruction_selection.instruction_cache.append(mir.allocator, load); + + try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, destination_register, false); }, else => |t| @panic(@tagName(t)), } @@ -2338,35 +2551,9 @@ pub const MIR = struct { instruction_selection.instruction_cache.clearRetainingCapacity(); } - - instruction_selection.local_value_map.clearRetainingCapacity(); } - // for (ir_function.blocks.items, function.blocks.items) |ir_block_index, block_index| { - // const ir_block = mir.ir.blocks.get(ir_block_index); - // instruction_selection.current_block = block_index; - // - // 
for (ir_block.instructions.items) |ir_instruction_index| { - // const ir_instruction = mir.ir.instructions.get(ir_instruction_index); - // switch (ir_instruction.*) { - // .load_string_literal => |ir_load_string_literal_index| { - // // const ir_string_literal = mir.ir.string_literals.get(ir_load_string_literal_index); - // const virtual_register = try mir.createVirtualRegister(Register.Class.gp64); - // const virtual_operand = Operand.new(.gp64, virtual_register, .{ .type = .def }); - // try mir.append(instruction_selection, .lea64r, &.{ - // virtual_operand, - // Operand.Lea64Mem.stringLiteral(ir_load_string_literal_index), - // }); - // - // try instruction_selection.updateValueMap(allocator, ir_instruction_index, virtual_register); - // }, - // .@"unreachable" => try mir.append(instruction_selection, .ud2, &.{}), - // else => |t| @panic(@tagName(t)), - // } - // } - // - // instruction_selection.local_value_map.clearRetainingCapacity(); - // } + try instruction_selection.emitLiveInCopies(mir, function.blocks.items[0]); print("=========\n{}=========\n", .{function}); } @@ -2400,7 +2587,7 @@ pub const MIR = struct { const operand = mir.operands.get(operand_index); assert(operand.u == .register); assert(!std.meta.eql(operand.u.register.index, register)); - operand.flags.renamable = true; + operand.flags.renamable = false; mir.removeRegisterOperandFromUseList(instruction_selection, operand); operand.u.register.index = register; mir.addRegisterOperandFromUseList(instruction_selection, operand_index); @@ -2412,8 +2599,8 @@ pub const MIR = struct { const head_index_ptr = mir.getRegisterListHead(instruction_selection, operand.u.register); const head_index = head_index_ptr.*; - switch (head_index.valid) { - true => { + switch (head_index.invalid) { + false => { const head_operand = mir.operands.get(head_index); assert(std.meta.eql(head_operand.u.register.index, operand.u.register.index)); const last_operand_index = head_operand.u.register.list.previous; @@ -2433,18 +2620,19 @@ pub const MIR = struct { }, } }, - false => { + true => { operand.u.register.list.previous = operand_index; operand.u.register.list.next = Operand.Index.invalid; head_index_ptr.* = operand_index; }, } } + fn removeRegisterOperandFromUseList(mir: *MIR, instruction_selection: *InstructionSelection, operand: *Operand) void { assert(operand.isOnRegisterUseList()); const head_index_ptr = mir.getRegisterListHead(instruction_selection, operand.u.register); const head_index = head_index_ptr.*; - assert(head_index.valid); + assert(!head_index.invalid); const operand_previous = operand.u.register.list.previous; const operand_next = operand.u.register.list.next; @@ -2457,9 +2645,9 @@ pub const MIR = struct { previous.u.register.list.next = operand_next; } - const next = switch (operand_next.valid) { - true => mir.operands.get(operand_next), - false => head, + const next = switch (operand_next.invalid) { + false => mir.operands.get(operand_next), + true => head, }; next.u.register.list.previous = operand_previous; @@ -2632,7 +2820,7 @@ pub const MIR = struct { break :blk null; }; // TODO: handle allocation error here - register_allocator.allocateVirtualRegister(mir, instruction_selection, instruction, live_register, hint, false) catch unreachable; + register_allocator.allocateVirtualRegister(mir, instruction_selection, instruction_index, live_register, hint, false) catch unreachable; } live_register.last_use = instruction_index; @@ -2643,12 +2831,10 @@ pub const MIR = struct { fn isRegisterInClass(register: Register.Physical, 
register_class: Register.Class) bool { const result = std.mem.indexOfScalar(Register.Physical, registers_by_class.get(register_class), register) != null; - print("Is {s} in class {s}: {}\n", .{ @tagName(register), @tagName(register_class), result }); return result; } - fn allocateVirtualRegister(register_allocator: *RegisterAllocator, mir: *MIR, instruction_selection: *InstructionSelection, instruction: *Instruction, live_register: *LiveRegister, maybe_hint: ?Register, look_at_physical_register_uses: bool) !void { - _ = instruction; + fn allocateVirtualRegister(register_allocator: *RegisterAllocator, mir: *MIR, instruction_selection: *InstructionSelection, instruction_index: Instruction.Index, live_register: *LiveRegister, maybe_hint: ?Register, look_at_physical_register_uses: bool) !void { assert(live_register.physical == .no_register); const virtual_register = live_register.virtual; const register_class = mir.virtual_registers.get(live_register.virtual).register_class; @@ -2656,11 +2842,7 @@ pub const MIR = struct { if (maybe_hint) |hint_register| { if (hint_register.index == .physical // TODO : and isAllocatable - and isRegisterInClass(hint_register.index.physical, register_class) - - // TODO and !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) { - - ) { + and isRegisterInClass(hint_register.index.physical, register_class) and !register_allocator.isRegisterUsedInInstruction(hint_register.index.physical, look_at_physical_register_uses)) { if (register_allocator.register_states.get(hint_register.index.physical) == .free) { register_allocator.assignVirtualToPhysicalRegister(live_register, hint_register.index.physical); return; @@ -2670,8 +2852,21 @@ pub const MIR = struct { const maybe_hint2 = register_allocator.traceCopies(mir, instruction_selection, virtual_register); if (maybe_hint2) |hint| { - _ = hint; - unreachable; + // TODO + const allocatable = true; + if (hint == .physical and allocatable and isRegisterInClass(hint.physical, register_class) and !register_allocator.isRegisterUsedInInstruction(hint.physical, look_at_physical_register_uses)) { + const physical_register = hint.physical; + if (register_allocator.register_states.get(physical_register) == .free) { + register_allocator.assignVirtualToPhysicalRegister(live_register, physical_register); + return; + } else { + print("Second hint {s} not free\n", .{@tagName(physical_register)}); + } + } else { + unreachable; + } + } else { + print("Can't take hint for VR{} for instruction #{}\n", .{ virtual_register.uniqueInteger(), instruction_index.uniqueInteger() }); } const register_class_members = registers_by_class.get(register_class); @@ -2679,16 +2874,14 @@ pub const MIR = struct { var best_cost: u32 = SpillCost.impossible; var best_register = Register.Physical.no_register; - print("Candidates for {s}: ", .{@tagName(register_class)}); - for (register_class_members) |candidate_register| { - print("{s}, ", .{@tagName(candidate_register)}); - } + // print("Candidates for {s}: ", .{@tagName(register_class)}); + // for (register_class_members) |candidate_register| { + // print("{s}, ", .{@tagName(candidate_register)}); + // } print("\n", .{}); for (register_class_members) |candidate_register| { - print("Checking candidate register {s}\n", .{@tagName(candidate_register)}); if (register_allocator.isRegisterUsedInInstruction(candidate_register, look_at_physical_register_uses)) continue; const spill_cost = register_allocator.computeSpillCost(candidate_register); - print("Spill cost: {}\n", .{spill_cost}); if (spill_cost == 0) { 
register_allocator.assignVirtualToPhysicalRegister(live_register, candidate_register); @@ -2753,8 +2946,43 @@ pub const MIR = struct { } } - fn traceCopies(register_allocator: *RegisterAllocator, mir: *MIR, instruction_selection: *InstructionSelection, virtual_register_index: Register.Virtual.Index) ?Register.Index { + fn traceCopyChain(register_allocator: *RegisterAllocator, mir: *MIR, instruction_selection: *InstructionSelection, register: Register) ?Register.Index { _ = register_allocator; + const chain_length_limit = 3; + _ = chain_length_limit; + var chain_try_count: u32 = 0; + _ = chain_try_count; + while (true) { + switch (register.index) { + .physical => return register.index, + .virtual => |vri| { + const virtual_head_index_ptr = mir.getRegisterListHead(instruction_selection, .{ + .index = .{ + .virtual = vri, + }, + }); + + var vdef = Instruction.Iterator.Get(.{ + .use = false, + .def = true, + .element = .instruction, + }).new(mir, virtual_head_index_ptr.*); + + const vdef_instruction = vdef.nextPointer() orelse break; + if (vdef.nextPointer()) |_| break; + + switch (vdef_instruction.id) { + else => |t| @panic(@tagName(t)), + } + unreachable; + }, + } + } + + return null; + } + + fn traceCopies(register_allocator: *RegisterAllocator, mir: *MIR, instruction_selection: *InstructionSelection, virtual_register_index: Register.Virtual.Index) ?Register.Index { const head_index_ptr = mir.getRegisterListHead(instruction_selection, .{ .index = .{ .virtual = virtual_register_index, }, @@ -2766,8 +2994,27 @@ pub const MIR = struct { .element = .instruction, }).new(mir, head_index_ptr.*); - while (define_instructions.next()) |_| { - unreachable; + const definition_limit = 3; + var try_count: u32 = 0; + while (define_instructions.next()) |instruction_index| { + const instruction = mir.instructions.get(instruction_index); + switch (instruction.id) { + .mov32rm => unreachable, + .copy => { + const operand_index = instruction.operands.items[1]; + const operand = mir.operands.get(operand_index); + + if (register_allocator.traceCopyChain(mir, instruction_selection, operand.u.register)) |register| { + return register; + } + + print("Missed opportunity for register allocation tracing copy chain for VR{}\n", .{virtual_register_index.uniqueInteger()}); + }, + else => |t| @panic(@tagName(t)), + } + + try_count += 1; + if (try_count >= definition_limit) break; } return null; @@ -2786,9 +3033,16 @@ pub const MIR = struct { // TODO: debug info } + fn usePhysicalRegister(register_allocator: *RegisterAllocator, mir: *MIR, instruction_selection: *InstructionSelection, instruction_index: Instruction.Index, physical_register: Register.Physical) !bool { + const displaced_any = try register_allocator.displacePhysicalRegister(mir, instruction_selection, instruction_index, physical_register); + register_allocator.register_states.set(physical_register, .preassigned); + register_allocator.markUsedRegisterInInstruction(physical_register); + return displaced_any; + } + fn displacePhysicalRegister(register_allocator: *RegisterAllocator, mir: *MIR, instruction_selection: *InstructionSelection, instruction_index: Instruction.Index, physical_register: Register.Physical) !bool { const state = register_allocator.register_states.getPtr(physical_register); - print("Trying to displace register {s} with state {s}\n", .{ @tagName(physical_register), @tagName(state.*) }); + // print("Trying to displace register {s} with state {s}\n", .{ @tagName(physical_register), @tagName(state.*) }); return switch (state.*) { .free => false,
.preassigned => blk: { @@ -2799,6 +3053,7 @@ pub const MIR = struct { const live_reg = register_allocator.live_virtual_registers.getPtr(virtual_register).?; const before = mir.getNextInstructionIndex(instruction_index); try register_allocator.reload(mir, instruction_selection, before, virtual_register, physical_register); + state.* = .free; live_reg.physical = .no_register; live_reg.reloaded = true; break :blk true; @@ -2861,16 +3116,24 @@ pub const MIR = struct { fn defineVirtualRegister(register_allocator: *RegisterAllocator, mir: *MIR, instruction_selection: *InstructionSelection, instruction_index: Instruction.Index, operand_index: Operand.Index, virtual_register: Register.Virtual.Index, look_at_physical_register_uses: bool) !bool { const instruction = mir.instructions.get(instruction_index); + const operand = mir.operands.get(operand_index); const gop = try register_allocator.live_virtual_registers.getOrPut(mir.allocator, virtual_register); if (!gop.found_existing) { gop.value_ptr.* = .{ .virtual = virtual_register, }; - unreachable; + if (!operand.flags.dead_or_kill) { + var live_out = false; + if (live_out) { + // TODO + } else { + operand.flags.dead_or_kill = true; + } + } } const live_register = gop.value_ptr; if (live_register.physical == .no_register) { - try register_allocator.allocateVirtualRegister(mir, instruction_selection, instruction, live_register, null, look_at_physical_register_uses); + try register_allocator.allocateVirtualRegister(mir, instruction_selection, instruction_index, live_register, null, look_at_physical_register_uses); } else { assert(!register_allocator.isRegisterUsedInInstruction(live_register.physical, look_at_physical_register_uses)); } @@ -2881,7 +3144,7 @@ pub const MIR = struct { const instruction_descriptor = instruction_descriptors.get(instruction.id); if (!instruction_descriptor.flags.implicit_def) { const spill_before = mir.getNextInstructionIndex(instruction_index); - const kill = !live_register.last_use.valid; + const kill = live_register.last_use.invalid; try register_allocator.spill(mir, instruction_selection, spill_before, virtual_register, physical_register, kill, live_register.live_out); live_register.last_use = Instruction.Index.invalid; @@ -2950,7 +3213,7 @@ pub const MIR = struct { const limit = 8; var count: u32 = 0; - while (iterator.next()) |use_instruction| { + while (iterator.nextPointer()) |use_instruction| { if (!use_instruction.parent.eq(instruction_selection.current_block)) { register_allocator.may_live_across_blocks.set(virtual_register_index.uniqueInteger()); // TODO: return !basic_block.successorsEmpty() @@ -3041,14 +3304,177 @@ pub const MIR = struct { pub fn allocateRegisters(mir: *MIR) !void { print("\n[REGISTER ALLOCATION]\n\n", .{}); const function_count = mir.functions.len; - _ = function_count; var function_iterator = mir.functions.iterator(); - _ = function_iterator; const register_count = @typeInfo(Register.Physical).Enum.fields.len; _ = register_count; const register_unit_count = 173; _ = register_unit_count; + for (0..function_count) |function_index| { + const function = function_iterator.nextPointer().?; + const instruction_selection = &mir.instruction_selections.items[function_index]; + print("Allocating registers for {}\n", .{function}); + + var block_i: usize = function.blocks.items.len; + var register_allocator = try RegisterAllocator.init(mir, instruction_selection); + + while (block_i > 0) { + block_i -= 1; + + const block_index = function.blocks.items[block_i]; + const block = mir.blocks.get(block_index); 
+ + var instruction_i: usize = block.instructions.items.len; + + while (instruction_i > 0) { + instruction_i -= 1; + + const instruction_index = block.instructions.items[instruction_i]; + const instruction = mir.instructions.get(instruction_index); + print("===============\nInstruction {} (#{})\n", .{ instruction_i, instruction_index.uniqueInteger() }); + print("{}\n", .{function}); + + register_allocator.used_in_instruction = RegisterBitset.initEmpty(); + + const max_operand_count = 32; + var define_bitset = std.StaticBitSet(max_operand_count).initEmpty(); + var physical_register_bitset = std.StaticBitSet(max_operand_count).initEmpty(); + var register_mask_bitset = std.StaticBitSet(max_operand_count).initEmpty(); + var virtual_register_define = false; + var assign_live_throughs = false; + + for (instruction.operands.items, 0..) |operand_index, operand_i| { + const operand = mir.operands.get(operand_index); + switch (operand.u) { + .register => |register| { + const is_define = operand.flags.type == .def; + const is_physical = register.index == .physical; + if (is_define and !is_physical) { + virtual_register_define = true; + } + define_bitset.setValue(operand_i, is_define); + physical_register_bitset.setValue(operand_i, is_physical); + if (is_physical and is_define) { + const physical_register = register.index.physical; + const displaced_any = try register_allocator.definePhysicalRegister(mir, instruction_selection, instruction_index, physical_register); + if (!displaced_any) { + operand.flags.dead_or_kill = true; + } + } + }, + else => {}, + } + } + + if (define_bitset.count() > 0) { + if (virtual_register_define) { + var rearranged_implicit_operands = true; + if (assign_live_throughs) { + unreachable; + } else { + while (rearranged_implicit_operands) { + rearranged_implicit_operands = false; + + for (instruction.operands.items) |operand_index| { + const operand = mir.operands.get(operand_index); + switch (operand.u) { + .register => |register| switch (operand.flags.type) { + .def => switch (register.index) { + .virtual => |virtual_register| { + rearranged_implicit_operands = try register_allocator.defineVirtualRegister(mir, instruction_selection, instruction_index, operand_index, virtual_register, false); + if (rearranged_implicit_operands) { + break; + } + }, + .physical => {}, + }, + else => {}, + }, + .lea64mem => |lea64mem| { + assert(lea64mem.gp64 == null); + assert(lea64mem.scale_reg == null); + }, + else => {}, + } + } + } + } + } + + var operand_i = instruction.operands.items.len; + while (operand_i > 0) { + operand_i -= 1; + + if (define_bitset.isSet(operand_i) and physical_register_bitset.isSet(operand_i)) { + const operand_index = instruction.operands.items[operand_i]; + const operand = mir.operands.get(operand_index); + const physical_register = operand.u.register.index.physical; + register_allocator.freePhysicalRegister(physical_register); + register_allocator.unmarkUsedRegisterInInstruction(physical_register); + } + } + } + + if (register_mask_bitset.count() > 0) { + unreachable; + } + + // Physical register use + if (physical_register_bitset.count() > 0) { + for (instruction.operands.items, 0..) 
|operand_index, operand_i| { + if (!define_bitset.isSet(operand_i) and physical_register_bitset.isSet(operand_i)) { + const operand = mir.operands.get(operand_index); + const physical_register = operand.u.register.index.physical; + if (!register_allocator.reserved.contains(physical_register)) { + const displaced_any = try register_allocator.usePhysicalRegister(mir, instruction_selection, instruction_index, physical_register); + if (!displaced_any) { + operand.flags.dead_or_kill = true; + } + } + } + } + } + + var rearranged_implicit_operands = true; + while (rearranged_implicit_operands) { + rearranged_implicit_operands = false; + for (instruction.operands.items, 0..) |operand_index, operand_i| { + if (!define_bitset.isSet(operand_i)) { + const operand = mir.operands.get(operand_index); + if (operand.u == .register and operand.u.register.index == .virtual) { + const virtual_register = operand.u.register.index.virtual; + rearranged_implicit_operands = try register_allocator.useVirtualRegister(mir, instruction_selection, instruction_index, virtual_register, @intCast(operand_i)); + if (rearranged_implicit_operands) break; + } + } + } + } + + if (instruction.id == .copy and instruction.operands.items.len == 2) { + const dst_register = mir.operands.get(instruction.operands.items[0]).u.register.index; + const src_register = mir.operands.get(instruction.operands.items[1]).u.register.index; + + if (std.meta.eql(dst_register, src_register)) { + try register_allocator.coalesced.append(mir.allocator, instruction_index); + print("Avoiding copy...\n", .{}); + } + } + } + + for (register_allocator.coalesced.items) |coalesced| { + for (block.instructions.items, 0..) |instruction_index, i| { + if (coalesced.eq(instruction_index)) { + const result = block.instructions.orderedRemove(i); + assert(result.eq(coalesced)); + break; + } + } else unreachable; + } + + print("{}\n============\n", .{function}); + } + } + // for (0..function_count) |function_index| { // const function = function_iterator.nextPointer().?; // const instruction_selection = &mir.instruction_selections.items[function_index]; @@ -3314,10 +3740,16 @@ pub const MIR = struct { } fn getRegisterListHead(mir: *MIR, instruction_selection: *InstructionSelection, register: Register) *Operand.Index { - return switch (register.index) { - .physical => |physical| instruction_selection.physical_register_use_or_definition_list.getPtr(physical), - .virtual => |virtual_register_index| &mir.virtual_registers.get(virtual_register_index).use_def_list_head, - }; + switch (register.index) { + .physical => |physical| { + const operand_index = instruction_selection.physical_register_use_or_definition_list.getPtr(physical); + return operand_index; + }, + .virtual => |virtual_register_index| { + const virtual_register = mir.virtual_registers.get(virtual_register_index); + return &virtual_register.use_def_list_head; + }, + } } const Function = struct { @@ -3338,7 +3770,7 @@ pub const MIR = struct { try writer.print("{s}", .{@tagName(instruction.id)}); for (instruction.operands.items, 0..) 
|operand_index, i| { const operand = function.mir.operands.get(operand_index); - try writer.writeByte(' '); + try writer.print(" O{} ", .{operand_index.uniqueInteger()}); switch (operand.u) { .register => |register| { switch (register.index) { @@ -3346,6 +3778,13 @@ pub const MIR = struct { .virtual => |virtual| try writer.print("VR{}", .{virtual.uniqueInteger()}), } }, + .memory => |memory| { + const base = memory.addressing_mode.base; + switch (base) { + .register_base => unreachable, + .frame_index => |frame_index| try writer.print("SF{}", .{frame_index}), + } + }, else => try writer.writeAll(@tagName(operand.u)), } // switch (operand.u) { @@ -3405,12 +3844,18 @@ pub const MIR = struct { => {}, } } + instruction_allocation.ptr.* = .{ .id = instruction, .operands = list, .parent = instruction_selection.current_block, }; + if (instruction == .copy) { + const i = instruction_allocation.ptr.*; + print("Built copy: DST: {}. SRC: {}\n", .{ mir.operands.get(i.operands.items[0]).u.register.index, mir.operands.get(i.operands.items[1]).u.register.index }); + } + return instruction_allocation.index; } diff --git a/src/data_structures.zig b/src/data_structures.zig index 315c21d..85ea8d0 100644 --- a/src/data_structures.zig +++ b/src/data_structures.zig @@ -40,10 +40,10 @@ pub fn BlockList(comptime T: type) type { index: u6, block: u24, _reserved: bool = false, - valid: bool = true, + invalid: bool = false, pub const invalid = Index{ - .valid = false, + .invalid = true, .index = 0, .block = 0, }; @@ -53,7 +53,7 @@ pub fn BlockList(comptime T: type) type { } pub fn uniqueInteger(index: Index) u32 { - assert(index.valid); + assert(!index.invalid); return @as(u30, @truncate(@as(u32, @bitCast(index)))); } @@ -114,7 +114,7 @@ pub fn BlockList(comptime T: type) type { } pub fn get(list: *List, index: Index) *T { - assert(index.valid); + assert(!index.invalid); return &list.blocks.items[index.block].items[index.index]; } diff --git a/src/frontend/semantic_analyzer.zig b/src/frontend/semantic_analyzer.zig index ce296d6..b839d0f 100644 --- a/src/frontend/semantic_analyzer.zig +++ b/src/frontend/semantic_analyzer.zig @@ -214,23 +214,23 @@ const Analyzer = struct { fn processCall(analyzer: *Analyzer, scope_index: Scope.Index, node_index: Node.Index) !Call.Index { const node = analyzer.getScopeNode(scope_index, node_index); print("Node index: {}. 
Left index: {}\n", .{ node_index.uniqueInteger(), node.left.uniqueInteger() }); - assert(node.left.valid); - const left_value_index = switch (node.left.valid) { + assert(!node.left.invalid); + const left_value_index = switch (!node.left.invalid) { true => blk: { const member_or_namespace_node_index = node.left; - assert(member_or_namespace_node_index.valid); + assert(!member_or_namespace_node_index.invalid); const this_value_allocation = try analyzer.unresolvedAllocate(scope_index, ExpectType.none, member_or_namespace_node_index); break :blk this_value_allocation.index; }, false => unreachable, //Value.Index.invalid, }; - const left_type = switch (left_value_index.valid) { - true => switch (analyzer.module.values.get(left_value_index).*) { + const left_type = switch (left_value_index.invalid) { + false => switch (analyzer.module.values.get(left_value_index).*) { .function => |function_index| analyzer.module.function_prototypes.get(analyzer.module.types.get(analyzer.module.functions.get(function_index).prototype).function).return_type, else => |t| @panic(@tagName(t)), }, - false => Type.Index.invalid, + true => Type.Index.invalid, }; const arguments_index = switch (node.id) { .call, .call_two => |call_tag| (try analyzer.module.argument_lists.append(analyzer.allocator, .{ @@ -340,8 +340,8 @@ const Analyzer = struct { for (switch_case_node_list, 0..) |switch_case_node_index, index| { _ = index; const switch_case_node = analyzer.getScopeNode(scope_index, switch_case_node_index); - switch (switch_case_node.left.valid) { - true => { + switch (switch_case_node.left.invalid) { + false => { const switch_case_condition_node = analyzer.getScopeNode(scope_index, switch_case_node.left); var switch_case_group = ArrayList(u32){}; switch (switch_case_condition_node.id) { @@ -390,7 +390,7 @@ const Analyzer = struct { switch_case_groups.appendAssumeCapacity(switch_case_group); }, - false => { + true => { unreachable; // if (existing_enums.items.len == enum_type.fields.items.len) { // unreachable; @@ -433,9 +433,9 @@ const Analyzer = struct { fn processAssignment(analyzer: *Analyzer, scope_index: Scope.Index, node_index: Node.Index) !Value { const node = analyzer.getScopeNode(scope_index, node_index); assert(node.id == .assign); - const assignment = switch (node.left.valid) { + const assignment = switch (node.left.invalid) { // In an assignment, the node being invalid means a discarding underscore, like this: ```_ = result``` - false => { + true => { var result = Value{ .unresolved = .{ .node_index = node.right, @@ -446,7 +446,7 @@ const Analyzer = struct { return result; }, - true => { + false => { // const id = analyzer.tokenIdentifier(.token); // print("id: {s}\n", .{id}); // const left = try analyzer.expression(scope_index, ExpectType.none, statement_node.left); @@ -470,9 +470,9 @@ const Analyzer = struct { fn processReturn(analyzer: *Analyzer, scope_index: Scope.Index, expect_type: ExpectType, node_index: Node.Index) !Value { const node = analyzer.getScopeNode(scope_index, node_index); - const return_expression: Value.Index = switch (node_index.valid) { + const return_expression: Value.Index = switch (node_index.invalid) { // TODO: expect type - true => ret: { + false => ret: { const return_value_allocation = try analyzer.module.values.addOne(analyzer.allocator); return_value_allocation.ptr.* = .{ .unresolved = .{ @@ -482,7 +482,7 @@ const Analyzer = struct { try analyzer.resolveNode(return_value_allocation.ptr, scope_index, expect_type, node.left); break :ret return_value_allocation.index; }, - false 
=> @panic("TODO: ret void"), + true => @panic("TODO: ret void"), }; const return_value_allocation = try analyzer.module.returns.append(analyzer.allocator, .{ @@ -501,7 +501,7 @@ const Analyzer = struct { fn lookupDeclarationInCurrentAndParentScopes(analyzer: *Analyzer, scope_index: Scope.Index, identifier_hash: u32) ?DeclarationLookup { var scope_iterator = scope_index; - while (scope_iterator.valid) { + while (!scope_iterator.invalid) { const scope = analyzer.module.scopes.get(scope_iterator); if (scope.declarations.get(identifier_hash)) |declaration_index| { return .{ @@ -535,8 +535,8 @@ const Analyzer = struct { const declaration = analyzer.module.declarations.get(declaration_index); // Up until now, only arguments have no initialization value - const typecheck_result = switch (declaration.init_value.valid) { - true => blk: { + const typecheck_result = switch (declaration.init_value.invalid) { + false => blk: { const init_value = analyzer.module.values.get(declaration.init_value); print("Declaration found: {}\n", .{init_value}); const is_unresolved = init_value.* == .unresolved; @@ -560,14 +560,14 @@ const Analyzer = struct { const typecheck_result = try analyzer.typeCheck(expect_type, declaration.type); if (init_value.isComptime() and declaration.mutability == .@"const") { - assert(declaration.init_value.valid); + assert(!declaration.init_value.invalid); assert(typecheck_result == .success); return declaration.init_value; } break :blk typecheck_result; }, - false => try analyzer.typeCheck(expect_type, declaration.type), + true => try analyzer.typeCheck(expect_type, declaration.type), }; const ref_allocation = try analyzer.module.values.append(analyzer.allocator, .{ @@ -580,7 +580,7 @@ const Analyzer = struct { else => declaration.type, }, .flexible_integer => blk: { - assert(declaration.type.valid); + assert(!declaration.type.invalid); break :blk declaration.type; }, }, @@ -627,7 +627,7 @@ const Analyzer = struct { }, .compiler_intrinsic => { const argument_list_node_index = node.left; - assert(argument_list_node_index.valid); + assert(!argument_list_node_index.invalid); const node_list_node = analyzer.getScopeNode(scope_index, argument_list_node_index); const node_list = analyzer.getScopeNodeList(scope_index, node_list_node); @@ -692,7 +692,7 @@ const Analyzer = struct { }, false => false_block: { const file_type = import_file.file.ptr.type; - assert(file_type.valid); + assert(!file_type.invalid); break :false_block file_type; }, }, @@ -714,7 +714,7 @@ const Analyzer = struct { }; const number_allocation = try analyzer.unresolvedAllocate(scope_index, argument_expect_type, argument_nodes.items[0]); const number = number_allocation.index; - assert(number.valid); + assert(!number.invalid); var arguments = std.mem.zeroes([6]Value.Index); for (argument_nodes.items[1..], 0..) 
|argument_node_index, argument_index| { const argument_allocation = try analyzer.unresolvedAllocate(scope_index, argument_expect_type, argument_node_index); @@ -840,7 +840,7 @@ const Analyzer = struct { const left_allocation = try analyzer.unresolvedAllocate(scope_index, ExpectType.none, node.left); switch (left_allocation.ptr.*) { .type => |type_index| { - if (type_index.valid) { + if (!type_index.invalid) { const left_type = analyzer.module.types.get(type_index); switch (left_type.*) { .@"struct" => |struct_index| { @@ -930,7 +930,7 @@ const Analyzer = struct { const field_node = analyzer.getScopeNode(scope_index, field_node_index); const identifier = analyzer.tokenIdentifier(scope_index, field_node.token); print("Enum field: {s}\n", .{identifier}); - assert(!field_node.left.valid); + assert(field_node.left.invalid); const enum_hash_name = try analyzer.processIdentifier(identifier); @@ -1049,9 +1049,9 @@ const Analyzer = struct { const arguments_node_index = simple_function_prototype_node.left; const return_type_node_index = simple_function_prototype_node.right; - const arguments: ?[]const Declaration.Index = switch (arguments_node_index.valid) { - false => null, - true => blk: { + const arguments: ?[]const Declaration.Index = switch (arguments_node_index.invalid) { + true => null, + false => blk: { const argument_list_node = analyzer.getScopeNode(scope_index, arguments_node_index); // print("Function prototype argument list node: {}\n", .{function_prototype_node.left.uniqueInteger()}); const argument_node_list = switch (argument_list_node.id) { @@ -1161,7 +1161,7 @@ const Analyzer = struct { const scope = new_scope.ptr; const scope_index = new_scope.index; - const is_file = !parent_scope_index.valid; + const is_file = parent_scope_index.invalid; assert(is_file); const struct_allocation = try analyzer.module.structs.append(analyzer.allocator, .{ @@ -1171,7 +1171,7 @@ const Analyzer = struct { .@"struct" = struct_allocation.index, }); - if (!parent_scope_index.valid) { + if (parent_scope_index.invalid) { file.type = type_allocation.index; } @@ -1270,14 +1270,14 @@ const Analyzer = struct { fn symbolDeclaration(analyzer: *Analyzer, scope_index: Scope.Index, node_index: Node.Index, scope_type: ScopeType) !Declaration.Index { const declaration_node = analyzer.getScopeNode(scope_index, node_index); assert(declaration_node.id == .simple_symbol_declaration); - const expect_type = switch (declaration_node.left.valid) { - true => switch (scope_type) { + const expect_type = switch (declaration_node.left.invalid) { + false => switch (scope_type) { .local => ExpectType{ .type_index = try analyzer.resolveType(scope_index, declaration_node.left), }, .global => ExpectType.none, }, - false => ExpectType.none, + true => ExpectType.none, }; const mutability: Compilation.Mutability = switch (analyzer.getScopeToken(scope_index, declaration_node.token).id) { .fixed_keyword_const => .@"const", @@ -1292,7 +1292,7 @@ const Analyzer = struct { } // TODO: Check if it is a keyword - assert(declaration_node.right.valid); + assert(!declaration_node.right.invalid); const argument = null; assert(argument == null); diff --git a/src/frontend/syntactic_analyzer.zig b/src/frontend/syntactic_analyzer.zig index bd97791..fa0ef7b 100644 --- a/src/frontend/syntactic_analyzer.zig +++ b/src/frontend/syntactic_analyzer.zig @@ -36,24 +36,24 @@ pub const Node = packed struct(u128) { pub const Index = packed struct(u32) { value: u31, - valid: bool = true, + invalid: bool = false, pub const invalid = Index{ .value = 0, - .valid = 
false, + .invalid = true, }; pub fn get(index: Index) ?u32 { - return if (index.valid) index.value else null; + return if (index.invalid) null else index.value; } pub fn unwrap(index: Index) u32 { - assert(index.valid); + assert(!index.invalid); return index.value; } pub fn uniqueInteger(index: Index) u32 { - assert(index.valid); + assert(!index.invalid); return index.value; } }; @@ -677,7 +677,7 @@ const Analyzer = struct { fn expressionPrecedence(analyzer: *Analyzer, minimum_precedence: i32) !Node.Index { var result = try analyzer.prefixExpression(); - if (result.valid) { + if (!result.invalid) { const prefix_node = analyzer.nodes.items[result.unwrap()]; std.debug.print("Prefix: {}\n", .{prefix_node.id}); } @@ -906,7 +906,7 @@ const Analyzer = struct { while (true) { const suffix_operator = try analyzer.suffixOperator(result); - if (suffix_operator.valid) { + if (!suffix_operator.invalid) { result = suffix_operator; } else { if (analyzer.tokens[analyzer.token_i].id == .left_parenthesis) { @@ -1183,7 +1183,7 @@ pub fn analyze(allocator: Allocator, tokens: []const Token, source_file: []const }); assert(node_index.value == 0); - assert(node_index.valid); + assert(!node_index.invalid); std.debug.print("Start Parsing file root members\n", .{}); const members = try analyzer.containerMembers();
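
---

Review note on the new allocation path in src/backend/x86_64.zig: `allocateVirtualRegister` now takes hints in two stages (the caller-provided hint, then one recovered through `traceCopies`/`traceCopyChain`) before scanning the register class by spill cost, taking a free register immediately and otherwise remembering the cheapest displaceable candidate. The sketch below is a minimal standalone model of that selection loop only, under stated assumptions: `PhysicalRegister`, `RegisterState`, `spillCost`, and `pickRegister` are hypothetical stand-ins for the patch's `Register.Physical`, `register_states`, `computeSpillCost`, and the body of `allocateVirtualRegister`, and it omits the `isRegisterUsedInInstruction` filter and the actual spill/reload work the real code performs.

const std = @import("std");

// Hypothetical, simplified names; the patch's enum has many more registers
// and its states live in an EnumArray keyed by Register.Physical.
const PhysicalRegister = enum { rax, rcx, rdx, rsi, rdi };

const RegisterState = union(enum) {
    free,
    preassigned,
    virtual: u32, // virtual register currently assigned here
};

const States = std.EnumArray(PhysicalRegister, RegisterState);

fn spillCost(state: RegisterState) u32 {
    return switch (state) {
        .free => 0, // nothing to displace
        .preassigned => std.math.maxInt(u32), // "impossible": never displaced
        .virtual => 1, // would cost a spill/reload pair
    };
}

/// Try the hint first; otherwise scan the class for the cheapest register.
/// Returns null when every candidate is preassigned.
fn pickRegister(
    states: *const States,
    class: []const PhysicalRegister,
    hint: ?PhysicalRegister,
) ?PhysicalRegister {
    if (hint) |hinted| {
        if (states.get(hinted) == .free) return hinted;
    }
    var best: ?PhysicalRegister = null;
    var best_cost: u32 = std.math.maxInt(u32);
    for (class) |candidate| {
        const cost = spillCost(states.get(candidate));
        if (cost == 0) return candidate; // free register: take it immediately
        if (cost < best_cost) {
            best = candidate;
            best_cost = cost;
        }
    }
    return best;
}

test "hint wins when free, otherwise cheapest candidate" {
    var states = States.initFill(.free);
    states.set(.rax, .preassigned);
    const gp = [_]PhysicalRegister{ .rax, .rcx, .rdx };
    try std.testing.expectEqual(PhysicalRegister.rcx, pickRegister(&states, &gp, .rcx).?);
    // .rax is preassigned, so a hint for it falls through to the scan.
    try std.testing.expectEqual(PhysicalRegister.rcx, pickRegister(&states, &gp, .rax).?);
}

In the patch itself, the zero-cost pick corresponds to the `register_states.get(...) == .free` fast path in `allocateVirtualRegister`, and the displaced case feeds `displacePhysicalRegister`/`reload`.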