diff --git a/ci.sh b/ci.sh
index 4d971d5..e5fbd1a 100755
--- a/ci.sh
+++ b/ci.sh
@@ -1,12 +1,44 @@
 #!/usr/bin/env bash
-set -xe
-zig build test -Dall --summary all
+echo "Testing Nativity with Zig"
+zig build test -Dall -Doptimize=ReleaseSafe --summary all
+echo "Compiling Nativity with Zig"
+time zig build -Doptimize=ReleaseSafe
+failed_test_count=0
+passed_test_count=0
+test_directory_name=test
+test_directory=$test_directory_name/*
+total_test_count=$(ls 2>/dev/null -Ubad1 -- test/* | wc -l)
+ran_test_count=0
+test_i=1
 
-for dir in test/*
+for dir in $test_directory
 do
-    zig build run -- $dir/main.nat
-    if [[ "$OSTYPE" == "linux-gnu"* ]]; then
-        nat/${dir##*/}
+    MY_TESTNAME=${dir##*/}
+    zig build run -Doptimize=ReleaseSafe -- $dir/main.nat
+    if [[ "$?" == "0" ]]; then
+        if [[ "$OSTYPE" == "linux-gnu"* ]]; then
+            nat/$MY_TESTNAME
+            if [[ "$?" == "0" ]]; then
+                passed_test_count=$(($passed_test_count + 1))
+                result="PASSED"
+            else
+                failed_test_count=$(($failed_test_count + 1))
+                result="FAILED"
+            fi
+            echo "[$test_i/$total_test_count] [$result] $MY_TESTNAME"
+            ran_test_count=$(($ran_test_count + 1))
+        fi
+    else
+        echo "$MY_TESTNAME failed to compile"
     fi
+    test_i=$(($test_i + 1))
 done
+
+echo "Ran $ran_test_count tests ($passed_test_count passed, $failed_test_count failed)."
+
+if [[ $failed_test_count == "0" ]]; then
+    true
+else
+    false
+fi
diff --git a/src/Compilation.zig b/src/Compilation.zig
index ef81403..3a6d77e 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -536,6 +536,22 @@ pub const Cast = struct {
     pub const Allocation = List.Allocation;
 };
 
+pub const BinaryOperation = struct {
+    left: Value.Index,
+    right: Value.Index,
+    type: Type.Index,
+    id: Id,
+
+    pub const List = BlockList(@This());
+    pub const Index = List.Index;
+    pub const Allocation = List.Allocation;
+
+    const Id = enum {
+        add,
+        sub,
+    };
+};
+
 pub const CallingConvention = enum {
     system_v,
 };
@@ -565,6 +581,7 @@ pub const Value = union(enum) {
     extern_function: Function.Prototype.Index,
     sign_extend: Cast.Index,
     zero_extend: Cast.Index,
+    binary_operation: BinaryOperation.Index,
 
     pub const List = BlockList(@This());
     pub const Index = List.Index;
@@ -585,6 +602,7 @@ pub const Value = union(enum) {
             .bool, .void, .undefined, .function, .type, .enum_field => true,
             .integer => |integer| integer.type.eq(Type.comptime_int),
             .call => false,
+            .binary_operation => false,
             else => |t| @panic(@tagName(t)),
         };
     }
@@ -598,6 +616,7 @@ pub const Value = union(enum) {
             .type => Type.type,
             .enum_field => |enum_field_index| module.enums.get(module.enum_fields.get(enum_field_index).parent).type,
             .function => |function_index| module.functions.get(function_index).prototype,
+            .binary_operation => |binary_operation| module.binary_operations.get(binary_operation).type,
             else => |t| @panic(@tagName(t)),
         };
 
@@ -693,6 +712,7 @@ pub const Module = struct {
     function_name_map: data_structures.AutoArrayHashMap(Function.Index, u32) = .{},
     arrays: BlockList(Array) = .{},
     casts: BlockList(Cast) = .{},
+    binary_operations: BlockList(BinaryOperation) = .{},
     string_literal_types: data_structures.AutoArrayHashMap(u32, Type.Index) = .{},
     array_types: data_structures.AutoArrayHashMap(Array, Type.Index) = .{},
     entry_point: Function.Index = Function.Index.invalid,
@@ -1170,7 +1190,7 @@ pub fn log(comptime logger_scope: LoggerScope, logger: getLoggerScopeType(logger
 pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, return_address: ?usize) noreturn {
     const print_stack_trace = true;
     switch (print_stack_trace) {
-        true => std.builtin.default_panic(message, stack_trace, return_address),
+        true => @call(.always_inline, std.builtin.default_panic, .{ message, stack_trace, return_address }),
         false => {
             writer.writeAll("\nPANIC: ") catch {};
             writer.writeAll(message) catch {};
diff --git a/src/backend/intermediate_representation.zig b/src/backend/intermediate_representation.zig
index 21c0993..73d877b 100644
--- a/src/backend/intermediate_representation.zig
+++ b/src/backend/intermediate_representation.zig
@@ -43,9 +43,10 @@ pub const Result = struct {
     stack_references: BlockList(StackReference) = .{},
     string_literals: BlockList(StringLiteral) = .{},
     casts: BlockList(Cast) = .{},
+    binary_operations: BlockList(BinaryOperation) = .{},
+    section_manager: emit.SectionManager,
     module: *Module,
     entry_point: Function.Index = Function.Index.invalid,
-    section_manager: emit.SectionManager,
 
     pub fn getFunctionName(ir: *Result, function_index: Function.Declaration.Index) []const u8 {
         return ir.module.getName(ir.module.function_name_map.get(@bitCast(function_index)).?).?;
@@ -115,7 +116,7 @@ pub const BasicBlock = struct {
     fn hasJump(basic_block: *BasicBlock, ir: *Result) bool {
         if (basic_block.instructions.items.len > 0) {
             const last_instruction = ir.instructions.get(basic_block.instructions.getLast());
-            return switch (last_instruction.*) {
+            return switch (last_instruction.u) {
                 .jump => true,
                 else => false,
             };
@@ -145,8 +146,23 @@ const Syscall = struct {
     pub const Index = List.Index;
 };
 
+pub const AtomicOrder = enum {
+    unordered,
+    monotonic,
+    acquire,
+    release,
+    acquire_release,
+    sequentially_consistent,
+};
+
 pub const Load = struct {
     instruction: Instruction.Index,
+    ordering: ?AtomicOrder = null,
+    @"volatile": bool = false,
+
+    pub fn isUnordered(load: *const Load) bool {
+        return (load.ordering == null or load.ordering == .unordered) and !load.@"volatile";
+    }
 
     pub const List = BlockList(@This());
     pub const Index = List.Index;
@@ -155,6 +171,9 @@ pub const Load = struct {
 pub const Store = struct {
     source: Instruction.Index,
     destination: Instruction.Index,
+    ordering: ?AtomicOrder = null,
+    @"volatile": bool = false,
+
     pub const List = BlockList(@This());
     pub const Index = List.Index;
 };
@@ -207,6 +226,22 @@ pub const Cast = struct {
     pub const Allocation = List.Allocation;
 };
 
+pub const BinaryOperation = struct {
+    left: Instruction.Index,
+    right: Instruction.Index,
+    id: Id,
+    type: Type,
+
+    const Id = enum {
+        add,
+        sub,
+    };
+
+    pub const List = BlockList(@This());
+    pub const Index = List.Index;
+    pub const Allocation = List.Allocation;
+};
+
 pub const CastType = enum {
     sign_extend,
 };
@@ -257,21 +292,27 @@ pub const Type = enum {
     }
 };
 
-pub const Instruction = union(enum) {
-    call: Call.Index,
-    jump: Jump.Index,
-    load: Load.Index,
-    phi: Phi.Index,
-    ret: Return.Index,
-    store: Store.Index,
-    syscall: Syscall.Index,
-    copy: Instruction.Index,
-    @"unreachable",
-    argument: Argument.Index,
-    load_integer: Integer,
-    load_string_literal: StringLiteral.Index,
-    stack: StackReference.Index,
-    sign_extend: Cast.Index,
+pub const Instruction = struct {
+    u: U,
+    use_list: ArrayList(Instruction.Index) = .{},
+
+    const U = union(enum) {
+        call: Call.Index,
+        jump: Jump.Index,
+        load: Load.Index,
+        phi: Phi.Index,
+        ret: Return.Index,
+        store: Store.Index,
+        syscall: Syscall.Index,
+        copy: Instruction.Index,
+        @"unreachable",
+        argument: Argument.Index,
+        load_integer: Integer,
+        load_string_literal: StringLiteral.Index,
+        stack: StackReference.Index,
+        sign_extend: Cast.Index,
+        binary_operation:
BinaryOperation.Index, + }; pub const List = BlockList(@This()); pub const Index = List.Index; @@ -319,15 +360,15 @@ pub const Function = struct { for (block.instructions.items, 0..) |instruction_index, block_instruction_index| { try writer.print("%{} (${}): ", .{ block_instruction_index, instruction_index.uniqueInteger() }); const instruction = function.ir.instructions.get(instruction_index); - try writer.print("{s}", .{@tagName(instruction.*)}); - switch (instruction.*) { + if (instruction.u != .binary_operation) try writer.writeAll(@tagName(instruction.u)); + switch (instruction.u) { .syscall => |syscall_index| { const syscall = function.ir.syscalls.get(syscall_index); try writer.writeAll(" ("); for (syscall.arguments.items, 0..) |arg_index, i| { - const arg_value = function.ir.instructions.get(arg_index).*; + const arg_value = function.ir.instructions.get(arg_index); - try writer.print("${}: {s}", .{ i, @tagName(arg_value) }); + try writer.print("${}: {s}", .{ i, @tagName(arg_value.u) }); if (i < syscall.arguments.items.len - 1) { try writer.writeAll(", "); @@ -344,8 +385,8 @@ pub const Function = struct { const ret = function.ir.returns.get(ret_index); switch (ret.instruction.invalid) { false => { - const ret_value = function.ir.instructions.get(ret.instruction).*; - try writer.print(" {s}", .{@tagName(ret_value)}); + const ret_value = function.ir.instructions.get(ret.instruction); + try writer.print(" {s}", .{@tagName(ret_value.u)}); }, true => try writer.writeAll(" void"), } @@ -356,17 +397,17 @@ pub const Function = struct { // }, .store => |store_index| { const store = function.ir.stores.get(store_index); - const source = function.ir.instructions.get(store.source).*; - const destination = function.ir.instructions.get(store.destination).*; - try writer.print(" {s}, {s}", .{ @tagName(destination), @tagName(source) }); + const source = function.ir.instructions.get(store.source); + const destination = function.ir.instructions.get(store.destination); + try writer.print(" {s}, {s}", .{ @tagName(destination.u), @tagName(source.u) }); }, .call => |call_index| { const call = function.ir.calls.get(call_index); try writer.print(" ${} {s}(", .{ call.function.uniqueInteger(), function.ir.getFunctionName(call.function) }); for (call.arguments, 0..) 
|arg_index, i| { - const arg_value = function.ir.instructions.get(arg_index).*; + const arg_value = function.ir.instructions.get(arg_index); - try writer.print("${}: {s}", .{ i, @tagName(arg_value) }); + try writer.print("${}: {s}", .{ i, @tagName(arg_value.u) }); if (i < call.arguments.len - 1) { try writer.writeAll(", "); @@ -398,6 +439,11 @@ pub const Function = struct { const load = function.ir.loads.get(load_index); try writer.print(" ${}", .{load.instruction.uniqueInteger()}); }, + .binary_operation => |binary_operation_index| { + const binary_operation = function.ir.binary_operations.get(binary_operation_index); + try writer.writeAll(@tagName(binary_operation.id)); + try writer.print(" {s} ${}, ${}", .{ @tagName(binary_operation.type), binary_operation.left.uniqueInteger(), binary_operation.right.uniqueInteger() }); + }, else => |t| @panic(@tagName(t)), } @@ -411,6 +457,14 @@ pub const Function = struct { } }; +pub const Integer = struct { + value: extern union { + signed: i64, + unsigned: u64, + }, + type: Type, +}; + pub const Builder = struct { allocator: Allocator, ir: Result, @@ -420,6 +474,13 @@ pub const Builder = struct { return builder.ir.function_definitions.get(builder.current_function_index); } + fn useInstruction(builder: *Builder, args: struct { + instruction: Instruction.Index, + user: Instruction.Index, + }) !void { + try builder.ir.instructions.get(args.instruction).use_list.append(builder.allocator, args.user); + } + fn buildFunction(builder: *Builder, sema_function: Compilation.Function) !Function.Index { const sema_prototype = builder.ir.module.function_prototypes.get(builder.ir.module.types.get(sema_function.prototype).function); const function_declaration_allocation = try builder.ir.function_declarations.addOne(builder.allocator); @@ -440,7 +501,9 @@ pub const Builder = struct { .type = try builder.translateType(sema_argument_declaration.type), }); const value_allocation = try builder.ir.instructions.append(builder.allocator, .{ - .argument = argument_allocation.index, + .u = .{ + .argument = argument_allocation.index, + }, }); function_declaration.arguments.putAssumeCapacity(sema_argument_declaration_index, value_allocation.index); } @@ -466,7 +529,9 @@ pub const Builder = struct { if (!is_noreturn) { const exit_block = try builder.newBlock(); const phi_instruction = try builder.appendToBlock(exit_block, .{ - .phi = Phi.Index.invalid, + .u = .{ + .phi = Phi.Index.invalid, + }, }); // phi.ptr.* = .{ // .value = Value.Index.invalid, @@ -475,11 +540,16 @@ pub const Builder = struct { // .next = Phi.Index.invalid, // }; const ret = try builder.appendToBlock(exit_block, .{ - .ret = (try builder.ir.returns.append(builder.allocator, .{ - .instruction = phi_instruction, - })).index, + .u = .{ + .ret = (try builder.ir.returns.append(builder.allocator, .{ + .instruction = phi_instruction, + })).index, + }, + }); + try builder.useInstruction(.{ + .instruction = phi_instruction, + .user = ret, }); - _ = ret; function.return_phi_node = phi_instruction; function.return_phi_block = exit_block; } @@ -488,7 +558,7 @@ pub const Builder = struct { for (function_declaration.arguments.keys(), function_declaration.arguments.values()) |sema_argument_index, ir_argument_instruction_index| { const ir_argument_instruction = builder.ir.instructions.get(ir_argument_instruction_index); - const ir_argument = builder.ir.arguments.get(ir_argument_instruction.argument); + const ir_argument = builder.ir.arguments.get(ir_argument_instruction.u.argument); _ = try builder.stackReference(.{ .type = 
ir_argument.type, @@ -499,10 +569,11 @@ pub const Builder = struct { for (function_declaration.arguments.keys(), function_declaration.arguments.values()) |sema_argument_index, ir_argument_instruction_index| { const stack_reference = builder.currentFunction().stack_map.get(sema_argument_index).?; - _ = try builder.store(.{ + const store_instruction = try builder.store(.{ .source = ir_argument_instruction_index, .destination = stack_reference, }); + _ = store_instruction; } const sema_block = sema_function.getBodyBlock(builder.ir.module); @@ -511,10 +582,12 @@ pub const Builder = struct { if (!is_noreturn and sema_block.reaches_end) { if (!builder.ir.blocks.get(builder.currentFunction().current_basic_block).hasJump(&builder.ir)) { _ = try builder.append(.{ - .jump = try builder.jump(.{ - .source = builder.currentFunction().current_basic_block, - .destination = builder.currentFunction().return_phi_block, - }), + .u = .{ + .jump = try builder.jump(.{ + .source = builder.currentFunction().current_basic_block, + .destination = builder.currentFunction().return_phi_block, + }), + }, }); } } @@ -527,6 +600,527 @@ pub const Builder = struct { } } + fn blockInsideBasicBlock(builder: *Builder, sema_block: *Compilation.Block, block_index: BasicBlock.Index) !BasicBlock.Index { + const current_function = builder.currentFunction(); + current_function.current_basic_block = block_index; + try builder.block(sema_block, .{}); + return current_function.current_basic_block; + } + + const BlockOptions = packed struct { + emit_exit_block: bool = true, + }; + + fn emitSyscallArgument(builder: *Builder, sema_syscall_argument_value_index: Compilation.Value.Index) !Instruction.Index { + const sema_syscall_argument_value = builder.ir.module.values.get(sema_syscall_argument_value_index); + return switch (sema_syscall_argument_value.*) { + .integer => |integer| try builder.processInteger(integer), + .sign_extend => |cast_index| try builder.processCast(cast_index, .sign_extend), + .declaration_reference => |declaration_reference| try builder.loadDeclarationReference(declaration_reference.value), + else => |t| @panic(@tagName(t)), + }; + } + + fn processCast(builder: *Builder, sema_cast_index: Compilation.Cast.Index, cast_type: CastType) !Instruction.Index { + const sema_cast = builder.ir.module.casts.get(sema_cast_index); + const sema_source_value = builder.ir.module.values.get(sema_cast.value); + const source_value = switch (sema_source_value.*) { + .declaration_reference => |declaration_reference| try builder.loadDeclarationReference(declaration_reference.value), + else => |t| @panic(@tagName(t)), + }; + + const cast_allocation = try builder.ir.casts.append(builder.allocator, .{ + .value = source_value, + .type = try builder.translateType(sema_cast.type), + }); + + const result = try builder.append(.{ + .u = @unionInit(Instruction.U, switch (cast_type) { + inline else => |ct| @tagName(ct), + }, cast_allocation.index), + }); + + return result; + } + + fn processDeclarationReferenceRaw(builder: *Builder, declaration_index: Compilation.Declaration.Index) !Instruction.Index { + const sema_declaration = builder.ir.module.declarations.get(declaration_index); + const result = switch (sema_declaration.scope_type) { + .local => builder.currentFunction().stack_map.get(declaration_index).?, + .global => unreachable, + }; + return result; + } + + fn loadDeclarationReference(builder: *Builder, declaration_index: Compilation.Declaration.Index) !Instruction.Index { + const stack_instruction = try 
builder.processDeclarationReferenceRaw(declaration_index); + const load = try builder.ir.loads.append(builder.allocator, .{ + .instruction = stack_instruction, + }); + return try builder.append(.{ + .u = .{ + .load = load.index, + }, + }); + } + + fn processInteger(builder: *Builder, integer_value: Compilation.Value.Integer) !Instruction.Index { + const integer = Integer{ + .value = .{ + .unsigned = integer_value.value, + }, + .type = try builder.translateType(integer_value.type), + }; + assert(integer.type.isInteger()); + const instruction_allocation = try builder.ir.instructions.append(builder.allocator, .{ + .u = .{ + .load_integer = integer, + }, + }); + // const load_integer = try builder.append(.{ + // .load_integer = integer, + // }); + return instruction_allocation.index; + } + + fn processSyscall(builder: *Builder, sema_syscall_index: Compilation.Syscall.Index) anyerror!Instruction.Index { + const sema_syscall = builder.ir.module.syscalls.get(sema_syscall_index); + var arguments = try ArrayList(Instruction.Index).initCapacity(builder.allocator, sema_syscall.argument_count + 1); + + const sema_syscall_number = sema_syscall.number; + assert(!sema_syscall_number.invalid); + const number_value_index = try builder.emitSyscallArgument(sema_syscall_number); + + arguments.appendAssumeCapacity(number_value_index); + + for (sema_syscall.getArguments()) |sema_syscall_argument| { + assert(!sema_syscall_argument.invalid); + const argument_value_index = try builder.emitSyscallArgument(sema_syscall_argument); + arguments.appendAssumeCapacity(argument_value_index); + } + + const syscall_allocation = try builder.ir.syscalls.append(builder.allocator, .{ + .arguments = arguments, + }); + + const instruction_index = try builder.append(.{ + .u = .{ + .syscall = syscall_allocation.index, + }, + }); + + for (arguments.items) |argument| { + try builder.useInstruction(.{ + .instruction = argument, + .user = instruction_index, + }); + } + + return instruction_index; + } + + fn processBinaryOperation(builder: *Builder, sema_binary_operation_index: Compilation.BinaryOperation.Index) !Instruction.Index { + const sema_binary_operation = builder.ir.module.binary_operations.get(sema_binary_operation_index); + + const left = try builder.emitBinaryOperationOperand(sema_binary_operation.left); + const right = try builder.emitBinaryOperationOperand(sema_binary_operation.right); + + const binary_operation = try builder.ir.binary_operations.append(builder.allocator, .{ + .left = left, + .right = right, + .id = switch (sema_binary_operation.id) { + .add => .add, + .sub => .sub, + }, + .type = try builder.translateType(sema_binary_operation.type), + }); + + const instruction = try builder.append(.{ + .u = .{ + .binary_operation = binary_operation.index, + }, + }); + + try builder.useInstruction(.{ + .instruction = left, + .user = instruction, + }); + + try builder.useInstruction(.{ + .instruction = right, + .user = instruction, + }); + + return instruction; + } + + fn block(builder: *Builder, sema_block: *Compilation.Block, options: BlockOptions) anyerror!void { + for (sema_block.statements.items) |sema_statement_index| { + const sema_statement = builder.ir.module.values.get(sema_statement_index); + switch (sema_statement.*) { + .loop => |loop_index| { + const sema_loop = builder.ir.module.loops.get(loop_index); + const sema_loop_condition = builder.ir.module.values.get(sema_loop.condition); + const sema_loop_body = builder.ir.module.values.get(sema_loop.body); + const condition: Compilation.Value.Index = switch 
(sema_loop_condition.*) { + .bool => |bool_value| switch (bool_value) { + true => Compilation.Value.Index.invalid, + false => unreachable, + }, + else => |t| @panic(@tagName(t)), + }; + + const original_block = builder.currentFunction().current_basic_block; + const jump_to_loop = try builder.append(.{ + .u = .{ + .jump = undefined, + }, + }); + const loop_body_block = try builder.newBlock(); + const loop_prologue_block = if (options.emit_exit_block) try builder.newBlock() else BasicBlock.Index.invalid; + + const loop_head_block = switch (!condition.invalid) { + false => loop_body_block, + true => unreachable, + }; + + builder.ir.instructions.get(jump_to_loop).u.jump = try builder.jump(.{ + .source = original_block, + .destination = loop_head_block, + }); + + const sema_body_block = builder.ir.module.blocks.get(sema_loop_body.block); + builder.currentFunction().current_basic_block = try builder.blockInsideBasicBlock(sema_body_block, loop_body_block); + if (!loop_prologue_block.invalid) { + builder.ir.blocks.get(loop_prologue_block).seal(); + } + + if (sema_body_block.reaches_end) { + _ = try builder.append(.{ + .u = .{ + .jump = try builder.jump(.{ + .source = builder.currentFunction().current_basic_block, + .destination = loop_head_block, + }), + }, + }); + } + + builder.ir.blocks.get(builder.currentFunction().current_basic_block).filled = true; + builder.ir.blocks.get(loop_body_block).seal(); + if (!loop_head_block.eq(loop_body_block)) { + unreachable; + } + + if (!loop_prologue_block.invalid) { + builder.currentFunction().current_basic_block = loop_prologue_block; + } + }, + .syscall => |sema_syscall_index| _ = try builder.processSyscall(sema_syscall_index), + .@"unreachable" => _ = try builder.append(.{ + .u = .{ + .@"unreachable" = {}, + }, + }), + .@"return" => |sema_ret_index| { + const sema_ret = builder.ir.module.returns.get(sema_ret_index); + const return_value = try builder.emitReturnValue(sema_ret.value); + const phi_instruction = builder.ir.instructions.get(builder.currentFunction().return_phi_node); + const phi = switch (phi_instruction.u.phi.invalid) { + false => unreachable, + true => (try builder.ir.phis.append(builder.allocator, std.mem.zeroes(Phi))).ptr, + }; //builder.ir.phis.get(phi_instruction.phi); + const exit_jump = try builder.jump(.{ + .source = builder.currentFunction().current_basic_block, + .destination = switch (!phi_instruction.u.phi.invalid) { + true => phi.block, + false => builder.currentFunction().return_phi_block, + }, + }); + + phi_instruction.u.phi = (try builder.ir.phis.append(builder.allocator, .{ + .instruction = return_value, + .jump = exit_jump, + .next = phi_instruction.u.phi, + .block = phi.block, + })).index; + + try builder.useInstruction(.{ + .instruction = return_value, + .user = builder.currentFunction().return_phi_node, + }); + + _ = try builder.append(.{ + .u = .{ + .jump = exit_jump, + }, + }); + }, + .declaration => |sema_declaration_index| { + const sema_declaration = builder.ir.module.declarations.get(sema_declaration_index); + //logln("Name: {s}\n", .{builder.module.getName(sema_declaration.name).?}); + assert(sema_declaration.scope_type == .local); + const declaration_type = builder.ir.module.types.get(sema_declaration.type); + switch (declaration_type.*) { + .comptime_int => unreachable, + else => { + var value_index = try builder.emitDeclarationInitValue(sema_declaration.init_value); + const value = builder.ir.instructions.get(value_index); + value_index = switch (value.u) { + .load_integer, + .call, + .binary_operation, + => 
value_index, + // .call => try builder.load(value_index), + else => |t| @panic(@tagName(t)), + }; + + const ir_type = try builder.translateType(sema_declaration.type); + + const stack_i = try builder.stackReference(.{ + .type = ir_type, + .sema = sema_declaration_index, + }); + const store_instruction = try builder.store(.{ + .source = value_index, + .destination = stack_i, + }); + _ = store_instruction; + }, + } + }, + .call => |sema_call_index| _ = try builder.processCall(sema_call_index), + else => |t| @panic(@tagName(t)), + } + } + } + + fn emitDeclarationInitValue(builder: *Builder, declaration_init_value_index: Compilation.Value.Index) !Instruction.Index { + const declaration_init_value = builder.ir.module.values.get(declaration_init_value_index); + return switch (declaration_init_value.*) { + .call => |call_index| try builder.processCall(call_index), + .integer => |integer| try builder.processInteger(integer), + .binary_operation => |binary_operation_index| try builder.processBinaryOperation(binary_operation_index), + else => |t| @panic(@tagName(t)), + }; + } + + fn emitReturnValue(builder: *Builder, return_value_index: Compilation.Value.Index) !Instruction.Index { + const return_value = builder.ir.module.values.get(return_value_index); + return switch (return_value.*) { + .syscall => |syscall_index| try builder.processSyscall(syscall_index), + .integer => |integer| try builder.processInteger(integer), + .call => |call_index| try builder.processCall(call_index), + .declaration_reference => |declaration_reference| try builder.loadDeclarationReference(declaration_reference.value), + else => |t| @panic(@tagName(t)), + }; + } + + fn emitBinaryOperationOperand(builder: *Builder, binary_operation_index: Compilation.Value.Index) !Instruction.Index { + const value = builder.ir.module.values.get(binary_operation_index); + return switch (value.*) { + .integer => |integer| try builder.processInteger(integer), + .call => |call_index| try builder.processCall(call_index), + .declaration_reference => |declaration_reference| try builder.loadDeclarationReference(declaration_reference.value), + else => |t| @panic(@tagName(t)), + }; + } + + fn stackReference(builder: *Builder, arguments: struct { + type: Type, + sema: Compilation.Declaration.Index, + alignment: ?u64 = null, + }) !Instruction.Index { + const size = arguments.type.getSize(); + assert(size > 0); + const alignment = if (arguments.alignment) |a| a else arguments.type.getAlignment(); + builder.currentFunction().current_stack_offset = std.mem.alignForward(u64, builder.currentFunction().current_stack_offset, alignment); + builder.currentFunction().current_stack_offset += size; + const stack_offset = builder.currentFunction().current_stack_offset; + const stack_reference_allocation = try builder.ir.stack_references.append(builder.allocator, .{ + .offset = stack_offset, + .type = arguments.type, + .alignment = alignment, + }); + + const instruction_index = try builder.append(.{ + .u = .{ + .stack = stack_reference_allocation.index, + }, + }); + + try builder.currentFunction().stack_map.put(builder.allocator, arguments.sema, instruction_index); + + return instruction_index; + } + + fn store(builder: *Builder, descriptor: Store) !Instruction.Index { + const store_allocation = try builder.ir.stores.append(builder.allocator, descriptor); + + const result = try builder.append(.{ + .u = .{ + .store = store_allocation.index, + }, + }); + + try builder.useInstruction(.{ + .instruction = descriptor.source, + .user = result, + }); + + try 
builder.useInstruction(.{ + .instruction = descriptor.destination, + .user = result, + }); + + return result; + } + + fn emitCallArgument(builder: *Builder, call_argument_value_index: Compilation.Value.Index) !Instruction.Index { + const call_argument_value = builder.ir.module.values.get(call_argument_value_index); + return switch (call_argument_value.*) { + .integer => |integer| try builder.processInteger(integer), + .declaration_reference => |declaration_reference| try builder.loadDeclarationReference(declaration_reference.value), + .string_literal => |string_literal_index| try builder.processStringLiteral(string_literal_index), + else => |t| @panic(@tagName(t)), + }; + } + + fn processCall(builder: *Builder, sema_call_index: Compilation.Call.Index) anyerror!Instruction.Index { + const sema_call = builder.ir.module.calls.get(sema_call_index); + const sema_argument_list_index = sema_call.arguments; + const argument_list: []const Instruction.Index = switch (sema_argument_list_index.invalid) { + false => blk: { + var argument_list = ArrayList(Instruction.Index){}; + const sema_argument_list = builder.ir.module.argument_lists.get(sema_argument_list_index); + try argument_list.ensureTotalCapacity(builder.allocator, sema_argument_list.array.items.len); + for (sema_argument_list.array.items) |sema_argument_value_index| { + const argument_value_index = try builder.emitCallArgument(sema_argument_value_index); + argument_list.appendAssumeCapacity(argument_value_index); + } + break :blk argument_list.items; + }, + true => &.{}, + }; + + const call_index = try builder.call(.{ + .function = switch (builder.ir.module.values.get(sema_call.value).*) { + .function => |function_index| .{ + .index = function_index.index, + .block = function_index.block, + }, + else => |t| @panic(@tagName(t)), + }, + .arguments = argument_list, + }); + + const instruction_index = try builder.append(.{ + .u = .{ + .call = call_index, + }, + }); + + for (argument_list) |argument| { + try builder.useInstruction(.{ + .instruction = argument, + .user = instruction_index, + }); + } + + return instruction_index; + } + + fn processStringLiteral(builder: *Builder, string_literal_hash: u32) !Instruction.Index { + const string_literal = builder.ir.module.string_literals.getValue(string_literal_hash).?; + + if (builder.ir.section_manager.rodata == null) { + const rodata_index = try builder.ir.section_manager.addSection(.{ + .name = ".rodata", + .size_guess = 0, + .alignment = 0x1000, + .flags = .{ + .read = true, + .write = false, + .execute = false, + }, + .type = .loadable_program, + }); + + builder.ir.section_manager.rodata = @intCast(rodata_index); + } + + const rodata_index = builder.ir.section_manager.rodata orelse unreachable; + const rodata_section_offset = builder.ir.section_manager.getSectionOffset(rodata_index); + + try builder.ir.section_manager.appendToSection(rodata_index, string_literal); + try builder.ir.section_manager.appendByteToSection(rodata_index, 0); + + const string_literal_allocation = try builder.ir.string_literals.append(builder.allocator, .{ + .offset = @intCast(rodata_section_offset), + }); + + const result = try builder.append(.{ + .u = .{ + .load_string_literal = string_literal_allocation.index, + }, + }); + + return result; + } + + fn translateType(builder: *Builder, type_index: Compilation.Type.Index) !Type { + const sema_type = builder.ir.module.types.get(type_index); + return switch (sema_type.*) { + .integer => |integer| switch (integer.bit_count) { + 8 => .i8, + 16 => .i16, + 32 => .i32, + 64 => 
.i64, + else => unreachable, + }, + // TODO + .pointer => .i64, + .void => .void, + .noreturn => .noreturn, + else => |t| @panic(@tagName(t)), + }; + } + + fn call(builder: *Builder, descriptor: Call) !Call.Index { + const call_allocation = try builder.ir.calls.append(builder.allocator, descriptor); + return call_allocation.index; + } + + fn jump(builder: *Builder, descriptor: Jump) !Jump.Index { + const destination_block = builder.ir.blocks.get(descriptor.destination); + assert(!destination_block.sealed); + assert(!descriptor.source.invalid); + const jump_allocation = try builder.ir.jumps.append(builder.allocator, descriptor); + return jump_allocation.index; + } + + fn append(builder: *Builder, instruction: Instruction) !Instruction.Index { + assert(!builder.current_function_index.invalid); + const current_function = builder.currentFunction(); + assert(!current_function.current_basic_block.invalid); + return builder.appendToBlock(current_function.current_basic_block, instruction); + } + + fn appendToBlock(builder: *Builder, block_index: BasicBlock.Index, instruction: Instruction) !Instruction.Index { + const instruction_allocation = try builder.ir.instructions.append(builder.allocator, instruction); + try builder.ir.blocks.get(block_index).instructions.append(builder.allocator, instruction_allocation.index); + + return instruction_allocation.index; + } + + fn newBlock(builder: *Builder) !BasicBlock.Index { + const new_block_allocation = try builder.ir.blocks.append(builder.allocator, .{}); + const current_function = builder.currentFunction(); + try current_function.blocks.append(builder.allocator, new_block_allocation.index); + + return new_block_allocation.index; + } + const BlockSearcher = struct { to_visit: ArrayList(BasicBlock.Index) = .{}, visited: AutoArrayHashMap(BasicBlock.Index, void) = .{}, @@ -542,7 +1136,7 @@ pub const Builder = struct { const block_to_visit = builder.ir.blocks.get(block_index); const last_instruction_index = block_to_visit.instructions.items[block_to_visit.instructions.items.len - 1]; const last_instruction = builder.ir.instructions.get(last_instruction_index); - const block_to_search = switch (last_instruction.*) { + const block_to_search = switch (last_instruction.u) { .jump => |jump_index| blk: { const ir_jump = builder.ir.jumps.get(jump_index); assert(ir_jump.source.eq(block_index)); @@ -598,7 +1192,7 @@ pub const Builder = struct { if (basic_block.instructions.items.len > 0) { const instruction = builder.ir.instructions.get(basic_block.instructions.getLast()); - switch (instruction.*) { + switch (instruction.u) { .jump => |jump_index| { const jump_instruction = builder.ir.jumps.get(jump_index); const source = basic_block_index; @@ -640,7 +1234,7 @@ pub const Builder = struct { const basic_block = builder.ir.blocks.get(basic_block_index); for (basic_block.instructions.items, 0..) 
|instruction_index, index| { const instruction = builder.ir.instructions.get(instruction_index); - switch (instruction.*) { + switch (instruction.u) { .copy => try instructions_to_delete.append(builder.allocator, @intCast(index)), else => {}, } @@ -657,10 +1251,10 @@ pub const Builder = struct { fn removeUnreachablePhis(builder: *Builder, reachable_blocks: []const BasicBlock.Index, instruction_index: Instruction.Index) !bool { const instruction = builder.ir.instructions.get(instruction_index); - return switch (instruction.*) { + return switch (instruction.u) { .phi => blk: { var did_something = false; - var head = &instruction.phi; + var head = &instruction.u.phi; next: while (!head.invalid) { const phi = builder.ir.phis.get(head.*); const phi_jump = builder.ir.jumps.get(phi.jump); @@ -685,7 +1279,7 @@ pub const Builder = struct { fn removeTrivialPhis(builder: *Builder, instruction_index: Instruction.Index) !bool { const instruction = builder.ir.instructions.get(instruction_index); - return switch (instruction.*) { + return switch (instruction.u) { .phi => |phi_index| blk: { const trivial_phi: ?Instruction.Index = trivial_blk: { var only_value = Instruction.Index.invalid; @@ -694,7 +1288,7 @@ pub const Builder = struct { while (!it.invalid) { const phi = builder.ir.phis.get(it); const phi_value = builder.ir.instructions.get(phi.instruction); - if (phi_value.* == .phi) unreachable; + if (phi_value.u == .phi) unreachable; // TODO: undefined if (!only_value.invalid) { if (!only_value.eq(phi.instruction)) { @@ -718,18 +1312,22 @@ pub const Builder = struct { unreachable; } else { instruction.* = .{ - .copy = trivial_value, + .u = .{ + .copy = trivial_value, + }, }; } } else { logln(.ir, .phi_removal, "TODO: maybe this phi removal is wrong?", .{}); instruction.* = .{ - .copy = trivial_value, + .u = .{ + .copy = trivial_value, + }, }; } } - break :blk instruction.* != .phi; + break :blk instruction.u != .phi; }, else => false, }; @@ -737,14 +1335,14 @@ pub const Builder = struct { fn removeCopyReferences(builder: *Builder, instruction_index: Instruction.Index) !bool { const instruction = builder.ir.instructions.get(instruction_index); - return switch (instruction.*) { + return switch (instruction.u) { .copy => false, else => { var did_something = false; - const operands: []const *Instruction.Index = switch (instruction.*) { + const operands: []const *Instruction.Index = switch (instruction.u) { .jump, .@"unreachable", .load_integer, .load_string_literal, .stack, .argument => &.{}, - .ret => &.{&builder.ir.returns.get(instruction.ret).instruction}, + .ret => &.{&builder.ir.returns.get(instruction.u.ret).instruction}, // TODO: arguments .call => blk: { var list = ArrayList(*Instruction.Index){}; @@ -772,6 +1370,10 @@ pub const Builder = struct { const load = builder.ir.loads.get(load_index); break :blk &.{&load.instruction}; }, + .binary_operation => |binary_operation_index| blk: { + const binary_operation = builder.ir.binary_operations.get(binary_operation_index); + break :blk &.{ &binary_operation.left, &binary_operation.right }; + }, else => |t| @panic(@tagName(t)), }; @@ -779,7 +1381,7 @@ pub const Builder = struct { switch (operand_instruction_index_pointer.invalid) { false => { const operand_value = builder.ir.instructions.get(operand_instruction_index_pointer.*); - switch (operand_value.*) { + switch (operand_value.u) { .copy => |copy_value| { operand_instruction_index_pointer.* = copy_value; did_something = true; @@ -791,6 +1393,7 @@ pub const Builder = struct { .syscall, .sign_extend, 
.load, + .binary_operation, => {}, else => |t| @panic(@tagName(t)), } @@ -803,426 +1406,4 @@ pub const Builder = struct { }, }; } - - fn blockInsideBasicBlock(builder: *Builder, sema_block: *Compilation.Block, block_index: BasicBlock.Index) !BasicBlock.Index { - const current_function = builder.currentFunction(); - current_function.current_basic_block = block_index; - try builder.block(sema_block, .{}); - return current_function.current_basic_block; - } - - const BlockOptions = packed struct { - emit_exit_block: bool = true, - }; - - fn emitSyscallArgument(builder: *Builder, sema_syscall_argument_value_index: Compilation.Value.Index) !Instruction.Index { - const sema_syscall_argument_value = builder.ir.module.values.get(sema_syscall_argument_value_index); - return switch (sema_syscall_argument_value.*) { - .integer => |integer| try builder.processInteger(integer), - .sign_extend => |cast_index| try builder.processCast(cast_index, .sign_extend), - .declaration_reference => |declaration_reference| try builder.loadDeclarationReference(declaration_reference.value), - else => |t| @panic(@tagName(t)), - }; - } - - fn processCast(builder: *Builder, sema_cast_index: Compilation.Cast.Index, cast_type: CastType) !Instruction.Index { - const sema_cast = builder.ir.module.casts.get(sema_cast_index); - const sema_source_value = builder.ir.module.values.get(sema_cast.value); - const source_value = switch (sema_source_value.*) { - .declaration_reference => |declaration_reference| try builder.loadDeclarationReference(declaration_reference.value), - else => |t| @panic(@tagName(t)), - }; - - const cast_allocation = try builder.ir.casts.append(builder.allocator, .{ - .value = source_value, - .type = try builder.translateType(sema_cast.type), - }); - - const result = try builder.append(@unionInit(Instruction, switch (cast_type) { - inline else => |ct| @tagName(ct), - }, cast_allocation.index)); - - return result; - } - - fn processDeclarationReferenceRaw(builder: *Builder, declaration_index: Compilation.Declaration.Index) !Instruction.Index { - const sema_declaration = builder.ir.module.declarations.get(declaration_index); - const result = switch (sema_declaration.scope_type) { - .local => builder.currentFunction().stack_map.get(declaration_index).?, - .global => unreachable, - }; - return result; - } - - fn loadDeclarationReference(builder: *Builder, declaration_index: Compilation.Declaration.Index) !Instruction.Index { - const stack_instruction = try builder.processDeclarationReferenceRaw(declaration_index); - const load = try builder.ir.loads.append(builder.allocator, .{ - .instruction = stack_instruction, - }); - return try builder.append(.{ - .load = load.index, - }); - } - - fn processInteger(builder: *Builder, integer_value: Compilation.Value.Integer) !Instruction.Index { - const integer = Integer{ - .value = .{ - .unsigned = integer_value.value, - }, - .type = try builder.translateType(integer_value.type), - }; - assert(integer.type.isInteger()); - const instruction_allocation = try builder.ir.instructions.append(builder.allocator, .{ - .load_integer = integer, - }); - // const load_integer = try builder.append(.{ - // .load_integer = integer, - // }); - return instruction_allocation.index; - } - - fn processSyscall(builder: *Builder, sema_syscall_index: Compilation.Syscall.Index) anyerror!Instruction.Index { - const sema_syscall = builder.ir.module.syscalls.get(sema_syscall_index); - var arguments = try ArrayList(Instruction.Index).initCapacity(builder.allocator, sema_syscall.argument_count + 1); - - 
const sema_syscall_number = sema_syscall.number; - assert(!sema_syscall_number.invalid); - const number_value_index = try builder.emitSyscallArgument(sema_syscall_number); - - arguments.appendAssumeCapacity(number_value_index); - - for (sema_syscall.getArguments()) |sema_syscall_argument| { - assert(!sema_syscall_argument.invalid); - const argument_value_index = try builder.emitSyscallArgument(sema_syscall_argument); - arguments.appendAssumeCapacity(argument_value_index); - } - - // TODO: undo this mess - const syscall_allocation = try builder.ir.syscalls.append(builder.allocator, .{ - .arguments = arguments, - }); - - const instruction_index = try builder.append(.{ .syscall = syscall_allocation.index }); - return instruction_index; - } - - fn block(builder: *Builder, sema_block: *Compilation.Block, options: BlockOptions) anyerror!void { - for (sema_block.statements.items) |sema_statement_index| { - const sema_statement = builder.ir.module.values.get(sema_statement_index); - switch (sema_statement.*) { - .loop => |loop_index| { - const sema_loop = builder.ir.module.loops.get(loop_index); - const sema_loop_condition = builder.ir.module.values.get(sema_loop.condition); - const sema_loop_body = builder.ir.module.values.get(sema_loop.body); - const condition: Compilation.Value.Index = switch (sema_loop_condition.*) { - .bool => |bool_value| switch (bool_value) { - true => Compilation.Value.Index.invalid, - false => unreachable, - }, - else => |t| @panic(@tagName(t)), - }; - - const original_block = builder.currentFunction().current_basic_block; - const jump_to_loop = try builder.append(.{ - .jump = undefined, - }); - const loop_body_block = try builder.newBlock(); - const loop_prologue_block = if (options.emit_exit_block) try builder.newBlock() else BasicBlock.Index.invalid; - - const loop_head_block = switch (!condition.invalid) { - false => loop_body_block, - true => unreachable, - }; - - builder.ir.instructions.get(jump_to_loop).jump = try builder.jump(.{ - .source = original_block, - .destination = loop_head_block, - }); - - const sema_body_block = builder.ir.module.blocks.get(sema_loop_body.block); - builder.currentFunction().current_basic_block = try builder.blockInsideBasicBlock(sema_body_block, loop_body_block); - if (!loop_prologue_block.invalid) { - builder.ir.blocks.get(loop_prologue_block).seal(); - } - - if (sema_body_block.reaches_end) { - _ = try builder.append(.{ - .jump = try builder.jump(.{ - .source = builder.currentFunction().current_basic_block, - .destination = loop_head_block, - }), - }); - } - - builder.ir.blocks.get(builder.currentFunction().current_basic_block).filled = true; - builder.ir.blocks.get(loop_body_block).seal(); - if (!loop_head_block.eq(loop_body_block)) { - unreachable; - } - - if (!loop_prologue_block.invalid) { - builder.currentFunction().current_basic_block = loop_prologue_block; - } - }, - .syscall => |sema_syscall_index| _ = try builder.processSyscall(sema_syscall_index), - .@"unreachable" => _ = try builder.append(.{ - .@"unreachable" = {}, - }), - .@"return" => |sema_ret_index| { - const sema_ret = builder.ir.module.returns.get(sema_ret_index); - const return_value = try builder.emitReturnValue(sema_ret.value); - const phi_instruction = builder.ir.instructions.get(builder.currentFunction().return_phi_node); - const phi = switch (phi_instruction.phi.invalid) { - false => unreachable, - true => (try builder.ir.phis.append(builder.allocator, std.mem.zeroes(Phi))).ptr, - }; //builder.ir.phis.get(phi_instruction.phi); - const exit_jump = try 
builder.jump(.{ - .source = builder.currentFunction().current_basic_block, - .destination = switch (!phi_instruction.phi.invalid) { - true => phi.block, - false => builder.currentFunction().return_phi_block, - }, - }); - phi_instruction.phi = (try builder.ir.phis.append(builder.allocator, .{ - .instruction = return_value, - .jump = exit_jump, - .next = phi_instruction.phi, - .block = phi.block, - })).index; - - _ = try builder.append(.{ - .jump = exit_jump, - }); - }, - .declaration => |sema_declaration_index| { - const sema_declaration = builder.ir.module.declarations.get(sema_declaration_index); - //logln("Name: {s}\n", .{builder.module.getName(sema_declaration.name).?}); - assert(sema_declaration.scope_type == .local); - const declaration_type = builder.ir.module.types.get(sema_declaration.type); - switch (declaration_type.*) { - .comptime_int => unreachable, - else => { - var value_index = try builder.emitDeclarationInitValue(sema_declaration.init_value); - const value = builder.ir.instructions.get(value_index); - value_index = switch (value.*) { - .load_integer, - .call, - => value_index, - // .call => try builder.load(value_index), - else => |t| @panic(@tagName(t)), - }; - - const ir_type = try builder.translateType(sema_declaration.type); - _ = try builder.store(.{ - .source = value_index, - .destination = try builder.stackReference(.{ - .type = ir_type, - .sema = sema_declaration_index, - }), - }); - }, - } - }, - .call => |sema_call_index| _ = try builder.processCall(sema_call_index), - else => |t| @panic(@tagName(t)), - } - } - } - - fn emitDeclarationInitValue(builder: *Builder, declaration_init_value_index: Compilation.Value.Index) !Instruction.Index { - const declaration_init_value = builder.ir.module.values.get(declaration_init_value_index); - return switch (declaration_init_value.*) { - .call => |call_index| try builder.processCall(call_index), - .integer => |integer| try builder.processInteger(integer), - else => |t| @panic(@tagName(t)), - }; - } - - fn emitReturnValue(builder: *Builder, return_value_index: Compilation.Value.Index) !Instruction.Index { - const return_value = builder.ir.module.values.get(return_value_index); - return switch (return_value.*) { - .syscall => |syscall_index| try builder.processSyscall(syscall_index), - .integer => |integer| try builder.processInteger(integer), - .call => |call_index| try builder.processCall(call_index), - .declaration_reference => |declaration_reference| try builder.loadDeclarationReference(declaration_reference.value), - else => |t| @panic(@tagName(t)), - }; - } - - fn stackReference(builder: *Builder, arguments: struct { - type: Type, - sema: Compilation.Declaration.Index, - alignment: ?u64 = null, - }) !Instruction.Index { - const size = arguments.type.getSize(); - assert(size > 0); - const alignment = if (arguments.alignment) |a| a else arguments.type.getAlignment(); - builder.currentFunction().current_stack_offset = std.mem.alignForward(u64, builder.currentFunction().current_stack_offset, alignment); - builder.currentFunction().current_stack_offset += size; - const stack_offset = builder.currentFunction().current_stack_offset; - const stack_reference_allocation = try builder.ir.stack_references.append(builder.allocator, .{ - .offset = stack_offset, - .type = arguments.type, - .alignment = alignment, - }); - - const instruction_index = try builder.append(.{ - .stack = stack_reference_allocation.index, - }); - - try builder.currentFunction().stack_map.put(builder.allocator, arguments.sema, instruction_index); - - return 
instruction_index; - } - - fn store(builder: *Builder, descriptor: Store) !void { - const store_allocation = try builder.ir.stores.append(builder.allocator, descriptor); - _ = try builder.append(.{ - .store = store_allocation.index, - }); - } - - fn emitCallArgument(builder: *Builder, call_argument_value_index: Compilation.Value.Index) !Instruction.Index { - const call_argument_value = builder.ir.module.values.get(call_argument_value_index); - return switch (call_argument_value.*) { - .integer => |integer| try builder.processInteger(integer), - .declaration_reference => |declaration_reference| try builder.loadDeclarationReference(declaration_reference.value), - .string_literal => |string_literal_index| try builder.processStringLiteral(string_literal_index), - else => |t| @panic(@tagName(t)), - }; - } - - fn processCall(builder: *Builder, sema_call_index: Compilation.Call.Index) anyerror!Instruction.Index { - const sema_call = builder.ir.module.calls.get(sema_call_index); - const sema_argument_list_index = sema_call.arguments; - const argument_list: []const Instruction.Index = switch (sema_argument_list_index.invalid) { - false => blk: { - var argument_list = ArrayList(Instruction.Index){}; - const sema_argument_list = builder.ir.module.argument_lists.get(sema_argument_list_index); - try argument_list.ensureTotalCapacity(builder.allocator, sema_argument_list.array.items.len); - for (sema_argument_list.array.items) |sema_argument_value_index| { - const argument_value_index = try builder.emitCallArgument(sema_argument_value_index); - argument_list.appendAssumeCapacity(argument_value_index); - } - break :blk argument_list.items; - }, - true => &.{}, - }; - - const call_index = try builder.call(.{ - .function = switch (builder.ir.module.values.get(sema_call.value).*) { - .function => |function_index| .{ - .index = function_index.index, - .block = function_index.block, - }, - else => |t| @panic(@tagName(t)), - }, - .arguments = argument_list, - }); - - const instruction_index = try builder.append(.{ - .call = call_index, - }); - - return instruction_index; - } - - fn processStringLiteral(builder: *Builder, string_literal_hash: u32) !Instruction.Index { - const string_literal = builder.ir.module.string_literals.getValue(string_literal_hash).?; - - if (builder.ir.section_manager.rodata == null) { - const rodata_index = try builder.ir.section_manager.addSection(.{ - .name = ".rodata", - .size_guess = 0, - .alignment = 0x1000, - .flags = .{ - .read = true, - .write = false, - .execute = false, - }, - .type = .loadable_program, - }); - - builder.ir.section_manager.rodata = @intCast(rodata_index); - } - - const rodata_index = builder.ir.section_manager.rodata orelse unreachable; - const rodata_section_offset = builder.ir.section_manager.getSectionOffset(rodata_index); - - try builder.ir.section_manager.appendToSection(rodata_index, string_literal); - try builder.ir.section_manager.appendByteToSection(rodata_index, 0); - - const string_literal_allocation = try builder.ir.string_literals.append(builder.allocator, .{ - .offset = @intCast(rodata_section_offset), - }); - - const result = try builder.append(.{ - .load_string_literal = string_literal_allocation.index, - }); - - return result; - } - - fn translateType(builder: *Builder, type_index: Compilation.Type.Index) !Type { - const sema_type = builder.ir.module.types.get(type_index); - return switch (sema_type.*) { - .integer => |integer| switch (integer.bit_count) { - 8 => .i8, - 16 => .i16, - 32 => .i32, - 64 => .i64, - else => unreachable, - }, - // 
TODO - .pointer => .i64, - .void => .void, - .noreturn => .noreturn, - else => |t| @panic(@tagName(t)), - }; - } - - fn call(builder: *Builder, descriptor: Call) !Call.Index { - const call_allocation = try builder.ir.calls.append(builder.allocator, descriptor); - return call_allocation.index; - } - - fn jump(builder: *Builder, descriptor: Jump) !Jump.Index { - const destination_block = builder.ir.blocks.get(descriptor.destination); - assert(!destination_block.sealed); - assert(!descriptor.source.invalid); - const jump_allocation = try builder.ir.jumps.append(builder.allocator, descriptor); - return jump_allocation.index; - } - - fn append(builder: *Builder, instruction: Instruction) !Instruction.Index { - assert(!builder.current_function_index.invalid); - const current_function = builder.currentFunction(); - assert(!current_function.current_basic_block.invalid); - return builder.appendToBlock(current_function.current_basic_block, instruction); - } - - fn appendToBlock(builder: *Builder, block_index: BasicBlock.Index, instruction: Instruction) !Instruction.Index { - const instruction_allocation = try builder.ir.instructions.append(builder.allocator, instruction); - try builder.ir.blocks.get(block_index).instructions.append(builder.allocator, instruction_allocation.index); - - return instruction_allocation.index; - } - - fn newBlock(builder: *Builder) !BasicBlock.Index { - const new_block_allocation = try builder.ir.blocks.append(builder.allocator, .{}); - const current_function = builder.currentFunction(); - const function_block_index = current_function.blocks.items.len; - _ = function_block_index; - try current_function.blocks.append(builder.allocator, new_block_allocation.index); - - return new_block_allocation.index; - } -}; - -pub const Integer = struct { - value: extern union { - signed: i64, - unsigned: u64, - }, - type: Type, }; diff --git a/src/backend/x86_64.zig b/src/backend/x86_64.zig index f7181e5..a97c433 100644 --- a/src/backend/x86_64.zig +++ b/src/backend/x86_64.zig @@ -889,6 +889,10 @@ const ValueType = struct { data_type: DataType, scalarness: Scalarness, + fn getSize(value_type: ValueType) usize { + return value_type.size * value_type.element_count; + } + const DataType = enum(u1) { integer = 0, float = 1, @@ -993,6 +997,9 @@ const InstructionSelection = struct { stack_objects: ArrayList(StackObject) = .{}, function: *MIR.Function, instruction_cache: ArrayList(Instruction.Index) = .{}, + register_fixups: data_structures.AutoArrayHashMap(Register.Index, Register.Index) = .{}, + registers_with_fixups: data_structures.AutoArrayHashMap(Register.Index, void) = .{}, + folded_loads: data_structures.AutoArrayHashMap(ir.Instruction.Index, void) = .{}, fn storeRegisterToStackSlot(instruction_selection: *InstructionSelection, mir: *MIR, insert_before_instruction_index: usize, source_register: Register.Physical, kill: bool, frame_index: u32, register_class: Register.Class, virtual_register: Register.Virtual.Index) !void { _ = virtual_register; @@ -1147,7 +1154,7 @@ const InstructionSelection = struct { } const instruction = mir.ir.instructions.get(ir_instruction_index); - const defer_materialization = switch (instruction.*) { + const defer_materialization = switch (instruction.u) { .stack => !instruction_selection.stack_map.contains(ir_instruction_index), .load_integer => false, else => true, @@ -1161,7 +1168,7 @@ const InstructionSelection = struct { try instruction_selection.value_map.putNoClobber(mir.allocator, ir_instruction_index, new_register); return new_register; } else { - 
const new_register = switch (instruction.*) { + const new_register = switch (instruction.u) { .load_integer => try instruction_selection.materializeInteger(mir, ir_instruction_index), else => unreachable, }; @@ -1174,7 +1181,7 @@ const InstructionSelection = struct { // Moving an immediate to a register fn materializeInteger(instruction_selection: *InstructionSelection, mir: *MIR, ir_instruction_index: ir.Instruction.Index) !Register { // const destination_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index); - const integer = mir.ir.instructions.get(ir_instruction_index).load_integer; + const integer = mir.ir.instructions.get(ir_instruction_index).u.load_integer; const value_type = resolveType(integer.type); // const destination_register_class = register_classes.get(value_type); // const instruction_id: Instruction.Id = @@ -1232,12 +1239,13 @@ const InstructionSelection = struct { }; const instruction_descriptor = instruction_descriptors.get(instruction_id); - const operand_id = instruction_descriptor.operands[0].id; + const destination_operand_id = instruction_descriptor.operands[0].id; + const source_operand_id = instruction_descriptor.operands[1].id; const register_class = register_classes.get(value_type); const destination_register = try mir.createVirtualRegister(register_class); const destination_operand = Operand{ - .id = operand_id, + .id = destination_operand_id, .u = .{ .register = destination_register, }, @@ -1245,7 +1253,7 @@ const InstructionSelection = struct { }; const source_operand = Operand{ - .id = .immediate, + .id = source_operand_id, .u = .{ .immediate = integer.value.unsigned, }, @@ -1266,7 +1274,7 @@ const InstructionSelection = struct { fn getAddressingModeFromIr(instruction_selection: *InstructionSelection, mir: *MIR, ir_instruction_index: ir.Instruction.Index) AddressingMode { const instruction = mir.ir.instructions.get(ir_instruction_index); - switch (instruction.*) { + switch (instruction.u) { .stack => { const frame_index: u32 = @intCast(instruction_selection.stack_map.getIndex(ir_instruction_index).?); return AddressingMode{ @@ -1303,7 +1311,7 @@ const InstructionSelection = struct { for (ir_arguments) |ir_argument_instruction_index| { const ir_argument_instruction = mir.ir.instructions.get(ir_argument_instruction_index); - const ir_argument = mir.ir.arguments.get(ir_argument_instruction.argument); + const ir_argument = mir.ir.arguments.get(ir_argument_instruction.u.argument); switch (ir_argument.type) { .i8, .i16, .i32, .i64 => gp_count += 1, .void, @@ -1322,7 +1330,7 @@ const InstructionSelection = struct { for (ir_arguments) |ir_argument_instruction_index| { const ir_argument_instruction = mir.ir.instructions.get(ir_argument_instruction_index); - const ir_argument = mir.ir.arguments.get(ir_argument_instruction.argument); + const ir_argument = mir.ir.arguments.get(ir_argument_instruction.u.argument); const value_type = resolveType(ir_argument.type); const register_class = register_classes.get(value_type); const argument_registers = calling_convention.argument_registers.get(register_class); @@ -1442,11 +1450,33 @@ const InstructionSelection = struct { // TODO: addLiveIn MachineBasicBlock ? 
unreachable; } } -}; -fn getRegisterClass(register: Register.Physical) Register.Class { - _ = register; -} + fn tryToFoldLoad(instruction_selection: *InstructionSelection, mir: *MIR, ir_load_instruction_index: ir.Instruction.Index, ir_current: ir.Instruction.Index) !bool { + _ = ir_current; + // TODO: assert load one use + // TODO: check users + const ir_load_instruction = mir.ir.instructions.get(ir_load_instruction_index); + assert(ir_load_instruction.* == .load); + const ir_load = mir.ir.loads.get(ir_load_instruction.load); + + if (!ir_load.@"volatile") { + const load_register = try instruction_selection.getRegisterForValue(mir, ir_load_instruction_index); + if (load_register.isValid()) { + const register_has_one_use = true; + if (register_has_one_use) { + if (!instruction_selection.registers_with_fixups.contains(load_register.index)) { + // TODO: architecture dependent part + const addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_load.instruction); + _ = addressing_mode; + unreachable; + } + } + } + } + + return false; + } +}; const Instruction = struct { id: Id, @@ -1454,6 +1484,9 @@ const Instruction = struct { parent: BasicBlock.Index, const Id = enum { + add32rr, + add32rm, + add32mr, call64pcrel32, copy, lea64r, @@ -1465,9 +1498,12 @@ const Instruction = struct { mov32ri, mov32ri64, mov32rr, + mov32mi, movsx64rm32, movsx64rr32, ret, + sub32rr, + sub32rm, syscall, ud2, xor32rr, @@ -1476,7 +1512,7 @@ const Instruction = struct { pub const Descriptor = struct { operands: []const Operand.Reference = &.{}, opcode: u16, - format: Format = .pseudo, + // format: Format = .pseudo, flags: Flags = .{}, const Flags = packed struct { @@ -1629,6 +1665,7 @@ const Instruction = struct { } }; }; + pub const Operand = struct { id: Operand.Id, u: union(enum) { @@ -1661,7 +1698,10 @@ pub const Operand = struct { gp32, gp64, gp64_nosp, - immediate, + imm8, + imm16, + imm32, + imm64, i64i32imm_brtarget, lea64mem, }; @@ -1751,13 +1791,70 @@ const register_class_operand_matcher = std.EnumArray(Operand.Id, Register.Class) .gp32 = .gp32, .gp64 = .gp64, .gp64_nosp = .gp64_nosp, - .immediate = .not_a_register, + .imm8 = .not_a_register, + .imm16 = .not_a_register, + .imm32 = .not_a_register, + .imm64 = .not_a_register, .lea64mem = .not_a_register, }); const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descriptor).init(.{ + .add32rr = .{ + // .format = .mrm_dest_reg, // right? + .opcode = 0x01, + .operands = &.{ + .{ + .id = .gp32, + .kind = .dst, + }, + .{ + .id = .gp32, + .kind = .src, + }, + .{ + .id = .gp32, + .kind = .src, + }, + }, + }, + .add32mr = .{ + // .format = .mrm_dest_reg, // right? + .opcode = 0x01, + .operands = &.{ + .{ + .id = .i32mem, + .kind = .dst, + }, + .{ + .id = .i32mem, + .kind = .src, + }, + .{ + .id = .gp32, + .kind = .src, + }, + }, + }, + .add32rm = .{ + // .format = .mrm_dest_reg, // right? 
+ .opcode = 0x03, + .operands = &.{ + .{ + .id = .gp32, + .kind = .dst, + }, + .{ + .id = .gp32, + .kind = .src, + }, + .{ + .id = .i32mem, + .kind = .src, + }, + }, + }, .call64pcrel32 = .{ - .format = .no_operands, + // .format = .no_operands, .opcode = 0xe8, .operands = &.{ .{ @@ -1767,7 +1864,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .copy = .{ - .format = .pseudo, + // .format = .pseudo, .opcode = 0, .operands = &.{ .{ @@ -1781,7 +1878,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .lea64r = .{ - .format = .mrm_source_mem, + // .format = .mrm_source_mem, .opcode = 0x8d, .operands = &.{ .{ @@ -1795,7 +1892,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .mov32r0 = .{ - .format = .pseudo, + // .format = .pseudo, .opcode = 0, .operands = &.{ .{ @@ -1805,7 +1902,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .mov32rm = .{ - .format = .mrm_source_mem, + // .format = .mrm_source_mem, .opcode = 0x8b, .operands = &.{ .{ @@ -1819,7 +1916,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .mov64rm = .{ - .format = .mrm_source_mem, + // .format = .mrm_source_mem, .opcode = 0x8b, .operands = &.{ .{ @@ -1833,7 +1930,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .mov32rr = .{ - .format = .mrm_dest_reg, + // .format = .mrm_dest_reg, .opcode = 0x89, .operands = &.{ .{ @@ -1847,7 +1944,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .mov32mr = .{ - .format = .mrm_dest_mem, + // .format = .mrm_dest_mem, .opcode = 0x89, .operands = &.{ .{ @@ -1861,7 +1958,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .mov64mr = .{ - .format = .mrm_dest_mem, + // .format = .mrm_dest_mem, .opcode = 0x89, .operands = &.{ .{ @@ -1875,7 +1972,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .mov32ri = .{ - .format = .add_reg, + // .format = .add_reg, .opcode = 0xb8, .operands = &.{ .{ @@ -1883,13 +1980,13 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri .kind = .dst, }, .{ - .id = .immediate, + .id = .imm32, .kind = .src, }, }, }, .mov32ri64 = .{ - .format = .pseudo, + // .format = .pseudo, .opcode = 0, .operands = &.{ .{ @@ -1897,13 +1994,26 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri .kind = .dst, }, .{ - .id = .immediate, + .id = .imm64, + .kind = .src, + }, + }, + }, + .mov32mi = .{ + .opcode = 0xc7, + .operands = &.{ + .{ + .id = .i32mem, + .kind = .dst, + }, + .{ + .id = .imm32, .kind = .src, }, }, }, .movsx64rm32 = .{ - .format = .mrm_source_mem, + // .format = .mrm_source_mem, .opcode = 0x63, .operands = &.{ .{ @@ -1917,7 +2027,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .movsx64rr32 = .{ - .format = .mrm_source_reg, + // .format = .mrm_source_reg, .opcode = 0x63, .operands = &.{ .{ @@ -1931,7 +2041,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .ret = .{ - .format = .no_operands, + // .format = .no_operands, .opcode = 0xc3, .operands = &.{ .{ @@ -1940,8 +2050,44 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, }, + .sub32rr = .{ + // .format = .mrm_dest_reg, // right? 
+ .opcode = 0x29, + .operands = &.{ + .{ + .id = .gp32, + .kind = .dst, + }, + .{ + .id = .gp32, + .kind = .src, + }, + .{ + .id = .gp32, + .kind = .src, + }, + }, + }, + .sub32rm = .{ + // .format = .mrm_dest_reg, // right? + .opcode = 0x2b, + .operands = &.{ + .{ + .id = .gp32, + .kind = .dst, + }, + .{ + .id = .gp32, + .kind = .src, + }, + .{ + .id = .i32mem, + .kind = .src, + }, + }, + }, .syscall = .{ - .format = .no_operands, + // .format = .no_operands, .opcode = 0x05, .operands = &.{}, .flags = .{ @@ -1949,7 +2095,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .ud2 = .{ - .format = .no_operands, + // .format = .no_operands, .opcode = 0x0b, .operands = &.{}, .flags = .{ @@ -1957,7 +2103,7 @@ const instruction_descriptors = std.EnumArray(Instruction.Id, Instruction.Descri }, }, .xor32rr = .{ - .format = .mrm_dest_reg, + // .format = .mrm_dest_reg, .opcode = 0x31, .operands = &.{ .{ @@ -2087,8 +2233,8 @@ pub const MIR = struct { const ir_instruction = mir.ir.instructions.get(ir_instruction_index); // TODO: take into account exceptions, dynamic allocas? - if (ir_instruction.* == .stack) { - const stack = mir.ir.stack_references.get(ir_instruction.stack); + if (ir_instruction.u == .stack) { + const stack = mir.ir.stack_references.get(ir_instruction.u.stack); const ir_type = getIrType(mir.ir, ir_instruction_index); const value_type = resolveType(ir_type); const type_info = value_types.get(value_type); @@ -2116,8 +2262,6 @@ pub const MIR = struct { var instruction_i: usize = ir_block.instructions.items.len; - var folded_load = false; - while (instruction_i > 0) { instruction_i -= 1; @@ -2128,7 +2272,7 @@ pub const MIR = struct { logln(.codegen, .instruction_selection_new_instruction, "Instruction #{}", .{instruction_i}); - switch (ir_instruction.*) { + switch (ir_instruction.u) { .ret => |ir_ret_index| { const ir_ret = mir.ir.returns.get(ir_ret_index); switch (ir_ret.instruction.invalid) { @@ -2234,7 +2378,7 @@ pub const MIR = struct { const produce_syscall_return_value = switch (instruction_i == ir_block.instructions.items.len - 2) { true => blk: { const last_block_instruction = mir.ir.instructions.get(ir_block.instructions.items[ir_block.instructions.items.len - 1]); - break :blk switch (last_block_instruction.*) { + break :blk switch (last_block_instruction.u) { .@"unreachable" => false, .ret => true, else => |t| @panic(@tagName(t)), @@ -2289,47 +2433,43 @@ pub const MIR = struct { }, .sign_extend => |ir_cast_index| { const ir_sign_extend = mir.ir.casts.get(ir_cast_index); - assert(!folded_load); - const ir_source_instruction = blk: { - var source = ir_sign_extend.value; - const source_instruction = mir.ir.instructions.get(source); - const result = switch (source_instruction.*) { - .load => b: { - const load = mir.ir.loads.get(source_instruction.load); - folded_load = true; - break :b load.instruction; - }, - else => |t| @panic(@tagName(t)), + const fold_load = blk: { + const source_instruction = mir.ir.instructions.get(ir_sign_extend.value); + const result = switch (source_instruction.u) { + .load => true, + else => false, }; break :blk result; }; const destination_type = resolveType(ir_sign_extend.type); - const source_type = resolveType(getIrType(mir.ir, ir_source_instruction)); + if (fold_load) { + const ir_load_instruction_index = ir_sign_extend.value; + const ir_load_instruction = mir.ir.instructions.get(ir_sign_extend.value); + const ir_load = mir.ir.loads.get(ir_load_instruction.u.load); + const ir_source = ir_load.instruction; + 
const source_type = resolveType(getIrType(mir.ir, ir_source)); - if (destination_type != source_type) { - const instruction_id: Instruction.Id = switch (source_type) { - .i32 => switch (destination_type) { - .i64 => switch (folded_load) { - true => .movsx64rm32, - false => .movsx64rr32, + if (destination_type != source_type) { + try instruction_selection.folded_loads.putNoClobber(mir.allocator, ir_load_instruction_index, {}); + + const instruction_id: Instruction.Id = switch (source_type) { + .i32 => switch (destination_type) { + .i64 => .movsx64rm32, + else => unreachable, }, - else => unreachable, - }, - else => |t| @panic(@tagName(t)), - }; + else => |t| @panic(@tagName(t)), + }; - const instruction_descriptor = instruction_descriptors.getPtrConst(instruction_id); - assert(instruction_descriptor.operands.len == 2); - const destination_operand_index = 0; - const destination_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index); - const destination_operand = mir.constrainOperandRegisterClass(instruction_descriptor, destination_register, destination_operand_index, .{ .type = .def }); - const source_operand_index = 1; + const instruction_descriptor = instruction_descriptors.getPtrConst(instruction_id); + assert(instruction_descriptor.operands.len == 2); + const destination_operand_index = 0; + const destination_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index); + const destination_operand = mir.constrainOperandRegisterClass(instruction_descriptor, destination_register, destination_operand_index, .{ .type = .def }); - const source_operand = switch (folded_load) { - true => blk: { - const addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_source_instruction); + const source_operand = blk: { + const addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_source); const memory_id: Operand.Id = switch (source_type) { .i32 => .i32mem, .i64 => .i64mem, @@ -2345,135 +2485,169 @@ pub const MIR = struct { .flags = .{}, }; break :blk operand; - }, - false => blk: { - const source_register = try instruction_selection.getRegisterForValue(mir, ir_source_instruction); - break :blk mir.constrainOperandRegisterClass(instruction_descriptor, source_register, source_operand_index, .{}); - }, - }; + }; - const sign_extend = try mir.buildInstruction(instruction_selection, instruction_id, &.{ - destination_operand, - source_operand, - }); + const sign_extend = try mir.buildInstruction(instruction_selection, instruction_id, &.{ + destination_operand, + source_operand, + }); - try instruction_selection.instruction_cache.append(mir.allocator, sign_extend); + try instruction_selection.instruction_cache.append(mir.allocator, sign_extend); - try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, destination_register, false); + try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, destination_register, false); + } else { + unreachable; + } } else { + // const source_register = try instruction_selection.getRegisterForValue(mir, ir_source_instruction); + // const source_operand = mir.constrainOperandRegisterClass(instruction_descriptor, source_register, source_operand_index, .{}); unreachable; } }, .load => |ir_load_index| { - if (folded_load) { - folded_load = false; - continue; - } + if (!instruction_selection.folded_loads.swapRemove(ir_instruction_index)) { + const ir_load = mir.ir.loads.get(ir_load_index); + const ir_source = ir_load.instruction; + const addressing_mode = 
instruction_selection.getAddressingModeFromIr(mir, ir_source); + const value_type = resolveType(getIrType(mir.ir, ir_source)); - const ir_load = mir.ir.loads.get(ir_load_index); - const ir_source = ir_load.instruction; - const addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_source); - const value_type = resolveType(getIrType(mir.ir, ir_source)); + switch (value_type) { + inline .i32, + .i64, + => |vt| { + const instruction_id: Instruction.Id = switch (vt) { + .i32 => .mov32rm, + .i64 => .mov64rm, + else => |t| @panic(@tagName(t)), + }; + const memory_id: Operand.Id = switch (vt) { + .i32 => .i32mem, + .i64 => .i64mem, + else => |t| @panic(@tagName(t)), + }; - switch (value_type) { - inline .i32, - .i64, - => |vt| { - const instruction_id: Instruction.Id = switch (vt) { - .i32 => .mov32rm, - .i64 => .mov64rm, - else => |t| @panic(@tagName(t)), - }; - const memory_id: Operand.Id = switch (vt) { - .i32 => .i32mem, - .i64 => .i64mem, - else => |t| @panic(@tagName(t)), - }; + const instruction_descriptor = instruction_descriptors.getPtrConst(instruction_id); - const instruction_descriptor = instruction_descriptors.getPtrConst(instruction_id); - - const destination_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index); - const destination_operand_index = 0; - const destination_operand_id = instruction_descriptor.operands[destination_operand_index].id; - const destination_operand = Operand{ - .id = destination_operand_id, - .u = .{ - .register = destination_register, - }, - .flags = .{ .type = .def }, - }; - - const source_operand = Operand{ - .id = memory_id, - .u = .{ - .memory = .{ - .addressing_mode = addressing_mode, + const destination_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index); + const destination_operand_index = 0; + const destination_operand_id = instruction_descriptor.operands[destination_operand_index].id; + const destination_operand = Operand{ + .id = destination_operand_id, + .u = .{ + .register = destination_register, }, - }, - .flags = .{}, - }; + .flags = .{ .type = .def }, + }; - const load = try mir.buildInstruction(instruction_selection, instruction_id, &.{ - destination_operand, - source_operand, - }); - try instruction_selection.instruction_cache.append(mir.allocator, load); + const source_operand = Operand{ + .id = memory_id, + .u = .{ + .memory = .{ + .addressing_mode = addressing_mode, + }, + }, + .flags = .{}, + }; - try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, destination_register, false); - }, - else => |t| @panic(@tagName(t)), + const load = try mir.buildInstruction(instruction_selection, instruction_id, &.{ + destination_operand, + source_operand, + }); + try instruction_selection.instruction_cache.append(mir.allocator, load); + + try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, destination_register, false); + }, + else => |t| @panic(@tagName(t)), + } } }, .store => |ir_store_index| { const ir_store = mir.ir.stores.get(ir_store_index); - const ir_source = ir_store.source; + const ir_source_index = ir_store.source; const ir_destination = ir_store.destination; const addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_destination); + const ir_source = mir.ir.instructions.get(ir_source_index); - const source_register = try instruction_selection.getRegisterForValue(mir, ir_source); + const value_type = resolveType(getIrType(mir.ir, ir_source_index)); - const value_type = 
resolveType(getIrType(mir.ir, ir_source)); + if (ir_source.u == .load_integer and value_types.get(value_type).getSize() <= @sizeOf(u32)) { + const instruction_id: Instruction.Id = switch (value_type) { + .i32 => .mov32mi, + else => unreachable, + }; - switch (value_type) { - inline .i32, .i64 => |vt| { - const instruction_id: Instruction.Id = switch (vt) { - // TODO, non-temporal SSE2 MOVNT - .i32 => .mov32mr, - .i64 => .mov64mr, - else => |t| @panic(@tagName(t)), - }; + const source_immediate = ir_source.u.load_integer; + const instruction_descriptor = instruction_descriptors.getPtrConst(instruction_id); - const instruction_descriptor = comptime instruction_descriptors.getPtrConst(instruction_id); - const source_operand_index = instruction_descriptor.operands.len - 1; - const source_operand_id = instruction_descriptor.operands[source_operand_index].id; - const source_operand = Operand{ - .id = source_operand_id, - .u = .{ - .register = source_register, + const source_operand_index = instruction_descriptor.operands.len - 1; + const source_operand_id = instruction_descriptor.operands[source_operand_index].id; + const source_operand = Operand{ + .id = source_operand_id, + .u = .{ + .immediate = source_immediate.value.unsigned, + }, + .flags = .{}, + }; + const destination_operand_id = instruction_descriptor.operands[0].id; + const destination_operand = Operand{ + .id = destination_operand_id, + .u = .{ + .memory = .{ + .addressing_mode = addressing_mode, }, - .flags = .{}, - }; + }, + .flags = .{}, + }; + const store = try mir.buildInstruction(instruction_selection, instruction_id, &.{ + destination_operand, + source_operand, + }); - const destination_operand_id = instruction_descriptor.operands[0].id; - const destination_operand = Operand{ - .id = destination_operand_id, - .u = .{ - .memory = .{ - .addressing_mode = addressing_mode, + try instruction_selection.instruction_cache.append(mir.allocator, store); + } else { + const source_register = try instruction_selection.getRegisterForValue(mir, ir_source_index); + + switch (value_type) { + .i32, .i64 => |vt| { + const instruction_id: Instruction.Id = switch (vt) { + // TODO, non-temporal SSE2 MOVNT + .i32 => .mov32mr, + .i64 => .mov64mr, + else => |t| @panic(@tagName(t)), + }; + + const instruction_descriptor = instruction_descriptors.getPtrConst(instruction_id); + const source_operand_index = instruction_descriptor.operands.len - 1; + const source_operand_id = instruction_descriptor.operands[source_operand_index].id; + const source_operand = Operand{ + .id = source_operand_id, + .u = .{ + .register = source_register, }, - }, - .flags = .{}, - }; + .flags = .{}, + }; - const store = try mir.buildInstruction(instruction_selection, instruction_id, &.{ - destination_operand, - source_operand, - }); + const destination_operand_id = instruction_descriptor.operands[0].id; + const destination_operand = Operand{ + .id = destination_operand_id, + .u = .{ + .memory = .{ + .addressing_mode = addressing_mode, + }, + }, + .flags = .{}, + }; - try instruction_selection.instruction_cache.append(mir.allocator, store); - }, - else => |t| @panic(@tagName(t)), + const store = try mir.buildInstruction(instruction_selection, instruction_id, &.{ + destination_operand, + source_operand, + }); + + try instruction_selection.instruction_cache.append(mir.allocator, store); + }, + else => |t| @panic(@tagName(t)), + } } }, .stack => { @@ -2631,6 +2805,94 @@ pub const MIR = struct { try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, 
virtual_register, false); }, + .binary_operation => |ir_binary_operation_index| { + const ir_binary_operation = mir.ir.binary_operations.get(ir_binary_operation_index); + + const is_right_load = switch (mir.ir.instructions.get(ir_binary_operation.right).u) { + .load => true, + else => false, + }; + + const is_left_load = switch (mir.ir.instructions.get(ir_binary_operation.right).u) { + .load => true, + else => false, + }; + + // TODO: optimize if the result is going to be stored? + // for (ir_instruction.use_list.items) |use_index| { + // const use = mir.ir.instructions.get(use_index); + // std.debug.print("Use: {s}\n", .{@tagName(use.u)}); + // } + const destination_register = try instruction_selection.getRegisterForValue(mir, ir_instruction_index); + const value_type = resolveType(ir_binary_operation.type); + + if (is_right_load and !is_left_load) { + unreachable; + } else if (!is_right_load and is_left_load) { + unreachable; + } else if (!is_right_load and !is_left_load) { + unreachable; + } else { + // If both operands come from memory (both operands are loads), load the left one into a register and operate from the stack with the right one, when possible + const instruction_id: Instruction.Id = switch (ir_binary_operation.id) { + .add => switch (value_type) { + .i32 => .add32rm, + else => unreachable, + }, + .sub => switch (value_type) { + .i32 => .sub32rm, + else => unreachable, + }, + }; + + try instruction_selection.folded_loads.putNoClobber(mir.allocator, ir_binary_operation.right, {}); + + const instruction_descriptor = instruction_descriptors.get(instruction_id); + const left_register = try instruction_selection.getRegisterForValue(mir, ir_binary_operation.left); + const destination_operand_id = instruction_descriptor.operands[0].id; + const left_operand_id = instruction_descriptor.operands[1].id; + const right_operand_id = instruction_descriptor.operands[2].id; + assert(right_operand_id == .i32mem); + const ir_load = mir.ir.loads.get(mir.ir.instructions.get(ir_binary_operation.right).u.load); + const right_operand_addressing_mode = instruction_selection.getAddressingModeFromIr(mir, ir_load.instruction); + + const destination_operand = Operand{ + .id = destination_operand_id, + .u = .{ + .register = destination_register, + }, + .flags = .{ + .type = .def, + }, + }; + + const left_operand = Operand{ + .id = left_operand_id, + .u = .{ + .register = left_register, + }, + .flags = .{}, + }; + + const right_operand = Operand{ + .id = right_operand_id, + .u = .{ + .memory = .{ .addressing_mode = right_operand_addressing_mode }, + }, + .flags = .{}, + }; + + const binary_op_instruction = try mir.buildInstruction(instruction_selection, instruction_id, &.{ + destination_operand, + left_operand, + right_operand, + }); + + try instruction_selection.instruction_cache.append(mir.allocator, binary_op_instruction); + + try instruction_selection.updateValueMap(mir.allocator, ir_instruction_index, destination_register, false); + } + }, else => |t| @panic(@tagName(t)), } @@ -3159,12 +3421,6 @@ pub const MIR = struct { const instruction = mir.instructions.get(instruction_index); switch (instruction.id) { - .mov32rm => {}, - .mov32r0 => {}, - .mov32ri => {}, - .mov64rm => {}, - .lea64r => {}, - .mov32ri64 => {}, .copy => { const operand_index = instruction.operands.items[1]; const operand = mir.operands.get(operand_index); @@ -3175,7 +3431,7 @@ pub const MIR = struct { logln(.codegen, .register_allocation_problematic_hint, "[traceCopies] Missed oportunity for register allocation tracing copy chain 
for VR{}", .{virtual_register_index.uniqueInteger()}); }, - else => |t| @panic(@tagName(t)), + else => {}, } try_count += 1; @@ -3742,6 +3998,7 @@ pub const MIR = struct { const gp_register_encoding: Encoding.GP32 = switch (physical_register) { .eax => .a, .edi => .di, + .ecx => .c, else => |t| @panic(@tagName(t)), }; @@ -4173,6 +4430,105 @@ pub const MIR = struct { else => |t| @panic(@tagName(t)), } }, + .add32rr, .sub32rr => { + assert(instruction.operands.items.len == 3); + const instruction_descriptor = instruction_descriptors.get(instruction.id); + const opcode: u8 = @intCast(instruction_descriptor.opcode); + try image.section_manager.appendCodeByte(opcode); + + const destination_operand_index = instruction.operands.items[0]; + const left_operand_index = instruction.operands.items[1]; + const right_operand_index = instruction.operands.items[2]; + + const destination_operand = mir.operands.get(destination_operand_index); + const left_operand = mir.operands.get(left_operand_index); + const right_operand = mir.operands.get(right_operand_index); + + const destination_register = getGP32Encoding(destination_operand.*); + const left_register = getGP32Encoding(left_operand.*); + const right_register = getGP32Encoding(right_operand.*); + + assert(destination_register == left_register); + + const modrm = ModRm{ + .rm = @intCast(@intFromEnum(destination_register)), + .reg = @intCast(@intFromEnum(right_register)), + .mod = 0b11, + }; + try image.section_manager.appendCodeByte(@bitCast(modrm)); + }, + .add32rm, .sub32rm => { + assert(instruction.operands.items.len == 3); + const instruction_descriptor = instruction_descriptors.get(instruction.id); + const opcode: u8 = @intCast(instruction_descriptor.opcode); + try image.section_manager.appendCodeByte(opcode); + + const destination_operand_index = instruction.operands.items[0]; + const left_operand_index = instruction.operands.items[1]; + const right_operand_index = instruction.operands.items[2]; + + const destination_operand = mir.operands.get(destination_operand_index); + const left_operand = mir.operands.get(left_operand_index); + const right_operand = mir.operands.get(right_operand_index); + + const destination_register = getGP32Encoding(destination_operand.*); + const left_register = getGP32Encoding(left_operand.*); + assert(destination_register == left_register); + + assert(right_operand.id == .i32mem); + const right_memory = right_operand.u.memory; + + switch (right_memory.addressing_mode.base) { + .register_base => unreachable, + .frame_index => |frame_index| { + const modrm = ModRm{ + .rm = @intFromEnum(Encoding.GP64.bp), + .reg = @intCast(@intFromEnum(destination_register)), + .mod = @as(u2, @intFromBool(false)) << 1 | @intFromBool(true), + }; + try image.section_manager.appendCodeByte(@bitCast(modrm)); + + const stack_offset = computeStackOffset(function.instruction_selection.stack_objects.items[0 .. 
frame_index + 1]); + const displacement_bytes: u3 = if (std.math.cast(i8, stack_offset)) |_| @sizeOf(i8) else if (std.math.cast(i32, stack_offset)) |_| @sizeOf(i32) else unreachable; + + const stack_bytes = std.mem.asBytes(&stack_offset)[0..displacement_bytes]; + try image.section_manager.appendCode(stack_bytes); + }, + } + }, + .mov32mi => { + assert(instruction.operands.items.len == 2); + + const instruction_descriptor = instruction_descriptors.get(instruction.id); + const opcode: u8 = @intCast(instruction_descriptor.opcode); + try image.section_manager.appendCodeByte(opcode); + + const destination_operand_index = instruction.operands.items[0]; + const destination_operand = mir.operands.get(destination_operand_index); + switch (destination_operand.u.memory.addressing_mode.base) { + .register_base => unreachable, + .frame_index => |frame_index| { + const modrm = ModRm{ + .rm = @intFromEnum(Encoding.GP64.bp), + .reg = 0, + .mod = @as(u2, @intFromBool(false)) << 1 | @intFromBool(true), + }; + try image.section_manager.appendCodeByte(@bitCast(modrm)); + + const stack_offset = computeStackOffset(function.instruction_selection.stack_objects.items[0 .. frame_index + 1]); + const displacement_bytes: u3 = if (std.math.cast(i8, stack_offset)) |_| @sizeOf(i8) else if (std.math.cast(i32, stack_offset)) |_| @sizeOf(i32) else unreachable; + + const stack_bytes = std.mem.asBytes(&stack_offset)[0..displacement_bytes]; + try image.section_manager.appendCode(stack_bytes); + }, + } + + const source_operand_index = instruction.operands.items[1]; + const source_operand = mir.operands.get(source_operand_index); + const source_immediate: u32 = @intCast(source_operand.u.immediate); + + try image.section_manager.appendCode(std.mem.asBytes(&source_immediate)); + }, else => |t| @panic(@tagName(t)), } @@ -4376,12 +4732,32 @@ pub const MIR = struct { if (!clamp or alignment <= stack_alignment) return alignment; return stack_alignment; } -}; -const ModRm = packed struct(u8) { - rm: u3, - reg: u3, - mod: u2, + fn isFoldedOrDeadInstruction(mir: *MIR, instruction_index: ir.Instruction.Index) bool { + const result = !mir.mayWriteToMemory(instruction_index) and !mir.isTerminator(instruction_index); + return result; + } + + fn mayWriteToMemory(mir: *MIR, instruction_index: ir.Instruction.Index) bool { + const ir_instruction = mir.ir.instructions.get(instruction_index); + const result = switch (ir_instruction.*) { + .load => !mir.ir.loads.get(ir_instruction.load).isUnordered(), + .store => true, + else => |t| @panic(@tagName(t)), + }; + + return result; + } + + fn isTerminator(mir: *MIR, instruction_index: ir.Instruction.Index) bool { + const ir_instruction = mir.ir.instructions.get(instruction_index); + const result = switch (ir_instruction.*) { + .load => false, + else => |t| @panic(@tagName(t)), + }; + + return result; + } }; const Rex = packed struct(u8) { @@ -4423,9 +4799,21 @@ const Rex = packed struct(u8) { // } }; +const Sib = packed struct(u8) { + base: u3, + index: u3, + scale: u2, +}; + +const ModRm = packed struct(u8) { + rm: u3, + reg: u3, + mod: u2, +}; + fn getIrType(intermediate: *ir.Result, ir_instruction_index: ir.Instruction.Index) ir.Type { const ir_instruction = intermediate.instructions.get(ir_instruction_index); - return switch (ir_instruction.*) { + return switch (ir_instruction.u) { .argument => |argument_index| intermediate.arguments.get(argument_index).type, .stack => |stack_index| intermediate.stack_references.get(stack_index).type, .load => |load_index| getIrType(intermediate, 
intermediate.loads.get(load_index).instruction), @@ -4434,6 +4822,7 @@ fn getIrType(intermediate: *ir.Result, ir_instruction_index: ir.Instruction.Inde .load_string_literal => .i64, .call => |call_index| intermediate.function_declarations.get(intermediate.calls.get(call_index).function).return_type, .sign_extend => |cast_index| intermediate.casts.get(cast_index).type, + .binary_operation => |binary_operation_index| intermediate.binary_operations.get(binary_operation_index).type, else => |t| @panic(@tagName(t)), }; } diff --git a/src/frontend/lexical_analyzer.zig b/src/frontend/lexical_analyzer.zig index 45fbc47..ca22468 100644 --- a/src/frontend/lexical_analyzer.zig +++ b/src/frontend/lexical_analyzer.zig @@ -164,7 +164,7 @@ pub fn analyze(allocator: Allocator, text: []const u8, file_index: File.Index) ! inline else => |comptime_fixed_keyword| @field(Token.Id, "fixed_keyword_" ++ @tagName(comptime_fixed_keyword)), } else .identifier; }, - '(', ')', '{', '}', '[', ']', '-', '=', ';', '#', '@', ',', '.', ':', '>', '<', '*', '!' => |operator| blk: { + '(', ')', '{', '}', '[', ']', '=', ';', '#', '@', ',', '.', ':', '>', '<', '!', '+', '-', '*', '\\', '/' => |operator| blk: { index += 1; break :blk @enumFromInt(operator); }, diff --git a/src/frontend/semantic_analyzer.zig b/src/frontend/semantic_analyzer.zig index 35edc47..a9895fa 100644 --- a/src/frontend/semantic_analyzer.zig +++ b/src/frontend/semantic_analyzer.zig @@ -38,7 +38,17 @@ pub const Logger = enum { block, call, - pub var bitset = std.EnumSet(Logger).initEmpty(); + pub var bitset = std.EnumSet(Logger).initMany(&.{ + .type, + .identifier, + .symbol_declaration, + .scope_node, + .node, + .typecheck, + .@"switch", + .block, + .call, + }); }; const lexical_analyzer = @import("lexical_analyzer.zig"); @@ -145,7 +155,7 @@ const Analyzer = struct { try statement_nodes.append(analyzer.allocator, block_node.left); try statement_nodes.append(analyzer.allocator, block_node.right); }, - .block, .comptime_block => unreachable, //statement_nodes = analyzer.getNodeList(scope_index, block_node.left.unwrap()), + .block, .comptime_block => statement_nodes = analyzer.getScopeNodeList(scope_index, analyzer.getScopeNode(scope_index, block_node.left)), else => |t| @panic(@tagName(t)), } @@ -512,6 +522,33 @@ const Analyzer = struct { }; } + fn processBinaryOperation(analyzer: *Analyzer, scope_index: Scope.Index, expect_type: ExpectType, node_index: Node.Index) !Value { + const node = analyzer.getScopeNode(scope_index, node_index); + + const left_allocation = try analyzer.unresolvedAllocate(scope_index, expect_type, node.left); + const right_allocation = try analyzer.unresolvedAllocate(scope_index, expect_type, node.right); + const left_type = left_allocation.ptr.getType(analyzer.module); + const right_type = right_allocation.ptr.getType(analyzer.module); + if (!left_type.eq(right_type)) { + unreachable; + } + + const binary_operation = try analyzer.module.binary_operations.append(analyzer.allocator, .{ + .left = left_allocation.index, + .right = right_allocation.index, + .type = left_type, + .id = switch (node.id) { + .add => .add, + .sub => .sub, + else => |t| @panic(@tagName(t)), + }, + }); + + return .{ + .binary_operation = binary_operation.index, + }; + } + const DeclarationLookup = struct { declaration: Declaration.Index, scope: Scope.Index, @@ -968,6 +1005,9 @@ const Analyzer = struct { .type = try analyzer.resolveType(scope_index, node_index), }, .@"return" => try analyzer.processReturn(scope_index, expect_type, node_index), + .add, + .sub, + => 
try analyzer.processBinaryOperation(scope_index, expect_type, node_index), else => |t| @panic(@tagName(t)), }; } diff --git a/src/frontend/syntactic_analyzer.zig b/src/frontend/syntactic_analyzer.zig index 1d16353..6e53136 100644 --- a/src/frontend/syntactic_analyzer.zig +++ b/src/frontend/syntactic_analyzer.zig @@ -141,6 +141,8 @@ pub const Node = packed struct(u128) { enum_field = 58, extern_qualifier = 59, function_prototype = 60, + add = 61, + sub = 62, }; }; @@ -692,6 +694,33 @@ const Analyzer = struct { return try analyzer.expressionPrecedence(0); } + const PrecedenceOperator = enum { + compare_equal, + compare_not_equal, + add, + sub, + }; + + const operator_precedence = std.EnumArray(PrecedenceOperator, i32).init(.{ + .compare_equal = 30, + .compare_not_equal = 30, + .add = 60, + .sub = 60, + }); + + const operator_associativity = std.EnumArray(PrecedenceOperator, Associativity).init(.{ + .compare_equal = .none, + .compare_not_equal = .none, + .add = .left, + .sub = .left, + }); + const operator_node_id = std.EnumArray(PrecedenceOperator, Node.Id).init(.{ + .compare_equal = .compare_equal, + .compare_not_equal = .compare_not_equal, + .add = .add, + .sub = .sub, + }); + fn expressionPrecedence(analyzer: *Analyzer, minimum_precedence: i32) !Node.Index { var result = try analyzer.prefixExpression(); if (!result.invalid) { @@ -704,55 +733,75 @@ const Analyzer = struct { while (analyzer.token_i < analyzer.tokens.len) { const token = analyzer.tokens[analyzer.token_i]; // logln("Looping in expression precedence with token {}\n", .{token}); - const precedence: i32 = switch (token.id) { - .equal, .semicolon, .right_parenthesis, .right_brace, .comma, .period, .fixed_keyword_const, .fixed_keyword_var => -1, - .bang => switch (analyzer.tokens[analyzer.token_i + 1].id) { - .equal => 30, - else => unreachable, - }, - else => |t| { - const start = token.start; - logln(.parser, .precedence, "Source file:\n```\n{s}\n```\n", .{analyzer.source_file[start..]}); - @panic(@tagName(t)); + const operator: PrecedenceOperator = switch (token.id) { + .equal, .semicolon, .right_parenthesis, .right_brace, .comma, .period, .fixed_keyword_const, .fixed_keyword_var => break, + else => blk: { + const next_token_index = analyzer.token_i + 1; + if (next_token_index < analyzer.tokens.len) { + const next_token_id = analyzer.tokens[next_token_index].id; + break :blk switch (token.id) { + .equal => switch (next_token_id) { + .equal => .compare_equal, + else => break, + }, + .bang => switch (next_token_id) { + .equal => .compare_not_equal, + else => unreachable, + }, + .plus => switch (next_token_id) { + .plus => unreachable, + .equal => unreachable, + else => .add, + }, + .minus => switch (next_token_id) { + .minus => unreachable, + .equal => unreachable, + else => .sub, + }, + else => |t| @panic(@tagName(t)), + }; + } else { + unreachable; + } }, }; - logln(.parser, .precedence, "Precedence: {} ({s}) (file #{})\n", .{ precedence, @tagName(token.id), analyzer.file_index.uniqueInteger() }); + const precedence = operator_precedence.get(operator); if (precedence < minimum_precedence) { logln(.parser, .precedence, "Breaking for minimum_precedence\n", .{}); break; } - if (precedence == banned_precedence) { - logln(.parser, .precedence, "Breaking for banned precedence\n", .{}); + if (precedence < banned_precedence) { + logln(.parser, .precedence, "Breaking for banned_precedence\n", .{}); break; } const operator_token = analyzer.token_i; - const is_bang_equal = analyzer.tokens[operator_token].id == .bang and 
analyzer.tokens[operator_token + 1].id == .equal; - analyzer.token_i += @as(u32, 1) + @intFromBool(is_bang_equal); + const extra_token = switch (operator) { + .add, + .sub, + => false, + .compare_equal, + .compare_not_equal, + => true, + // else => |t| @panic(@tagName(t)), + }; + analyzer.token_i += @as(u32, 1) + @intFromBool(extra_token); // TODO: fix this const right = try analyzer.expressionPrecedence(precedence + 1); - const operation_id: Node.Id = switch (is_bang_equal) { - true => .compare_not_equal, - false => switch (analyzer.tokens[operator_token].id) { - else => |t| @panic(@tagName(t)), - }, - }; + const node_id = operator_node_id.get(operator); result = try analyzer.addNode(.{ - .id = operation_id, + .id = node_id, .token = operator_token, .left = result, .right = right, }); - const associativity: Associativity = switch (operation_id) { - .compare_equal, .compare_not_equal, .compare_less_than, .compare_greater_than, .compare_less_or_equal, .compare_greater_or_equal => .none, - else => .left, - }; + const associativity = operator_associativity.get(operator); if (associativity == .none) { banned_precedence = precedence; diff --git a/test/add_sub/main.nat b/test/add_sub/main.nat new file mode 100644 index 0000000..001fd3e --- /dev/null +++ b/test/add_sub/main.nat @@ -0,0 +1,8 @@ +const main = fn() s32 { + const a: s32 = 1; + const b: s32 = 2; + const c: s32 = a + b; + const d: s32 = 3; + const e: s32 = d - c; + return e; +}
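
The parser side of this change replaces the ad-hoc `!=` special case with small tables: add/sub get precedence 60 and left associativity, while the two comparisons keep 30 and route through the banned_precedence check. Below is a minimal precedence-climbing sketch of the same idea; the `Toy` type, the single-character token stream, and the inline evaluator are all invented for illustration and are not the compiler's Token/Node machinery (`==` is modelled as a single '=' token), so treat it as a sketch of the algorithm rather than the parser itself.

const std = @import("std");

// Toy precedence climber over single-character tokens. It mirrors the levels
// used above: '+'/'-' bind at 60, '=' (standing in for `==`) at 30, and the
// right-hand side is parsed at precedence + 1, which is what makes the
// additive operators left-associative.
const Toy = struct {
    tokens: []const u8,
    i: usize = 0,

    fn precedenceOf(op: u8) ?i32 {
        return switch (op) {
            '+', '-' => 60,
            '=' => 30,
            else => null,
        };
    }

    fn primary(toy: *Toy) i64 {
        const digit = toy.tokens[toy.i];
        toy.i += 1;
        return digit - '0';
    }

    fn expression(toy: *Toy, minimum_precedence: i32) i64 {
        var result = toy.primary();
        while (toy.i < toy.tokens.len) {
            const op = toy.tokens[toy.i];
            const precedence = precedenceOf(op) orelse break;
            if (precedence < minimum_precedence) break;
            toy.i += 1;
            const right = toy.expression(precedence + 1);
            result = switch (op) {
                '+' => result + right,
                '-' => result - right,
                '=' => @intFromBool(result == right),
                else => unreachable,
            };
        }
        return result;
    }
};

test "additive operators associate left and bind tighter than comparison" {
    var a = Toy{ .tokens = "7-3-2" };
    try std.testing.expectEqual(@as(i64, 2), a.expression(0)); // (7 - 3) - 2
    var b = Toy{ .tokens = "1+2=3" };
    try std.testing.expectEqual(@as(i64, 1), b.expression(0)); // (1 + 2) == 3
}

The real expressionPrecedence additionally peeks at the next token to distinguish `+`/`-` from `++`, `+=`, `--` and `-=` (those cases are currently unreachable) and consumes an extra token for the two-character comparison operators.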
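
On the semantic side, processBinaryOperation resolves both operands against the same expected type, gives up (unreachable) if the operand types differ, and records the result as a typed BinaryOperation value. In the x86_64 selector only the both-operands-are-loads case is implemented so far: the right-hand load is folded into the rm form (add32rm/sub32rm), the left operand is materialized into a register, and the other three load/non-load combinations remain unreachable. The old per-block folded_load flag is replaced by the folded_loads set, which the load arm consults (swapRemove) before emitting a mov32rm/mov64rm. One detail worth a second look: is_left_load is computed from ir_binary_operation.right, the same index used for is_right_load, so the left-operand classification never actually inspects the left operand.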
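
The new encoder arms reuse the ModRm packed struct (rm in the low bits, then reg, then mod), so a register-register add/sub is the one-byte opcode followed by a mod = 0b11 ModRM byte, and the rm forms swap in a frame-index memory operand encoded rbp-relative. A small stand-alone check of the register-register shape; the register numbers (eax = 0, ecx = 1) come from the architecture manual, not from this patch, and the test itself is illustrative only.

const std = @import("std");

// Same field layout as the ModRm struct above: packed from the least
// significant bit, so rm = bits 0-2, reg = bits 3-5, mod = bits 6-7.
const ModRm = packed struct(u8) {
    rm: u3,
    reg: u3,
    mod: u2,
};

test "add32rr: opcode 0x01 plus a register-direct ModRM byte" {
    const eax: u3 = 0; // hardware register numbers, assumed, not part of this patch
    const ecx: u3 = 1;
    // Destination goes in rm, the right-hand source in reg, mod = 0b11
    // (register direct), matching the add32rr/sub32rr encoding arm above.
    const modrm: u8 = @bitCast(ModRm{ .rm = eax, .reg = ecx, .mod = 0b11 });
    const encoded = [_]u8{ 0x01, modrm };
    // `add eax, ecx` assembles to 01 c8.
    try std.testing.expectEqualSlices(u8, &.{ 0x01, 0xc8 }, &encoded);
}

For the memory forms, mod is currently fixed to 0b01 (one displacement byte) while the displacement itself is emitted as one or four bytes depending on the stack offset, so offsets that do not fit in an i8 would need mod = 0b10 to keep the encoding consistent.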
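
The new add_sub test exercises the full path end to end: c = a + b = 1 + 2 = 3 and e = d - c = 3 - 3 = 0, so main returns 0, which (assuming the return value of main surfaces as the process exit status, as in C) is the conventional success code for a freshly compiled test binary.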