From 403822310ea38d5d7afaa779c67dc73d2aadf4d4 Mon Sep 17 00:00:00 2001 From: David Gonzalez Martin Date: Sun, 23 Jul 2023 07:21:18 -0600 Subject: [PATCH] wip --- .github/workflows/ci.yml | 6 +- build.zig | 233 ++-- config/default.json | 4 +- src/birth.zig | 95 +- src/birth/arch/x64_64.zig | 39 +- src/birth/capabilities.zig | 381 ------- src/birth/interface.zig | 1031 +++++++++++++++++ src/birth/syscall.zig | 117 -- src/bootloader.zig | 17 +- src/bootloader/bios.zig | 15 +- src/bootloader/birth/bios/main.zig | 12 +- src/bootloader/birth/uefi/main.zig | 26 +- src/bootloader/uefi.zig | 14 +- src/common.zig | 449 ++------ src/cpu.zig | 849 ++++++++------ src/cpu/arch/x86/64/init.zig | 1079 +++--------------- src/cpu/arch/x86/64/syscall.zig | 78 +- src/cpu/arch/x86_64.zig | 28 +- src/cpu/capabilities.zig | 419 ------- src/cpu/init.zig | 364 ++++++ src/cpu/interface.zig | 698 +++++++++++ src/cpu/main.zig | 5 - src/cpu/test_runner.zig | 38 - src/host/runner/main.zig | 31 +- src/lib.zig | 629 +++++++--- src/lib/arch.zig | 2 + src/lib/arch/x86/common.zig | 4 +- src/lib/arch/x86_64.zig | 33 + src/lib/filesystem/fat32.zig | 3 - src/privileged.zig | 4 + src/privileged/arch/x86/64/paging.zig | 270 ++--- src/user.zig | 117 +- src/user/arch/x86_64.zig | 58 - src/user/arch/x86_64/linker_script.ld | 2 +- src/user/capabilities.zig | 83 +- src/user/core_state.zig | 21 - src/user/libc.zig | 5 +- src/user/mmu_aware_virtual_address_space.zig | 62 - src/user/physical_map.zig | 25 - src/user/physical_memory_region.zig | 81 -- src/user/programs/device_manager/main.zig | 14 +- src/user/programs/init/main.zig | 4 +- src/user/slot_allocator.zig | 28 - src/user/thread.zig | 43 +- src/user/virtual.zig | 118 ++ src/user/virtual_address_space.zig | 60 - 46 files changed, 4115 insertions(+), 3579 deletions(-) delete mode 100644 src/birth/capabilities.zig create mode 100644 src/birth/interface.zig delete mode 100644 src/birth/syscall.zig delete mode 100644 src/cpu/capabilities.zig create mode 100644 src/cpu/init.zig create mode 100644 src/cpu/interface.zig delete mode 100644 src/cpu/test_runner.zig delete mode 100644 src/user/mmu_aware_virtual_address_space.zig delete mode 100644 src/user/physical_map.zig delete mode 100644 src/user/physical_memory_region.zig delete mode 100644 src/user/slot_allocator.zig create mode 100644 src/user/virtual.zig delete mode 100644 src/user/virtual_address_space.zig diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 75f58fc..5297aba 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -40,8 +40,10 @@ jobs: run: zig env - name: Build test executables run: zig build all_tests -Dci --verbose - - name: Run host tests - run: zig build test_host + - name: Set up QEMU + uses: davidgm94/setup-qemu@main + - name: Run all tests + run: zig build test_all -Dci --verbose # build_and_test: # runs-on: [self-hosted, Linux, X64] # steps: diff --git a/build.zig b/build.zig index dec85d8..4b1b0d0 100644 --- a/build.zig +++ b/build.zig @@ -1,31 +1,45 @@ const std = @import("std"); -const common = @import("src/common.zig"); -const os = common.os; +const ArrayList = std.ArrayList; +const assert = std.debug.assert; +const concat = std.mem.concat; +const cwd = std.fs.cwd; +const Cpu = Target.Cpu; +const CrossTarget = std.zig.CrossTarget; +const EnumArray = std.EnumArray; +const enumValues = std.enums.values; +const fields = std.meta.fields; +const json = std.json; +const maxInt = std.math.maxInt; +const OptimizeMode = std.builtin.OptimizeMode; +const Target = std.Target; // 
Build types const Build = std.Build; -const CompileStep = std.Build.CompileStep; -const FileSource = std.Build.FileSource; -const Module = std.Build.Module; -const ModuleDependency = std.Build.ModuleDependency; -const OptionsStep = std.Build.OptionsStep; -const RunStep = std.Build.RunStep; -const Step = std.Build.Step; +const CompileStep = Build.CompileStep; +const LazyPath = Build.LazyPath; +const Module = Build.Module; +const ModuleDependency = Build.ModuleDependency; +const OptionsStep = Build.OptionsStep; +const RunStep = Build.RunStep; +const Step = Build.Step; -const assert = std.debug.assert; +const builtin = @import("builtin"); +const os = builtin.os.tag; +const cpu = builtin.cpu; + +const common = @import("src/common.zig"); +const ArgumentParser = common.ArgumentParser; const Bootloader = common.Bootloader; +const canVirtualizeWithQEMU = common.canVirtualizeWithQEMU; const Configuration = common.Configuration; -const Cpu = common.Cpu; -const CrossTarget = common.CrossTarget; const DiskType = common.DiskType; const ExecutionType = common.ExecutionType; const ExecutionEnvironment = common.ExecutionEnvironment; const FilesystemType = common.FilesystemType; -const OptimizeMode = common.OptimizeMode; +const ImageConfig = common.ImageConfig; const QEMUOptions = common.QEMUOptions; -const BirthProgram = common.BirthProgram; const Suffix = common.Suffix; -const Target = common.Target; +const TraditionalExecutionMode = common.TraditionalExecutionMode; const Error = error{ not_implemented, @@ -44,9 +58,87 @@ var modules = Modules{}; var b: *Build = undefined; var build_steps: *BuildSteps = undefined; var default_configuration: Configuration = undefined; -var user_modules: []const common.Module = undefined; +var user_modules: []const UserModule = undefined; var options = Options{}; +const supported_architectures = [_]Cpu.Arch{ + .x86_64, + //.aarch64, + //.riscv64, +}; + +fn architectureIndex(comptime arch: Cpu.Arch) comptime_int { + inline for (supported_architectures, 0..) 
|architecture, index| { + if (arch == architecture) return index; + } + + @compileError("Architecture not found"); +} + +const ArchitectureBootloader = struct { + id: Bootloader, + protocols: []const Bootloader.Protocol, +}; + +const architecture_bootloader_map = blk: { + var array: [supported_architectures.len][]const ArchitectureBootloader = undefined; + + array[architectureIndex(.x86_64)] = &.{ + .{ + .id = .birth, + .protocols = &.{ .bios, .uefi }, + }, + .{ + .id = .limine, + .protocols = &.{ .bios, .uefi }, + }, + }; + + // array[architectureIndex(.aarch64)] = &.{ + // .{ + // .id = .birth, + // .protocols = &.{.uefi}, + // }, + // .{ + // .id = .limine, + // .protocols = &.{.uefi}, + // }, + // }; + + // array[architectureIndex(.riscv64)] = &.{ + // .{ + // .id = .birth, + // .protocols = &.{.uefi}, + // }, + // }; + + break :blk array; +}; + +pub const UserModule = struct { + package: UserPackage, + name: []const u8, +}; +pub const UserPackage = struct { + kind: Kind, + dependencies: []const Dependency, + + pub const Kind = enum { + zig_exe, + }; + + pub const Dependency = struct { + foo: u64 = 0, + }; +}; + +pub const BirthProgram = enum { + bootloader, + cpu, + user, + host, +}; + pub fn build(b_arg: *Build) !void { b = b_arg; ci = b.option(bool, "ci", "CI mode") orelse false; @@ -56,9 +148,9 @@ pub fn build(b_arg: *Build) !void { const default_cfg_override = b.option([]const u8, "default", "Default configuration JSON file") orelse "config/default.json"; modules = blk: { var mods = Modules{}; - inline for (comptime common.enumValues(ModuleID)) |module_id| { + inline for (comptime enumValues(ModuleID)) |module_id| { mods.modules.set(module_id, b.createModule(.{ - .source_file = FileSource.relative(switch (module_id) { + .source_file = LazyPath.relative(switch (module_id) { .limine_installer => "src/bootloader/limine/installer.zig", else => switch (module_id) { .bios, .uefi, .limine => "src/bootloader", @@ -93,12 +185,12 @@ pub fn build(b_arg: *Build) !void { }; default_configuration = blk: { - const default_json_file = try std.fs.cwd().readFileAlloc(b.allocator, default_cfg_override, common.maxInt(usize)); - const parsed_cfg = try std.json.parseFromSlice(Configuration, b.allocator, default_json_file, .{}); + const default_json_file = try cwd().readFileAlloc(b.allocator, default_cfg_override, maxInt(usize)); + const parsed_cfg = try json.parseFromSlice(Configuration, b.allocator, default_json_file, .{}); const cfg = parsed_cfg.value; const optimize_mode = b.option( - std.builtin.Mode, + OptimizeMode, "optimize", "Prioritize performance, safety, or binary size (-O flag)", ) orelse cfg.optimize_mode; @@ -162,13 +254,13 @@ pub fn build(b_arg: *Build) !void { run_native: bool = true, const C = struct { - include_paths: []const []const u8, + include_paths: []const LazyPath, source_files: []const SourceFile, link_libc: bool, link_libcpp: bool, const SourceFile = struct { - path: []const u8, + path: LazyPath, flags: []const []const u8, }; }; @@ -183,10 +275,10 @@ pub fn build(b_arg: *Build) !void { .root_project_path = disk_image_root_path, .modules = disk_image_builder_modules, .c = .{ - .include_paths = &.{"src/bootloader/limine/installables"}, + .include_paths = &.{LazyPath.relative("src/bootloader/limine/installables")}, .source_files = &.{ .{ - .path = "src/bootloader/limine/installables/limine-deploy.c", + .path = LazyPath.relative("src/bootloader/limine/installables/limine-deploy.c"), .flags = &.{}, }, }, @@ -200,7 +292,7 @@ pub fn build(b_arg: *Build) !void { const 
native_test_optimize_mode = .ReleaseFast; for (native_tests) |native_test| { - const test_name = try std.mem.concat(b.allocator, u8, &.{ native_test.name, "_", @tagName(native_test_optimize_mode) }); + const test_name = try concat(b.allocator, u8, &.{ native_test.name, "_", @tagName(native_test_optimize_mode) }); const test_exe = try addCompileStep(.{ .name = test_name, .root_project_path = native_test.root_project_path, @@ -215,7 +307,7 @@ pub fn build(b_arg: *Build) !void { } for (c.source_files) |source_file| { - test_exe.addCSourceFile(source_file.path, source_file.flags); + test_exe.addCSourceFile(.{ .file = source_file.path, .flags = source_file.flags }); } if (c.link_libc) { @@ -247,20 +339,20 @@ pub fn build(b_arg: *Build) !void { const ovmf_path = ovmf_downloader_run_step.addOutputFileArg("OVMF.fd"); { - var user_module_list = std.ArrayList(common.Module).init(b.allocator); - var user_program_dir = try std.fs.cwd().openIterableDir(user_program_dir_path, .{ .access_sub_paths = true }); + var user_module_list = ArrayList(UserModule).init(b.allocator); + var user_program_dir = try cwd().openIterableDir(user_program_dir_path, .{ .access_sub_paths = true }); defer user_program_dir.close(); var user_program_iterator = user_program_dir.iterate(); while (try user_program_iterator.next()) |entry| { const dir_name = entry.name; - const file_path = try std.mem.concat(b.allocator, u8, &.{ dir_name, "/module.json" }); - const file = try user_program_dir.dir.readFileAlloc(b.allocator, file_path, common.maxInt(usize)); - const parsed_user_program = try std.json.parseFromSlice(common.UserProgram, b.allocator, file, .{}); - const user_program = parsed_user_program.value; + const file_path = try concat(b.allocator, u8, &.{ dir_name, "/module.json" }); + const file = try user_program_dir.dir.readFileAlloc(b.allocator, file_path, maxInt(usize)); + const parsed_user_package = try json.parseFromSlice(UserPackage, b.allocator, file, .{}); + const user_package = parsed_user_package.value; try user_module_list.append(.{ - .program = user_program, + .package = user_package, .name = b.dupe(dir_name), // we have to dupe here otherwise Windows CI fails }); } @@ -270,8 +362,8 @@ pub fn build(b_arg: *Build) !void { const executable_kinds = [2]CompileStep.Kind{ .exe, .@"test" }; - for (common.enumValues(OptimizeMode)) |optimize_mode| { - for (common.supported_architectures, 0..) |architecture, architecture_index| { + for (enumValues(OptimizeMode)) |optimize_mode| { + for (supported_architectures, 0..) 
|architecture, architecture_index| { const user_target = try getTarget(architecture, .user); for (executable_kinds) |executable_kind| { @@ -301,7 +393,7 @@ pub fn build(b_arg: *Build) !void { else => return Error.architecture_not_supported, }; - const cpu_driver_linker_script_path = FileSource.relative(try std.mem.concat(b.allocator, u8, &.{ cpu_driver_path, "/arch/", switch (architecture) { + const cpu_driver_linker_script_path = LazyPath.relative(try concat(b.allocator, u8, &.{ cpu_driver_path, "/arch/", switch (architecture) { .x86_64 => "x86/64", .x86 => "x86/32", else => @tagName(architecture), @@ -309,14 +401,14 @@ pub fn build(b_arg: *Build) !void { cpu_driver.setLinkerScriptPath(cpu_driver_linker_script_path); - var user_module_list = try std.ArrayList(*CompileStep).initCapacity(b.allocator, user_modules.len); - const user_architecture_source_path = try std.mem.concat(b.allocator, u8, &.{ "src/user/arch/", @tagName(architecture), "/" }); - const user_linker_script_path = FileSource.relative(try std.mem.concat(b.allocator, u8, &.{ user_architecture_source_path, "linker_script.ld" })); + var user_module_list = try ArrayList(*CompileStep).initCapacity(b.allocator, user_modules.len); + const user_architecture_source_path = try concat(b.allocator, u8, &.{ "src/user/arch/", @tagName(architecture), "/" }); + const user_linker_script_path = LazyPath.relative(try concat(b.allocator, u8, &.{ user_architecture_source_path, "linker_script.ld" })); for (user_modules) |module| { const user_module = try addCompileStep(.{ .kind = executable_kind, .name = module.name, - .root_project_path = try std.mem.concat(b.allocator, u8, &.{ user_program_dir_path, "/", module.name }), + .root_project_path = try concat(b.allocator, u8, &.{ user_program_dir_path, "/", module.name }), .target = user_target, .optimize_mode = optimize_mode, .modules = &.{ .lib, .user, .birth }, @@ -328,7 +420,7 @@ pub fn build(b_arg: *Build) !void { user_module_list.appendAssumeCapacity(user_module); } - const bootloaders = common.architecture_bootloader_map[architecture_index]; + const bootloaders = architecture_bootloader_map[architecture_index]; for (bootloaders) |bootloader_struct| { const bootloader = bootloader_struct.id; for (bootloader_struct.protocols) |boot_protocol| { @@ -353,9 +445,9 @@ pub fn build(b_arg: *Build) !void { executable.strip = true; - executable.addAssemblyFile("src/bootloader/arch/x86/64/smp_trampoline.S"); - executable.addAssemblyFile(bootloader_path ++ "/unreal_mode.S"); - executable.setLinkerScriptPath(FileSource.relative(bootloader_path ++ "/linker_script.ld")); + executable.addAssemblyFile(LazyPath.relative("src/bootloader/arch/x86/64/smp_trampoline.S")); + executable.addAssemblyFile(LazyPath.relative(bootloader_path ++ "/unreal_mode.S")); + executable.setLinkerScriptPath(LazyPath.relative(bootloader_path ++ "/linker_script.ld")); executable.code_model = .small; break :blk executable; @@ -380,7 +472,7 @@ pub fn build(b_arg: *Build) !void { executable.strip = true; switch (architecture) { - .x86_64 => executable.addAssemblyFile("src/bootloader/arch/x86/64/smp_trampoline.S"), + .x86_64 => executable.addAssemblyFile(LazyPath.relative("src/bootloader/arch/x86/64/smp_trampoline.S")), else => {}, } @@ -405,7 +497,7 @@ pub fn build(b_arg: *Build) !void { executable.code_model = cpu_driver.code_model; - executable.setLinkerScriptPath(FileSource.relative(try common.concat(b.allocator, u8, &.{ limine_loader_path ++ "arch/", @tagName(architecture), "/linker_script.ld" }))); + 
executable.setLinkerScriptPath(LazyPath.relative(try concat(b.allocator, u8, &.{ limine_loader_path ++ "arch/", @tagName(architecture), "/linker_script.ld" }))); break :blk executable; }, @@ -431,7 +523,7 @@ pub fn build(b_arg: *Build) !void { }; const execution_types: []const ExecutionType = - switch (common.canVirtualizeWithQEMU(architecture, ci)) { + switch (canVirtualizeWithQEMU(architecture, ci)) { true => &.{ .emulated, .accelerated }, false => &.{.emulated}, }; @@ -448,13 +540,13 @@ pub fn build(b_arg: *Build) !void { .executable_kind = executable_kind, }; - var disk_argument_parser = common.ArgumentParser.DiskImageBuilder{}; + var disk_argument_parser = ArgumentParser.DiskImageBuilder{}; const disk_image_builder_run = b.addRunArtifact(disk_image_builder); const disk_image_path = disk_image_builder_run.addOutputFileArg("disk.hdd"); while (disk_argument_parser.next()) |argument_type| switch (argument_type) { - .configuration => inline for (common.fields(Configuration)) |field| disk_image_builder_run.addArg(@tagName(@field(configuration, field.name))), - .image_configuration_path => disk_image_builder_run.addArg(common.ImageConfig.default_path), + .configuration => inline for (fields(Configuration)) |field| disk_image_builder_run.addArg(@tagName(@field(configuration, field.name))), + .image_configuration_path => disk_image_builder_run.addArg(ImageConfig.default_path), .disk_image_path => { // Must be first assert(@intFromEnum(argument_type) == 0); @@ -540,7 +632,7 @@ pub fn build(b_arg: *Build) !void { } const Options = struct { - arr: std.EnumArray(BirthProgram, *OptionsStep) = std.EnumArray(BirthProgram, *OptionsStep).initUndefined(), + arr: EnumArray(BirthProgram, *OptionsStep) = EnumArray(BirthProgram, *OptionsStep).initUndefined(), pub fn createOption(options_struct: *Options, birth_program: BirthProgram) void { const new_options = b.addOptions(); @@ -591,31 +683,31 @@ fn addFileSize(artifact: *CompileStep, comptime name: []const u8) void { fn newRunnerRunArtifact(arguments: struct { configuration: Configuration, - disk_image_path: FileSource, + disk_image_path: LazyPath, loader: *CompileStep, runner: *CompileStep, cpu_driver: *CompileStep, user_init: *CompileStep, qemu_options: QEMUOptions, - ovmf_path: FileSource, + ovmf_path: LazyPath, is_default: bool, }) !*RunStep { const runner = b.addRunArtifact(arguments.runner); - var argument_parser = common.ArgumentParser.Runner{}; + var argument_parser = ArgumentParser.Runner{}; while (argument_parser.next()) |argument_type| switch (argument_type) { - .configuration => inline for (common.fields(Configuration)) |field| runner.addArg(@tagName(@field(arguments.configuration, field.name))), - .image_configuration_path => runner.addArg(common.ImageConfig.default_path), + .configuration => inline for (fields(Configuration)) |field| runner.addArg(@tagName(@field(arguments.configuration, field.name))), + .image_configuration_path => runner.addArg(ImageConfig.default_path), .cpu_driver => runner.addArtifactArg(arguments.cpu_driver), .loader_path => runner.addArtifactArg(arguments.loader), .init => runner.addArtifactArg(arguments.user_init), - .disk_image_path => runner.addFileSourceArg(arguments.disk_image_path), - .qemu_options => inline for (common.fields(QEMUOptions)) |field| runner.addArg(if (@field(arguments.qemu_options, field.name)) "true" else "false"), + .disk_image_path => runner.addFileArg(arguments.disk_image_path), + .qemu_options => inline for (fields(QEMUOptions)) |field| runner.addArg(if (@field(arguments.qemu_options, 
field.name)) "true" else "false"), .ci => runner.addArg(if (ci) "true" else "false"), .debug_user => runner.addArg(if (debug_user) "true" else "false"), .debug_loader => runner.addArg(if (debug_loader) "true" else "false"), - .ovmf_path => runner.addFileSourceArg(arguments.ovmf_path), + .ovmf_path => runner.addFileArg(arguments.ovmf_path), .is_default => runner.addArg(if (arguments.is_default) "true" else "false"), }; @@ -631,15 +723,17 @@ const ExecutableDescriptor = struct { modules: []const ModuleID, }; +const main_package_path = LazyPath.relative(source_root_dir); fn addCompileStep(executable_descriptor: ExecutableDescriptor) !*CompileStep { - const main_file = try std.mem.concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/main.zig" }); + const main_file = try concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/main.zig" }); const compile_step = switch (executable_descriptor.kind) { .exe => blk: { const executable = b.addExecutable(.{ .name = executable_descriptor.name, - .root_source_file = FileSource.relative(main_file), + .root_source_file = LazyPath.relative(main_file), .target = executable_descriptor.target, .optimize = executable_descriptor.optimize_mode, + .main_pkg_path = main_package_path, }); build_steps.build_all.dependOn(&executable.step); @@ -647,13 +741,14 @@ fn addCompileStep(executable_descriptor: ExecutableDescriptor) !*CompileStep { break :blk executable; }, .@"test" => blk: { - const test_file = FileSource.relative(try std.mem.concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/test.zig" })); + const test_file = LazyPath.relative(try concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/test.zig" })); const test_exe = b.addTest(.{ .name = executable_descriptor.name, .root_source_file = test_file, .target = executable_descriptor.target, .optimize = executable_descriptor.optimize_mode, .test_runner = if (executable_descriptor.target.os_tag) |_| main_file else null, + .main_pkg_path = main_package_path, }); build_steps.build_all_tests.dependOn(&test_exe.step); @@ -669,8 +764,6 @@ fn addCompileStep(executable_descriptor: ExecutableDescriptor) !*CompileStep { compile_step.entry_symbol_name = "_start"; } - compile_step.setMainPkgPath(source_root_dir); - for (executable_descriptor.modules) |module| { modules.addModule(compile_step, module); } @@ -700,8 +793,8 @@ const ModuleID = enum { }; pub const Modules = struct { - modules: std.EnumArray(ModuleID, *Module) = std.EnumArray(ModuleID, *Module).initUndefined(), - dependencies: std.EnumArray(ModuleID, []const ModuleDependency) = std.EnumArray(ModuleID, []const ModuleDependency).initUndefined(), + modules: EnumArray(ModuleID, *Module) = EnumArray(ModuleID, *Module).initUndefined(), + dependencies: EnumArray(ModuleID, []const ModuleDependency) = EnumArray(ModuleID, []const ModuleDependency).initUndefined(), fn addModule(mods: Modules, compile_step: *CompileStep, module_id: ModuleID) void { compile_step.addModule(@tagName(module_id), mods.modules.get(module_id)); @@ -718,7 +811,7 @@ pub const Modules = struct { } }; -fn getTarget(asked_arch: Cpu.Arch, execution_mode: common.TraditionalExecutionMode) Error!CrossTarget { +fn getTarget(asked_arch: Cpu.Arch, execution_mode: TraditionalExecutionMode) Error!CrossTarget { var enabled_features = Cpu.Feature.Set.empty; var disabled_features = Cpu.Feature.Set.empty; @@ -743,14 +836,14 @@ fn getTarget(asked_arch: Cpu.Arch, execution_mode: common.TraditionalExecutionMo return CrossTarget{ .cpu_arch = asked_arch, - 
.cpu_model = switch (common.cpu.arch) { + .cpu_model = switch (cpu.arch) { .x86 => .determined_by_cpu_arch, .x86_64 => if (execution_mode == .privileged) .determined_by_cpu_arch else // zig fmt off .determined_by_cpu_arch, // .determined_by_cpu_arch, // TODO: this causes some problems: https://github.com/ziglang/zig/issues/15524 - //.{ .explicit = &common.Target.x86.cpu.x86_64_v3 }, + //.{ .explicit = &Target.x86.cpu.x86_64_v3 }, else => .determined_by_cpu_arch, }, .os_tag = .freestanding, diff --git a/config/default.json b/config/default.json index 6a813cb..e0d3c46 100644 --- a/config/default.json +++ b/config/default.json @@ -1,7 +1,7 @@ { "architecture": "x86_64", - "bootloader": "limine", - "boot_protocol": "uefi", + "bootloader": "birth", + "boot_protocol": "bios", "execution_environment": "qemu", "optimize_mode": "Debug", "execution_type": "emulated", diff --git a/src/birth.zig b/src/birth.zig index 10f51fb..1b17326 100644 --- a/src/birth.zig +++ b/src/birth.zig @@ -1,23 +1,94 @@ const lib = @import("lib"); +const assert = lib.assert; pub const arch = @import("birth/arch.zig"); -pub const capabilities = @import("birth/capabilities.zig"); -pub const syscall = @import("birth/syscall.zig"); +pub const interface = @import("birth/interface.zig"); /// This struct is the shared part that the user and the cpu see -pub const UserScheduler = extern struct { - self: *UserScheduler, - disabled: bool, - has_work: bool, +pub const Scheduler = extern struct { + common: Common, + current_thread: *Thread, + thread_queue: ?*Thread = null, + time_slice: u32, core_id: u32, - setup_stack: [lib.arch.valid_page_sizes[0]]u8 align(lib.arch.stack_alignment), - setup_stack_lock: lib.Atomic(bool), + core_state: CoreState, + bootstrap_thread: Thread, - pub inline fn architectureSpecific(user_scheduler: *UserScheduler) *arch.UserScheduler { - return @fieldParentPtr(arch.UserScheduler, "generic", user_scheduler); + pub const Common = extern struct { + self: *Common, + disabled: bool, + has_work: bool, + core_id: u32, + heap: lib.VirtualMemoryRegion, + setup_stack: [lib.arch.valid_page_sizes[0] * 4]u8 align(lib.arch.stack_alignment), + setup_stack_lock: lib.Atomic(bool), + disabled_save_area: arch.RegisterArena, + + pub fn heapAllocateFast(common: *Common, comptime T: type) !*T { + const size = @sizeOf(T); + const alignment = @alignOf(T); + lib.log.debug("Heap: {}. Size: {}. 
Alignment: {}", .{ common.heap, size, alignment }); + const result = try common.heap.takeSlice(size); + const ptr = &result.access(T)[0]; + assert(lib.isAligned(@intFromPtr(ptr), alignment)); + + return ptr; + } + }; + + pub fn enqueueThread(scheduler: *Scheduler, thread_to_queue: *Thread) void { + // TODO: check queue + // TODO: defer check queue + if (scheduler.thread_queue) |thread_queue| { + _ = thread_queue; + @panic("TODO: enqueueThread"); + } else { + scheduler.thread_queue = thread_to_queue; + thread_to_queue.previous = thread_to_queue; + thread_to_queue.next = thread_to_queue; + } + } + + pub noinline fn restore(scheduler: *Scheduler, register_arena: *const arch.RegisterArena) noreturn { + assert(scheduler.common.generic.disabled); + assert(scheduler.common.generic.has_work); + + assert(register_arena.registers.rip > lib.arch.valid_page_sizes[0]); + assert(register_arena.registers.rflags.IF and register_arena.registers.rflags.reserved0); + + register_arena.contextSwitch(); } }; -pub const CommandBuffer = struct { - foo: u32, +pub const Thread = extern struct { + self: *Thread, + previous: ?*Thread = null, + next: ?*Thread = null, + stack: [*]u8, + stack_top: [*]align(lib.arch.stack_alignment) u8, + register_arena: arch.RegisterArena align(arch.RegisterArena.alignment), + core_id: u32, + + pub fn init(thread: *Thread, scheduler: *Scheduler) void { + thread.* = Thread{ + .self = thread, + .core_id = scheduler.generic.core_id, + .stack = thread.stack, + .stack_top = thread.stack_top, + .register_arena = thread.register_arena, + }; + } +}; + +pub const CoreState = extern struct { + virtual_address_space: *VirtualAddressSpace, +}; +pub const VirtualAddressSpace = extern struct { + // TODO: physical map + // TODO: layout + regions: ?*VirtualMemoryRegion = null, +}; + +pub const VirtualMemoryRegion = extern struct { + next: ?*VirtualMemoryRegion = null, }; diff --git a/src/birth/arch/x64_64.zig b/src/birth/arch/x64_64.zig index 86bc2ea..60ac166 100644 --- a/src/birth/arch/x64_64.zig +++ b/src/birth/arch/x64_64.zig @@ -2,18 +2,14 @@ const lib = @import("lib"); const assert = lib.assert; const birth = @import("birth"); -pub const UserScheduler = extern struct { - generic: birth.UserScheduler, - disabled_save_area: RegisterArena, -}; - pub const RegisterArena = extern struct { fpu: FPU align(lib.arch.stack_alignment), registers: birth.arch.Registers, + pub const alignment = lib.arch.stack_alignment; + pub fn contextSwitch(register_arena: *align(lib.arch.stack_alignment) const RegisterArena) noreturn { assert(lib.isAligned(@intFromPtr(register_arena), lib.arch.stack_alignment)); - //lib.log.debug("ASDASD: {}", .{register_arena}); register_arena.fpu.load(); register_arena.registers.restore(); } @@ -129,9 +125,9 @@ pub const FPU = extern struct { pub const user_code_selector = 0x43; pub const user_data_selector = 0x3b; -pub inline fn syscall(options: birth.syscall.Options, arguments: birth.syscall.Arguments) birth.syscall.Result { - var first: birth.syscall.Result.Birth.First = undefined; - var second: birth.syscall.Result.Birth.Second = undefined; +pub inline fn syscall(options: birth.interface.Raw.Options, arguments: birth.interface.Raw.Arguments) birth.interface.Raw.Result { + var first: birth.interface.Raw.Result.Birth.First = undefined; + var second: birth.interface.Raw.Result.Birth.Second = undefined; asm volatile ( \\syscall : [rax] "={rax}" (first), @@ -153,3 +149,28 @@ pub inline fn syscall(options: birth.syscall.Options, arguments: birth.syscall.A }, }; } + +pub const PageTable = 
extern struct { + /// The frame that holds the memory of this page table + frame: birth.interface.RAM, + flags: packed struct(u8) { + level: Level4, // TODO: move to other + granularity: Granularity, + reserved: u4 = 0, + }, + + pub const Granularity = enum(u2) { + @"4_kb", + @"2_mb", + @"1_gb", + }; + + pub const Level4 = enum(u2) { + PML4 = 0, + PDP = 1, + PD = 2, + PT = 3, + + pub const count = lib.enumCount(@This()); + }; +}; diff --git a/src/birth/capabilities.zig b/src/birth/capabilities.zig deleted file mode 100644 index 387c7ef..0000000 --- a/src/birth/capabilities.zig +++ /dev/null @@ -1,381 +0,0 @@ -const lib = @import("lib"); -const assert = lib.assert; -const PhysicalAddress = lib.PhysicalAddress; - -const birth = @import("birth"); -const syscall = birth.syscall; - -const Capabilities = @This(); - -pub const Type = enum(u8) { - io, // primitive - cpu, // primitive - ram, // primitive - cpu_memory, // non-primitive Barrelfish: frame - boot, - process, // Temporarily available - page_table, // Barrelfish: vnode - // TODO: device_memory, // primitive - // scheduler, - // irq_table, - - // _, - - pub const Type = u8; - - pub const Mappable = enum { - cpu_memory, - page_table, - - pub inline fn toCapability(mappable: Mappable) Capabilities.Type { - return switch (mappable) { - inline else => |mappable_cap| @field(Capabilities.Type, @tagName(mappable_cap)), - }; - } - }; -}; - -pub const Subtype = u16; -pub const AllTypes = Type; - -pub fn CommandBuilder(comptime list: []const []const u8) type { - const capability_base_command_list = .{ - "copy", - "mint", - "retype", - "delete", - "revoke", - "create", - } ++ list; - const enum_fields = lib.enumAddNames(&.{}, capability_base_command_list); - - // TODO: make this non-exhaustive enums - // PROBLEM: https://github.com/ziglang/zig/issues/12250 - // Currently waiting on this since this will enable some comptime magic - const result = @Type(.{ - .Enum = .{ - .tag_type = Subtype, - .fields = enum_fields, - .decls = &.{}, - .is_exhaustive = true, - }, - }); - return result; -} - -/// Takes some names and integers. 
Then values are added to the Command enum for an specific capability -/// The number is an offset of the fields with respect to the base command enum fields -pub fn Command(comptime capability: Type) type { - const extra_command_list = switch (capability) { - .io => .{ - "log", - }, - .cpu => .{ - "get_core_id", - "shutdown", - "get_command_buffer", - }, - .ram => [_][]const u8{}, - .cpu_memory => .{ - "allocate", - }, - .boot => .{ - "get_bundle_size", - "get_bundle_file_list_size", - }, - .process => .{ - "exit", - }, - .page_table => [_][]const u8{}, - }; - - return CommandBuilder(&extra_command_list); -} - -const success = 0; -const first_valid_error = success + 1; - -pub fn ErrorSet(comptime error_names: []const []const u8) type { - return lib.ErrorSet(error_names, &.{ - .{ - .name = "forbidden", - .value = first_valid_error + 0, - }, - .{ - .name = "corrupted_input", - .value = first_valid_error + 1, - }, - .{ - .name = "invalid_input", - .value = first_valid_error + 2, - }, - }); -} - -const raw_argument_count = @typeInfo(syscall.Arguments).Array.len; - -pub fn Syscall(comptime capability_type: Type, comptime command_type: Command(capability_type)) type { - const Types = switch (capability_type) { - .io => switch (command_type) { - .copy, .mint, .retype, .delete, .revoke, .create => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{}); - pub const Result = void; - pub const Arguments = void; - }, - .log => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{}); - pub const Result = usize; - pub const Arguments = []const u8; - - inline fn toResult(raw_result: syscall.Result.Birth) Result { - return raw_result.second; - } - - inline fn resultToRaw(result: Result) syscall.Result { - return syscall.Result{ - .birth = .{ - .first = .{}, - .second = result, - }, - }; - } - - inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments { - const result = [2]usize{ @intFromPtr(arguments.ptr), arguments.len }; - return result ++ .{0} ** (raw_argument_count - result.len); - } - - inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments { - const message_ptr = @as(?[*]const u8, @ptrFromInt(raw_arguments[0])) orelse return error.invalid_input; - const message_len = raw_arguments[1]; - if (message_len == 0) return error.invalid_input; - const message = message_ptr[0..message_len]; - return message; - } - }, - }, - .cpu => switch (command_type) { - .copy, .mint, .retype, .delete, .revoke, .create => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{}); - pub const Result = void; - pub const Arguments = void; - }, - .get_core_id => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{}); - pub const Result = u32; - pub const Arguments = void; - - inline fn toResult(raw_result: syscall.Result.birth) Result { - return @as(Result, @intCast(raw_result.second)); - } - - inline fn resultToRaw(result: Result) syscall.Result { - return syscall.Result{ - .birth = .{ - .first = .{}, - .second = result, - }, - }; - } - }, - .shutdown => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{}); - pub const Result = noreturn; - pub const Arguments = void; - - pub const toResult = @compileError("noreturn unexpectedly returned"); - }, - .get_command_buffer => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{}); - pub const Result = noreturn; - pub const Arguments = *birth.CommandBuffer; - - pub const toResult = @compileError("noreturn unexpectedly returned"); - - inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments { - const ptr = 
@as(?*birth.CommandBuffer, @ptrFromInt(raw_arguments[0])) orelse return error.invalid_input; - return ptr; - } - - inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments { - const result = [1]usize{@intFromPtr(arguments)}; - return result ++ .{0} ** (raw_argument_count - result.len); - } - }, - }, - .ram => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{}); - pub const Result = void; - pub const Arguments = void; - }, - .cpu_memory => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{ - "OutOfMemory", - }); - pub const Result = PhysicalAddress; - pub const Arguments = usize; - - inline fn toResult(raw_result: syscall.Result.birth) Result { - return PhysicalAddress.new(raw_result.second); - } - - inline fn resultToRaw(result: Result) syscall.Result { - return syscall.Result{ - .birth = .{ - .first = .{}, - .second = result.value(), - }, - }; - } - - inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments { - const size = raw_arguments[0]; - return size; - } - - inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments { - const result = [1]usize{arguments}; - return result ++ .{0} ** (raw_argument_count - result.len); - } - }, - .boot => switch (command_type) { - .get_bundle_file_list_size, .get_bundle_size => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{ - "buffer_too_small", - }); - pub const Result = usize; - pub const Arguments = void; - - inline fn resultToRaw(result: Result) syscall.Result { - return syscall.Result{ - .birth = .{ - .first = .{}, - .second = result, - }, - }; - } - - inline fn toResult(raw_result: syscall.Result.birth) Result { - return raw_result.second; - } - }, - else => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{ - "buffer_too_small", - }); - pub const Result = void; - pub const Arguments = void; - }, - }, - .process => switch (command_type) { - .exit => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{}); - pub const Result = noreturn; - pub const Arguments = bool; - - inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments { - const result = raw_arguments[0] != 0; - return result; - } - inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments { - const result = [1]usize{@intFromBool(arguments)}; - return result ++ .{0} ** (raw_argument_count - result.len); - } - }, - else => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{}); - pub const Result = void; - pub const Arguments = void; - }, - }, - .page_table => switch (command_type) { - else => struct { - pub const ErrorSet = Capabilities.ErrorSet(&.{}); - pub const Result = void; - pub const Arguments = void; - }, - }, - // else => @compileError("TODO: " ++ @tagName(capability)), - }; - - return struct { - pub const ErrorSet = Types.ErrorSet; - pub const Result = Types.Result; - pub const Arguments = Types.Arguments; - pub const toResult = Types.toResult; - pub const toArguments = if (Arguments != void) - Types.toArguments - else - struct { - fn lambda(raw_arguments: syscall.Arguments) error{}!void { - _ = raw_arguments; - return {}; - } - }.lambda; - pub const capability = capability_type; - pub const command = command_type; - - pub inline fn resultToRaw(result: Result) syscall.Result { - return if (@hasDecl(Types, "resultToRaw")) blk: { - comptime assert(Result != void and Result != noreturn); - break :blk Types.resultToRaw(result); - } else blk: { - if (Result != void) { - @compileError("expected void type, got " ++ @typeName(Result) ++ ". 
You forgot to implement a resultToRaw function" ++ " for (" ++ @tagName(capability) ++ ", " ++ @tagName(command) ++ ")."); - } - - break :blk syscall.Result{ - .birth = .{ - .first = .{}, - .second = 0, - }, - }; - }; - } - - pub inline fn errorToRaw(err: @This().ErrorSet.Error) syscall.Result { - const error_enum = switch (err) { - inline else => |comptime_error| @field(@This().ErrorSet.Enum, @errorName(comptime_error)), - }; - return syscall.Result{ - .birth = .{ - .first = .{ - .@"error" = @intFromEnum(error_enum), - }, - .second = 0, - }, - }; - } - - /// This is not meant to be called in the CPU driver - pub fn blocking(arguments: Arguments) @This().ErrorSet.Error!Result { - const raw_arguments = if (Arguments != void) Types.argumentsToRaw(arguments) else [1]usize{0} ** raw_argument_count; - // TODO: make this more reliable and robust? - const options = birth.syscall.Options{ - .birth = .{ - .type = capability, - .command = @intFromEnum(command), - }, - }; - - const raw_result = birth.arch.syscall(options, raw_arguments); - - const raw_error_value = raw_result.birth.first.@"error"; - comptime { - assert(!@hasField(@This().ErrorSet.Enum, "ok")); - assert(!@hasField(@This().ErrorSet.Enum, "success")); - assert(lib.enumFields(@This().ErrorSet.Enum)[0].value == first_valid_error); - } - - return switch (raw_error_value) { - success => switch (Result) { - noreturn => unreachable, - else => toResult(raw_result.birth), - }, - else => switch (@as(@This().ErrorSet.Enum, @enumFromInt(raw_error_value))) { - inline else => |comptime_error_enum| @field(@This().ErrorSet.Error, @tagName(comptime_error_enum)), - }, - }; - } - }; -} diff --git a/src/birth/interface.zig b/src/birth/interface.zig new file mode 100644 index 0000000..e4b5eb8 --- /dev/null +++ b/src/birth/interface.zig @@ -0,0 +1,1031 @@ +const lib = @import("lib"); +const assert = lib.assert; +const PhysicalAddress = lib.PhysicalAddress; + +const birth = @import("birth"); + +pub const Capability = enum(u8) { + io, // primitive + cpu, // primitive + memory, // primitive + cpu_memory, // non-primitive Barrelfish: frame + command_buffer_submission, // inherit from ram + command_buffer_completion, // inherit from ram + boot, + process, // Temporarily available + page_table, // Barrelfish: vnode + memory_mapping, // Barrelfish: Frame Mapping, Device Frame Mapping, + page_table_mapping, // Barrelfish: VNode mapping + // TODO: device_memory, // primitive + // scheduler, + // irq_table, + + // _, + + pub const BackingType = @typeInfo(Capability).Enum.tag_type; + + pub const Mappable = enum { + cpu_memory, + page_table, + command_buffer_completion, + command_buffer_submission, + + pub inline fn toCapability(mappable: Mappable) Capability { + return switch (mappable) { + inline else => |mappable_cap| @field(Capability, @tagName(mappable_cap)), + }; + } + }; + + pub fn getChildTypes(comptime capability: Capability) []const Capability { + comptime { + return switch (capability) { + .memory => &.{ + .cpu_memory, + .command_buffer_completion, + .command_buffer_submission, + }, + else => &.{}, + }; + } + } +}; + +pub const Reference = packed struct(usize) { + integer: usize, +}; + +pub const Memory = packed struct(u64) { + block: u32, + region: u32, +}; + +pub const PageTable = packed struct(u16) { + block: u7 = 0, + index: u7 = 0, + entry_type: EntryType = .page_table, + present: bool = false, + + pub const EntryType = enum(u1) { + page_table = 0, + leaf = 1, + }; +}; + +pub fn CommandBuilder(comptime list: []const []const u8) type { + const 
capability_base_command_list = .{ + "copy", + "mint", + "retype", + "delete", + "revoke", + "create", + } ++ list; + + const enum_fields = lib.enumAddNames(&.{}, capability_base_command_list); + + // TODO: make this non-exhaustive enums + // PROBLEM: https://github.com/ziglang/zig/issues/12250 + // Currently waiting on this since this will enable some comptime magic + const result = @Type(.{ + .Enum = .{ + .tag_type = Command.Type, + .fields = enum_fields, + .decls = &.{}, + .is_exhaustive = true, + }, + }); + return result; +} + +pub const Command = extern struct { + foo: u32 = 0, + + pub const Buffer = extern struct { + submission_queue: Submission.Queue, + completion_queue: Completion.Queue, + + pub const CreateOptions = packed struct { + submission_entry_count: u16, + completion_entry_count: u16, + }; + }; + + pub const Submission = extern struct { + pub const Queue = extern struct { + head: *u16, + tail: *u16, + }; + + pub const Header = extern struct { + head: u16, + tail: u16, + }; + }; + + pub const Completion = extern struct { + foo: u32 = 0, + + pub const Queue = extern struct { + head: *u16, + tail: *u16, + }; + + pub const Header = extern struct { + head: u16, + tail: u16, + }; + }; + pub const Type = u16; + pub fn fromCapability(comptime capability: Capability) type { + const extra_command_list = switch (capability) { + .io => .{ + "log", + }, + .cpu => .{ + "get_core_id", + "shutdown", + "get_command_buffer", + }, + .memory => .{ + "allocate", + }, + .cpu_memory => .{}, + .command_buffer_submission, .command_buffer_completion => .{ + "map", + }, + .boot => .{ + "get_bundle_size", + "get_bundle_file_list_size", + }, + .process => .{ + "exit", + "panic", + }, + .page_table => .{ + "get", + }, + .memory_mapping => .{}, + .page_table_mapping => .{}, + }; + + return CommandBuilder(&extra_command_list); + } +}; + +/// Takes some names and integers. 
Then values are added to the Command enum for an specific capability +/// The number is an offset of the fields with respect to the base command enum fields +const success = 0; +const first_valid_error = success + 1; +pub fn ErrorSet(comptime error_names: []const []const u8) type { + const predefined_error_names = &.{ "forbidden", "corrupted_input", "invalid_input" }; + comptime var current_error = first_valid_error; + comptime var predefined_errors: []const lib.Type.EnumField = &.{}; + + inline for (predefined_error_names) |predefined_error_name| { + defer current_error += 1; + + predefined_errors = predefined_errors ++ .{ + .{ + .name = predefined_error_name, + .value = current_error, + }, + }; + } + + return lib.ErrorSet(error_names, predefined_errors); +} + +const DefaultErrorSet = ErrorSet(&.{}); +const Types = struct { + Result: type = void, + Arguments: type = void, + ErrorSet: type = DefaultErrorSet, +}; + +fn CommandDescriptor(comptime capability: Capability, comptime command: Command.fromCapability(capability)) type { + const type_descriptor: Types = switch (capability) { + .io => switch (command) { + .log => .{ + .Result = usize, + .Arguments = []const u8, + }, + else => .{}, + }, + .memory => switch (command) { + .allocate => .{ + .Result = Memory, + .Arguments = usize, + .ErrorSet = ErrorSet(&.{"OutOfMemory"}), + }, + .retype => .{ + .Result = Reference, + .Arguments = extern struct { + source: Memory, + destination: Capability, + }, + .ErrorSet = ErrorSet(&.{"OutOfMemory"}), + }, + else => .{}, + }, + .process => switch (command) { + .exit => .{ + .Result = noreturn, + .Arguments = bool, + }, + .panic => .{ + .Result = noreturn, + .Arguments = struct { + message: []const u8, + exit_code: u64, + }, + }, + else => .{}, + }, + .cpu => switch (command) { + .get_core_id => .{ + .Result = u32, + }, + .shutdown => .{ + .Result = noreturn, + }, + .get_command_buffer => .{ + .Result = void, + .Arguments = extern struct { + submission_frame: Reference, + completion_frame: Reference, + options: Command.Buffer.CreateOptions, + }, + }, + else => .{}, + }, + .boot => switch (command) { + .get_bundle_file_list_size, .get_bundle_size => .{ + .Result = usize, + }, + else => .{}, + }, + .command_buffer_completion, .command_buffer_submission => switch (command) { + .map => .{ + .Result = usize, + .Arguments = extern struct { + frame: Reference, + flags: packed struct(u64) { + write: bool, + execute: bool, + reserved: u62 = 0, + }, + }, + }, + else => .{}, + }, + .page_table => switch (command) { + .get => .{ + .Arguments = extern struct { + descriptor: PageTable, + buffer: *[512]PageTable, + }, + .ErrorSet = ErrorSet(&.{ + "index_out_of_bounds", + "not_present", + }), + }, + else => .{}, + }, + else => .{}, + }; + + const ToArguments = fn (Raw.Arguments) callconv(.Inline) type_descriptor.ErrorSet.Error!type_descriptor.Arguments; + const FromArguments = fn (type_descriptor.Arguments) callconv(.Inline) Raw.Arguments; + + const functions = switch (type_descriptor.Result) { + else => blk: { + const ToResult = fn (Raw.Result.Birth) callconv(.Inline) type_descriptor.Result; + const FromResult = fn (type_descriptor.Result) callconv(.Inline) Raw.Result; + + // return if (type_descriptor.Result == void and type_descriptor.Arguments == void and type_descriptor.ErrorSet == DefaultErrorSet) struct { + break :blk if (type_descriptor.ErrorSet == DefaultErrorSet and type_descriptor.Result == void and type_descriptor.Arguments == void) struct { + toResult: ToResult = voidToResult, + fromResult: FromResult = 
voidFromResult, + toArguments: ToArguments = voidToArguments, + fromArguments: FromArguments = voidFromArguments, + } else if (type_descriptor.ErrorSet == DefaultErrorSet and type_descriptor.Result == void) struct { + toResult: ToResult = voidToResult, + fromResult: FromResult = voidFromResult, + toArguments: ToArguments, + fromArguments: FromArguments, + } else if (type_descriptor.ErrorSet == DefaultErrorSet and type_descriptor.Arguments == void) struct { + toResult: ToResult, + fromResult: FromResult, + toArguments: ToArguments = voidToArguments, + fromArguments: FromArguments = voidFromArguments, + } else struct { + toResult: ToResult, + fromResult: FromResult, + toArguments: ToArguments, + fromArguments: FromArguments, + }; + }, + noreturn => if (type_descriptor.ErrorSet == DefaultErrorSet and type_descriptor.Arguments == void) struct { + toArguments: ToArguments = voidToArguments, + fromArguments: FromArguments = voidFromArguments, + } else struct { + toArguments: ToArguments, + fromArguments: FromArguments, + }, + }; + + return struct { + types: Types = type_descriptor, + functions: functions, + }; +} + +pub fn Descriptor(comptime capability: Capability, comptime command: Command.fromCapability(capability)) type { + const D = CommandDescriptor(capability, command); + const T = @as(?*const Types, @ptrCast(@typeInfo(D).Struct.fields[0].default_value)).?.*; + const d = D{ + .functions = switch (capability) { + .memory => switch (command) { + .allocate => blk: { + const F = struct { + inline fn toResult(raw_result: Raw.Result.Birth) Memory { + return @bitCast(raw_result.second); + } + + inline fn fromResult(result: Memory) Raw.Result { + return .{ + .birth = .{ + .first = .{}, + .second = @bitCast(result), + }, + }; + } + + inline fn toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!usize { + const size = raw_arguments[0]; + return size; + } + + inline fn fromArguments(arguments: usize) Raw.Arguments { + const result = [1]usize{arguments}; + return result ++ .{0} ** (Raw.argument_count - result.len); + } + }; + break :blk .{ + .toResult = F.toResult, + .fromResult = F.fromResult, + .toArguments = F.toArguments, + .fromArguments = F.fromArguments, + }; + }, + .retype => blk: { + const F = struct { + inline fn toResult(raw_result: Raw.Result.Birth) Reference { + return @bitCast(raw_result.second); + } + + inline fn fromResult(result: Reference) Raw.Result { + return .{ + .birth = .{ + .first = .{}, + .second = @bitCast(result), + }, + }; + } + inline fn toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!T.Arguments { + return T.Arguments{ + .source = @bitCast(raw_arguments[0]), + .destination = @enumFromInt(@as(@typeInfo(Capability).Enum.tag_type, @intCast(raw_arguments[1]))), + }; + } + + inline fn fromArguments(arguments: T.Arguments) Raw.Arguments { + const result = [2]usize{ + @bitCast(arguments.source), + @intFromEnum(arguments.destination), + }; + return result ++ .{0} ** (Raw.argument_count - result.len); + } + }; + break :blk .{ + .toResult = F.toResult, + .fromResult = F.fromResult, + .toArguments = F.toArguments, + .fromArguments = F.fromArguments, + }; + }, + else => .{}, + }, + .command_buffer_submission => switch (command) { + .map => blk: { + const struct_helper = StructHelperArguments(T.Arguments); + const F = struct { + inline fn toResult(raw_result: Raw.Result.Birth) T.Result { + _ = raw_result; + @panic("TODO: toResult"); + } + + inline fn fromResult(result: T.Result) Raw.Result { + _ = result; + @panic("TODO: fromResult"); + } + + inline fn 
toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!T.Arguments { + const arguments = try struct_helper.toArguments(raw_arguments); + if (arguments.flags.execute) { + return error.invalid_input; + } + + return arguments; + } + + inline fn fromArguments(arguments: T.Arguments) Raw.Arguments { + return struct_helper.fromArguments(arguments); + } + }; + break :blk .{ + .toArguments = F.toArguments, + .fromArguments = F.fromArguments, + .toResult = F.toResult, + .fromResult = F.fromResult, + }; + }, + else => .{}, + }, + .command_buffer_completion => switch (command) { + .map => blk: { + const F = struct { + inline fn toResult(raw_result: Raw.Result.Birth) T.Result { + _ = raw_result; + @panic("TODO: toResult"); + } + + inline fn fromResult(result: T.Result) Raw.Result { + _ = result; + @panic("TODO: fromResult"); + } + + inline fn toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!T.Arguments { + _ = raw_arguments; + @panic("TODO: toArguments"); + } + + inline fn fromArguments(arguments: T.Arguments) Raw.Arguments { + _ = arguments; + @panic("TODO: fromArguments"); + } + }; + break :blk .{ + .toArguments = F.toArguments, + .fromArguments = F.fromArguments, + .toResult = F.toResult, + .fromResult = F.fromResult, + }; + }, + else => .{}, + }, + .process => switch (command) { + .exit => blk: { + const F = struct { + inline fn toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!T.Arguments { + const result = raw_arguments[0] != 0; + return result; + } + + inline fn fromArguments(arguments: T.Arguments) Raw.Arguments { + const result = [1]usize{@intFromBool(arguments)}; + return result ++ .{0} ** (Raw.argument_count - result.len); + } + }; + break :blk .{ + // .toResult = F.toResult, + // .fromResult = F.fromResult, + .toArguments = F.toArguments, + .fromArguments = F.fromArguments, + }; + }, + .panic => blk: { + const F = struct { + inline fn toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!T.Arguments { + if (@as(?[*]const u8, @ptrFromInt(raw_arguments[0]))) |message_ptr| { + const message_len = raw_arguments[1]; + + if (message_len != 0) { + const message = message_ptr[0..message_len]; + const exit_code = raw_arguments[2]; + + return .{ + .message = message, + .exit_code = exit_code, + }; + } + } + + return error.invalid_input; + } + + inline fn fromArguments(arguments: T.Arguments) Raw.Arguments { + const result: [3]usize = .{ @intFromPtr(arguments.message.ptr), arguments.message.len, arguments.exit_code }; + return result ++ .{0} ** (Raw.argument_count - result.len); + } + }; + break :blk .{ + .toArguments = F.toArguments, + .fromArguments = F.fromArguments, + }; + }, + else => .{}, + }, + .io => switch (command) { + .log => blk: { + const F = struct { + inline fn toResult(raw_result: Raw.Result.Birth) T.Result { + return raw_result.second; + } + + inline fn fromResult(result: T.Result) Raw.Result { + return Raw.Result{ + .birth = .{ + .first = .{}, + .second = result, + }, + }; + } + + inline fn toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!T.Arguments { + const message_ptr = @as(?[*]const u8, @ptrFromInt(raw_arguments[0])) orelse return error.invalid_input; + const message_len = raw_arguments[1]; + if (message_len == 0) return error.invalid_input; + const message = message_ptr[0..message_len]; + return message; + } + + inline fn fromArguments(arguments: T.Arguments) Raw.Arguments { + const result = [2]usize{ @intFromPtr(arguments.ptr), arguments.len }; + return result ++ .{0} ** (Raw.argument_count - result.len); + } + }; + break :blk .{ + 
.toResult = F.toResult, + .fromResult = F.fromResult, + .toArguments = F.toArguments, + .fromArguments = F.fromArguments, + }; + }, + else => .{}, + }, + .cpu => switch (command) { + .get_core_id => blk: { + const F = struct { + inline fn toResult(raw_result: Raw.Result.Birth) T.Result { + return @as(T.Result, @intCast(raw_result.second)); + } + + inline fn fromResult(result: T.Result) Raw.Result { + return Raw.Result{ + .birth = .{ + .first = .{}, + .second = result, + }, + }; + } + }; + break :blk .{ + .toResult = F.toResult, + .fromResult = F.fromResult, + }; + }, + .shutdown => .{ + .toArguments = voidToArguments, + .fromArguments = voidFromArguments, + }, + .get_command_buffer => blk: { + const struct_helper = StructHelperArguments(T.Arguments); + const F = struct { + inline fn toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!T.Arguments { + const args = try struct_helper.toArguments(raw_arguments); + + if (args.options.submission_entry_count == 0) { + return error.invalid_input; + } + + if (args.options.completion_entry_count == 0) { + return error.invalid_input; + } + + return args; + } + + inline fn fromArguments(arguments: T.Arguments) Raw.Arguments { + return struct_helper.fromArguments(arguments); + } + }; + + break :blk .{ + .toArguments = F.toArguments, + .fromArguments = F.fromArguments, + }; + }, + else => .{}, + }, + .boot => switch (command) { + .get_bundle_file_list_size, .get_bundle_size => blk: { + const F = struct { + inline fn toResult(raw_result: Raw.Result.Birth) T.Result { + return raw_result.second; + } + + inline fn fromResult(result: T.Result) Raw.Result { + return Raw.Result{ + .birth = .{ + .first = .{}, + .second = result, + }, + }; + } + }; + + break :blk .{ + .toResult = F.toResult, + .fromResult = F.fromResult, + }; + }, + else => .{}, + }, + .page_table => switch (command) { + .get => blk: { + const F = struct { + inline fn toResult(raw_result: Raw.Result.Birth) void { + _ = raw_result; + } + + inline fn fromResult(result: T.Result) Raw.Result { + _ = result; + return Raw.Result{ + .birth = .{ + .first = .{}, + .second = 0, + }, + }; + } + + const struct_helper = StructHelperArguments(T.Arguments); + inline fn toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!T.Arguments { + const args = try struct_helper.toArguments(raw_arguments); + + return args; + } + + inline fn fromArguments(arguments: T.Arguments) Raw.Arguments { + return struct_helper.fromArguments(arguments); + } + }; + + break :blk .{ + .toResult = F.toResult, + .fromResult = F.fromResult, + .toArguments = F.toArguments, + .fromArguments = F.fromArguments, + }; + }, + else => .{}, + }, + else => .{}, + }, + }; + + return struct { + pub const Capability = capability; + pub const Command = command; + pub const Error = T.ErrorSet.Error; + pub const Result = T.Result; + pub const Arguments = T.Arguments; + + pub const toResult = d.functions.toResult; + pub const fromResult = d.functions.fromResult; + pub const toArguments = d.functions.toArguments; + pub const fromArguments = d.functions.fromArguments; + + pub fn fromError(err: Error) Raw.Result { + const error_enum = switch (err) { + inline else => |comptime_error| @field(T.ErrorSet.Enum, @errorName(comptime_error)), + }; + return Raw.Result{ + .birth = .{ + .first = .{ + .@"error" = @intFromEnum(error_enum), + }, + .second = 0, + }, + }; + } + + pub fn blocking(arguments: T.Arguments) Error!Result { + const raw_arguments = d.functions.fromArguments(arguments); + // TODO: make this more reliable and robust? 
+ const options = birth.interface.Raw.Options{ + .birth = .{ + .type = capability, + .command = @intFromEnum(command), + }, + }; + + const raw_result = birth.arch.syscall(options, raw_arguments); + + const raw_error_value = raw_result.birth.first.@"error"; + comptime { + assert(lib.enumFields(T.ErrorSet.Enum)[0].value == first_valid_error); + } + + return switch (raw_error_value) { + success => switch (T.Result) { + noreturn => unreachable, + else => d.functions.toResult(raw_result.birth), + }, + else => switch (@as(T.ErrorSet.Enum, @enumFromInt(raw_error_value))) { + inline else => |comptime_error_enum| @field(Error, @tagName(comptime_error_enum)), + }, + }; + } + + pub fn buffer(command_buffer: *birth.CommandBuffer, arguments: T.Arguments) void { + _ = arguments; + _ = command_buffer; + + @panic("TODO: buffer"); + } + }; +} + +inline fn voidToResult(raw_result: Raw.Result.Birth) void { + _ = raw_result; +} + +inline fn voidFromResult(result: void) Raw.Result { + _ = result; + + return .{ + .birth = .{ + .first = .{}, + .second = 0, + }, + }; +} + +inline fn voidToArguments(raw_arguments: Raw.Arguments) DefaultErrorSet.Error!void { + _ = raw_arguments; +} + +inline fn voidFromArguments(arguments: void) Raw.Arguments { + _ = arguments; + return .{0} ** Raw.argument_count; +} + +fn getPacked(comptime T: type) lib.Type.Struct { + comptime { + const type_info = @typeInfo(T); + assert(type_info == .Struct); + assert(type_info.Struct.layout == .Packed); + + return type_info.Struct; + } +} + +fn encodePackedStruct(comptime T: type, raw: usize) T { + const Packed = getPacked(T); + const BackingInteger = Packed.backing_integer.?; + return switch (BackingInteger) { + u8, u16, u32, u64 => @bitCast(@as(BackingInteger, raw)), + else => @compileError("Not supported backing integer"), + }; +} + +fn decodePackedStruct(value: anytype) usize { + _ = getPacked(@TypeOf(value)); + + return @bitCast(value); +} + +fn ensureUnionCorrectness(comptime union_type_info: lib.Type.Union) void { + comptime { + assert(union_type_info.layout == .Extern); + assert(union_type_info.tag_type == null); + var first_field = union_type_info.fields[0]; + inline for (union_type_info.fields) |field| { + if (@sizeOf(field.type) != @sizeOf(first_field.type)) { + @compileError("All fields must match in size"); + } + } + } +} + +fn StructHelperArguments(comptime Arguments: type) type { + return struct { + fn toArguments(raw_arguments: Raw.Arguments) !Arguments { + var args = lib.zeroes(Arguments); + + switch (@typeInfo(Arguments)) { + .Struct => |struct_type_info| switch (struct_type_info.layout) { + .Extern => inline for (struct_type_info.fields, 0..) 
|argument_field, index| { + const raw_argument = raw_arguments[index]; + @field(args, argument_field.name) = switch (@sizeOf(argument_field.type) == @sizeOf(@TypeOf(raw_arguments[0]))) { + true => switch (@typeInfo(argument_field.type)) { + .Pointer => @as(?argument_field.type, @ptrFromInt(raw_argument)) orelse return error.invalid_input, + else => @bitCast(raw_argument), + }, + false => @bitCast(lib.cast(@Type(.{ + .Int = .{ + .signedness = .unsigned, + .bits = @bitSizeOf(argument_field.type), + }, + }), raw_argument) orelse return error.corrupted_input), + }; + }, + .Auto => @compileError("Auto structs are forbidden for Birth ABI"), + .Packed => { + args = encodePackedStruct(Arguments, raw_arguments[0]); + }, + }, + .Union => |union_type_info| { + ensureUnionCorrectness(union_type_info); + + const FirstFieldType = union_type_info.fields[0].type; + switch (@typeInfo(FirstFieldType)) { + .Struct => |struct_type_info| switch (struct_type_info.layout) { + .Extern => @compileError("TODO: extern structs"), + .Auto => @compileError("Auto structs are forbidden for Birth ABI"), + .Packed => { + args = @bitCast(encodePackedStruct(FirstFieldType, raw_arguments[0])); + }, + }, + else => @compileError("TODO: " ++ @typeName(FirstFieldType)), + } + }, + else => |unknown_type_info| @compileError("TODO: " ++ @tagName(unknown_type_info)), + } + + return args; + } + + fn fromArguments(arguments: Arguments) Raw.Arguments { + var raw_arguments: Raw.Arguments = .{0} ** Raw.argument_count; + switch (@typeInfo(Arguments)) { + .Struct => |struct_type_info| switch (struct_type_info.layout) { + .Extern => { + inline for (lib.fields(@TypeOf(arguments)), 0..) |argument_field, index| { + const arg_value = @field(arguments, argument_field.name); + raw_arguments[index] = switch (@sizeOf(argument_field.type) == @sizeOf(@TypeOf(raw_arguments[0]))) { + true => switch (@typeInfo(argument_field.type)) { + .Pointer => @intFromPtr(arg_value), + else => @bitCast(arg_value), + }, + false => @as(@Type(.{ + .Int = .{ + .signedness = .unsigned, + .bits = @bitSizeOf(argument_field.type), + }, + }), @bitCast(arg_value)), + }; + } + }, + .Packed => { + raw_arguments[0] = @as(@TypeOf(raw_arguments[0]), @bitCast(arguments)); + }, + .Auto => @compileError("Auto structs are forbidden for Birth ABI"), + }, + .Union => |union_type_info| { + comptime { + assert(union_type_info.layout == .Extern); + } + }, + else => |unknown_type_info| @compileError("TODO: " ++ @tagName(unknown_type_info)), + } + + return raw_arguments; + } + }; +} + +pub const Raw = extern struct { + pub const Arguments = [argument_count]usize; + pub const argument_count = 6; + + pub const Convention = enum(u1) { + emulated = 0, + birth = 1, + }; + + pub const Options = extern union { + general: General, + birth: Birth, + emulated: EmulatedOperatingSystem, + + pub const General = packed struct(u64) { + number: Number, + convention: Convention, + + pub const Number = @Type(.{ + .Int = .{ + .signedness = .unsigned, + .bits = union_space_bits, + }, + }); + + comptime { + assertSize(@This()); + } + + pub inline fn getNumberInteger(general: General, comptime convention: Convention) NumberIntegerType(convention) { + const options_integer = @as(u64, @bitCast(general)); + return @as(NumberIntegerType(convention), @truncate(options_integer)); + } + + pub fn NumberIntegerType(comptime convention: Convention) type { + return switch (convention) { + .birth => birth.IDInteger, + .emulated => u64, + }; + } + }; + + pub const Birth = packed struct(u64) { + type: Capability, + command: 
Command.Type, + reserved: ReservedInt = 0, + convention: Convention = .birth, + + const ReservedInt = @Type(.{ + .Int = .{ + .signedness = .unsigned, + .bits = @bitSizeOf(u64) - @bitSizeOf(Capability) - @bitSizeOf(Command.Type) - @bitSizeOf(Convention), + }, + }); + + comptime { + Options.assertSize(@This()); + } + + // const IDInteger = u16; + // pub const ID = enum(IDInteger) { + // qemu_exit = 0, + // print = 1, + // }; + }; + + pub const EmulatedOperatingSystem = enum(u64) { + _, + comptime { + Options.assertSize(@This()); + } + }; + + pub const union_space_bits = @bitSizeOf(u64) - @bitSizeOf(Convention); + + fn assertSize(comptime T: type) void { + assert(@sizeOf(T) == @sizeOf(u64)); + assert(@bitSizeOf(T) == @bitSizeOf(u64)); + } + + comptime { + assertSize(@This()); + } + }; + + pub const Result = extern union { + general: General, + birth: Birth, + emulated: EmulatedOperatingSystem, + + pub const General = extern struct { + first: packed struct(u64) { + argument: u63, + convention: Convention, + }, + second: u64, + }; + + pub const Birth = extern struct { + first: First, + second: Second, + + pub const First = packed struct(u64) { + padding: u48 = 0, + @"error": u15 = 0, + convention: Convention = .birth, + }; + + pub const Second = u64; + }; + + pub const EmulatedOperatingSystem = extern struct { + result: u64, + reserved: u64 = 0, + }; + + fn assertSize(comptime T: type) void { + assert(@sizeOf(T) == @sizeOf(u64)); + assert(@bitSizeOf(T) == @bitSizeOf(u64)); + } + }; +}; diff --git a/src/birth/syscall.zig b/src/birth/syscall.zig deleted file mode 100644 index 203e5c3..0000000 --- a/src/birth/syscall.zig +++ /dev/null @@ -1,117 +0,0 @@ -const lib = @import("lib"); -const assert = lib.assert; -const log = lib.log.scoped(.Syscall); - -const birth = @import("birth"); -const capabilities = birth.capabilities; - -pub const argument_count = 6; -pub const Arguments = [argument_count]usize; - -pub const Convention = enum(u1) { - linux = 0, - birth = 1, -}; - -pub const Options = extern union { - general: General, - birth: Birth, - linux: Linux, - - pub const General = packed struct(u64) { - number: Number, - convention: Convention, - - pub const Number = lib.IntType(.unsigned, union_space_bits); - - comptime { - assertSize(@This()); - } - - pub inline fn getNumberInteger(general: General, comptime convention: Convention) NumberIntegerType(convention) { - const options_integer = @as(u64, @bitCast(general)); - return @as(NumberIntegerType(convention), @truncate(options_integer)); - } - - pub fn NumberIntegerType(comptime convention: Convention) type { - return switch (convention) { - .birth => birth.IDInteger, - .linux => u64, - }; - } - }; - - pub const Birth = packed struct(u64) { - type: capabilities.Type, - command: capabilities.Subtype, - reserved: lib.IntType(.unsigned, @bitSizeOf(u64) - @bitSizeOf(capabilities.Type) - @bitSizeOf(capabilities.Subtype) - @bitSizeOf(Convention)) = 0, - convention: Convention = .birth, - - comptime { - Options.assertSize(@This()); - } - - const IDInteger = u16; - pub const ID = enum(IDInteger) { - qemu_exit = 0, - print = 1, - }; - }; - - pub const Linux = enum(u64) { - _, - comptime { - Options.assertSize(@This()); - } - }; - - pub const union_space_bits = @bitSizeOf(u64) - @bitSizeOf(Convention); - - fn assertSize(comptime T: type) void { - assert(@sizeOf(T) == @sizeOf(u64)); - assert(@bitSizeOf(T) == @bitSizeOf(u64)); - } - - comptime { - assertSize(@This()); - } -}; - -pub const Result = extern union { - general: General, - birth: Birth, - linux: 
Linux, - - pub const General = extern struct { - first: packed struct(u64) { - argument: u63, - convention: Convention, - }, - second: u64, - }; - - pub const Birth = extern struct { - first: First, - second: Second, - - pub const First = packed struct(u64) { - padding1: u32 = 0, - @"error": u16 = 0, - padding2: u8 = 0, - padding3: u7 = 0, - convention: Convention = .birth, - }; - - pub const Second = u64; - }; - - pub const Linux = extern struct { - result: u64, - reserved: u64 = 0, - }; - - fn assertSize(comptime T: type) void { - assert(@sizeOf(T) == @sizeOf(u64)); - assert(@bitSizeOf(T) == @bitSizeOf(u64)); - } -}; diff --git a/src/bootloader.zig b/src/bootloader.zig index 2470ba4..0785691 100644 --- a/src/bootloader.zig +++ b/src/bootloader.zig @@ -315,6 +315,13 @@ pub const Information = extern struct { return Error.unexpected_memory_map_entry_count; } bootloader_information.configuration.memory_map_diff = @as(u8, @intCast(memory_map_entry_count - new_memory_map_entry_count)); + + const entries = bootloader_information.getMemoryMapEntries(); + const entry = entries[total_allocation.index]; + assert(entry.region.address.value() == total_allocation.region.address.value()); + assert(entry.region.size == total_allocation.region.size); + + page_counters[total_allocation.index] = bootloader_information.getAlignedTotalSize() >> lib.arch.page_shifter(lib.arch.valid_page_sizes[0]); } // Check if the host entry still corresponds to the same index @@ -366,7 +373,9 @@ pub const Information = extern struct { } const aligned_size = lib.alignForward(u64, ph.size_in_memory, lib.arch.valid_page_sizes[0]); - const physical_allocation = try bootloader_information.allocatePages(aligned_size, lib.arch.valid_page_sizes[0], .{}); + const physical_allocation = try bootloader_information.allocatePages(aligned_size, lib.arch.valid_page_sizes[0], .{ + .virtual_address = @bitCast(@as(u64, 0)), + }); const physical_address = physical_allocation.address; const virtual_address = VirtualAddress.new(ph.virtual_address); const flags = Mapping.Flags{ .write = ph.flags.writable, .execute = ph.flags.executable }; @@ -469,7 +478,7 @@ pub const Information = extern struct { } pub inline fn getSlice(information: *const Information, comptime offset_name: Slice.Name) []Slice.TypeMap[@intFromEnum(offset_name)] { - const slice_offset = information.slices.array.values[@intFromEnum(offset_name)]; + const slice_offset = &information.slices.array.values[@intFromEnum(offset_name)]; return slice_offset.dereference(offset_name, information); } @@ -684,6 +693,10 @@ pub const MemoryMapEntry = extern struct { bad_memory = 2, }; + pub fn getUsedRegion(mmap_entry: MemoryMapEntry, page_counter: u32) PhysicalMemoryRegion { + return mmap_entry.region.slice(page_counter << lib.arch.page_shifter(lib.arch.valid_page_sizes[0])); + } + pub fn getFreeRegion(mmap_entry: MemoryMapEntry, page_counter: u32) PhysicalMemoryRegion { return mmap_entry.region.offset(page_counter << lib.arch.page_shifter(lib.arch.valid_page_sizes[0])); } diff --git a/src/bootloader/bios.zig b/src/bootloader/bios.zig index 964f2d6..fb7fb56 100644 --- a/src/bootloader/bios.zig +++ b/src/bootloader/bios.zig @@ -68,10 +68,8 @@ pub const Disk = extern struct { .segment = 0, .lba = lba, }; - lib.log.debug("DAP: {}", .{dap}); const dap_address = @intFromPtr(&dap); - lib.log.debug("DAP address: 0x{x}", .{dap_address}); const dap_offset = offset(dap_address); const dap_segment = segment(dap_address); var registers = Registers{ @@ -81,9 +79,7 @@ pub const Disk = extern struct { .ds = 
dap_segment,
         };
 
-        lib.log.debug("Start int", .{});
         interrupt(0x13, &registers, &registers);
-        lib.log.debug("End int", .{});
 
         if (registers.eflags.flags.carry_flag) return error.read_error;
 
@@ -92,7 +88,6 @@ pub const Disk = extern struct {
         const src_slice = buffer[0..bytes_to_copy];
 
         if (maybe_provided_buffer) |provided_buffer| {
-            lib.log.debug("A", .{});
             const dst_slice = provided_buffer[@as(usize, @intCast(provided_buffer_offset))..][0..bytes_to_copy];
 
             // TODO: report Zig that this codegen is so bad that we have to use rep movsb instead to make it go fast
@@ -102,19 +97,11 @@ pub const Disk = extern struct {
             const use_rep_movsb = true;
             if (use_rep_movsb) {
                 lib.memcpy(dst_slice, src_slice);
-                const bytes_left = asm volatile (
-                    \\rep movsb
-                    : [ret] "={ecx}" (-> usize),
-                    : [dest] "{edi}" (dst_slice.ptr),
-                      [src] "{esi}" (src_slice.ptr),
-                      [len] "{ecx}" (src_slice.len),
-                );
-                assert(bytes_left == 0);
             } else {
                 @memcpy(dst_slice, src_slice);
             }
         } else {
-            lib.log.debug("B", .{});
+            //lib.log.debug("B", .{});
         }
     }
 
diff --git a/src/bootloader/birth/bios/main.zig b/src/bootloader/birth/bios/main.zig
index 2961eb9..325c803 100644
--- a/src/bootloader/birth/bios/main.zig
+++ b/src/bootloader/birth/bios/main.zig
@@ -5,8 +5,7 @@ const log = lib.log;
 const privileged = @import("privileged");
 const ACPI = privileged.ACPI;
 const MemoryManager = privileged.MemoryManager;
-const PhysicalHeap = privileged.PhyicalHeap;
-const writer = privileged.writer;
+pub const writer = privileged.writer;
 const stopCPU = privileged.arch.stopCPU;
 
 const GDT = privileged.arch.x86_64.GDT;
@@ -54,10 +53,7 @@ pub const std_options = struct {
         _ = format;
         _ = scope;
         _ = level;
-        // _ = level;
-        // writer.writeByte('[') catch stopCPU();
-        // writer.writeAll(@tagName(scope)) catch stopCPU();
-        // writer.writeAll("] ") catch stopCPU();
+        _ = level;
         // lib.format(writer, format, args) catch stopCPU();
         // writer.writeByte('\n') catch stopCPU();
     }
@@ -83,17 +79,13 @@ const Filesystem = extern struct {
     }
 
     pub fn readFile(filesystem: *Filesystem, file_path: []const u8, file_buffer: []u8) ![]const u8 {
-        log.debug("File {s} read started", .{file_path});
         assert(filesystem.fat_allocator.allocated <= filesystem.fat_allocator.buffer.len);
         const file = try filesystem.fat_cache.readFileToBuffer(file_path, file_buffer);
-        log.debug("File read succeeded", .{});
         return file;
     }
 
     pub fn sneakFile(filesystem: *Filesystem, file_path: []const u8, size: usize) ![]const u8 {
-        log.debug("File {s} read started", .{file_path});
         const file = try filesystem.fat_cache.readFileToCache(file_path, size);
-        log.debug("File read succeeded", .{});
         return file;
     }
 
diff --git a/src/bootloader/birth/uefi/main.zig b/src/bootloader/birth/uefi/main.zig
index 049216d..874b6d6 100644
--- a/src/bootloader/birth/uefi/main.zig
+++ b/src/bootloader/birth/uefi/main.zig
@@ -14,16 +14,13 @@ const uefi = @import("uefi");
 const BootloaderInformation = uefi.BootloaderInformation;
 const BootServices = uefi.BootServices;
 const ConfigurationTable = uefi.ConfigurationTable;
-const FileProtocol = uefi.FileProtocol;
 const Handle = uefi.Handle;
-const LoadedImageProtocol = uefi.LoadedImageProtocol;
 const LoadKernelFunction = uefi.LoadKernelFunction;
 const MemoryCategory = uefi.MemoryCategory;
 const MemoryDescriptor = uefi.MemoryDescriptor;
 const ProgramSegment = uefi.ProgramSegment;
 const Protocol = uefi.Protocol;
 const page_table_estimated_size = uefi.page_table_estimated_size;
-const SimpleFilesystemProtocol = uefi.SimpleFilesystemProtocol;
 const SystemTable = uefi.SystemTable;
 
 const 
privileged = @import("privileged"); @@ -68,7 +65,7 @@ pub fn panic(message: []const u8, _: ?*lib.StackTrace, _: ?usize) noreturn { } const Filesystem = extern struct { - root: *FileProtocol, + root: *uefi.protocol.File, buffer: [0x200 * 10]u8 = undefined, pub fn deinitialize(filesystem: *Filesystem) !void { @@ -91,7 +88,7 @@ const Filesystem = extern struct { } const FileDescriptor = struct { - handle: *FileProtocol, + handle: *uefi.protocol.File, path_size: u32, }; @@ -111,14 +108,14 @@ const Filesystem = extern struct { return Error.boot_services_exited; } - var file: *FileProtocol = undefined; + var file: *uefi.protocol.File = undefined; var path_buffer: [256:0]u16 = undefined; const length = try lib.unicode.utf8ToUtf16Le(&path_buffer, file_path); path_buffer[length] = 0; const path = path_buffer[0..length :0]; const uefi_path = if (path[0] == '/') path[1..] else path; - try uefi.Try(filesystem.root.open(&file, uefi_path, FileProtocol.efi_file_mode_read, 0)); + try uefi.Try(filesystem.root.open(&file, uefi_path, uefi.protocol.File.efi_file_mode_read, 0)); const result = FileDescriptor{ .handle = file, @@ -236,16 +233,16 @@ const Initialization = struct { }, .filesystem = .{ .root = blk: { - const loaded_image = try Protocol.open(LoadedImageProtocol, boot_services, handle); - const filesystem_protocol = try Protocol.open(SimpleFilesystemProtocol, boot_services, loaded_image.device_handle orelse @panic("No device handle")); + const loaded_image = try Protocol.open(uefi.protocol.LoadedImage, boot_services, handle); + const filesystem_protocol = try Protocol.open(uefi.protocol.SimpleFileSystem, boot_services, loaded_image.device_handle orelse @panic("No device handle")); - var root: *FileProtocol = undefined; + var root: *uefi.protocol.File = undefined; try uefi.Try(filesystem_protocol.openVolume(&root)); break :blk root; }, }, .framebuffer = blk: { - const gop = try Protocol.locate(uefi.GraphicsOutputProtocol, boot_services); + const gop = try Protocol.locate(uefi.protocol.GraphicsOutput, boot_services); const pixel_format_info: struct { red_color_mask: bootloader.Framebuffer.ColorMask, @@ -253,20 +250,19 @@ const Initialization = struct { green_color_mask: bootloader.Framebuffer.ColorMask, bpp: u8, } = switch (gop.mode.info.pixel_format) { - .PixelRedGreenBlueReserved8BitPerColor => .{ + .RedGreenBlueReserved8BitPerColor => .{ .red_color_mask = .{ .size = 8, .shift = 0 }, .green_color_mask = .{ .size = 8, .shift = 8 }, .blue_color_mask = .{ .size = 8, .shift = 16 }, .bpp = 32, }, - .PixelBlueGreenRedReserved8BitPerColor => .{ + .BlueGreenRedReserved8BitPerColor => .{ .red_color_mask = .{ .size = 8, .shift = 16 }, .green_color_mask = .{ .size = 8, .shift = 8 }, .blue_color_mask = .{ .size = 8, .shift = 0 }, .bpp = 32, }, - .PixelBitMask, .PixelBltOnly => @panic("Unsupported pixel format"), - .PixelFormatMax => @panic("Corrupted pixel format"), + .BitMask, .BltOnly => @panic("Unsupported pixel format"), }; break :blk bootloader.Framebuffer{ diff --git a/src/bootloader/uefi.zig b/src/bootloader/uefi.zig index 3ac8fc8..2a2440a 100644 --- a/src/bootloader/uefi.zig +++ b/src/bootloader/uefi.zig @@ -8,13 +8,14 @@ const uefi = lib.uefi; pub const BootServices = uefi.tables.BootServices; pub const ConfigurationTable = uefi.tables.ConfigurationTable; pub const Error = Status.EfiError; -pub const FileInfo = uefi.protocols.FileInfo; -pub const FileProtocol = uefi.protocols.FileProtocol; -pub const GraphicsOutputProtocol = uefi.protocols.GraphicsOutputProtocol; -pub const LoadedImageProtocol = 
uefi.protocols.LoadedImageProtocol; +pub const FileInfo = uefi.FileInfo; +// pub const FileProtocol = protocol.FileProtocol; +// pub const GraphicsOutputProtocol = protocol.GraphicsOutputProtocol; +// pub const LoadedImageProtocol = protocol.LoadedImageProtocol; +// pub const SimpleFilesystemProtocol = protocol.SimpleFileSystemProtocol; pub const Handle = uefi.Handle; pub const MemoryDescriptor = uefi.tables.MemoryDescriptor; -pub const SimpleFilesystemProtocol = uefi.protocols.SimpleFileSystemProtocol; +pub const protocol = uefi.protocol; pub const Status = uefi.Status; pub const SystemTable = uefi.tables.SystemTable; pub const Try = Status.err; @@ -25,9 +26,6 @@ pub const page_size = 0x1000; pub const page_shifter = lib.arch.page_shifter(page_size); const privileged = @import("privileged"); -const PhysicalAddress = privileged.PhysicalAddress; -const VirtualAddress = privileged.VirtualAddress; -const VirtualMemoryRegion = privileged.VirtualMemoryRegion; const stopCPU = privileged.arch.stopCPU; pub fn panic(comptime format: []const u8, arguments: anytype) noreturn { diff --git a/src/common.zig b/src/common.zig index 66f6d8e..0337aab 100644 --- a/src/common.zig +++ b/src/common.zig @@ -1,254 +1,25 @@ -const compiler_builtin = @import("builtin"); -pub const cpu = compiler_builtin.cpu; -pub const os = compiler_builtin.os.tag; -pub const build_mode = compiler_builtin.mode; -pub const is_test = compiler_builtin.is_test; +// This file is meant to be shared between all parts of the project, including build.zig +const std = @import("std"); +const maxInt = std.math.maxInt; +const containsAtLeast = std.mem.containsAtLeast; +const Target = std.Target; +const Cpu = Target.Cpu; +const OptimizeMode = std.builtin.OptimizeMode; -pub const kb = 1024; -pub const mb = kb * 1024; -pub const gb = mb * 1024; -pub const tb = gb * 1024; +const Allocator = std.mem.Allocator; -pub const SizeUnit = enum(u64) { - byte = 1, - kilobyte = 1024, - megabyte = 1024 * 1024, - gigabyte = 1024 * 1024 * 1024, - terabyte = 1024 * 1024 * 1024 * 1024, -}; +const builtin = @import("builtin"); +const cpu = builtin.cpu; +const os = builtin.os.tag; -pub const std = @import("std"); -pub const Target = std.Target; -pub const Cpu = Target.Cpu; -pub const CrossTarget = std.zig.CrossTarget; - -pub const log = std.log; - -pub const Atomic = std.atomic.Atomic; - -pub const Reader = std.io.Reader; -pub const Writer = std.io.Writer; - -pub const FixedBufferStream = std.io.FixedBufferStream; -pub const fixedBufferStream = std.io.fixedBufferStream; - -pub fn assert(ok: bool) void { - if (!ok) { - if (@inComptime()) { - @compileError("Assert failed!"); - } else { - @panic("Assert failed!"); - } - } -} - -pub const deflate = std.compress.deflate; - -const debug = std.debug; -pub const print = debug.print; -pub const StackIterator = debug.StackIterator; -pub const dwarf = std.dwarf; -pub const ModuleDebugInfo = std.debug.ModuleDebugInfo; - -pub const elf = std.elf; - -const fmt = std.fmt; -pub const format = std.fmt.format; -pub const FormatOptions = fmt.FormatOptions; -pub const bufPrint = fmt.bufPrint; -pub const allocPrint = fmt.allocPrint; -pub const comptimePrint = fmt.comptimePrint; -pub const parseUnsigned = fmt.parseUnsigned; - -const heap = std.heap; -pub const FixedBufferAllocator = heap.FixedBufferAllocator; - -pub const json = std.json; - -const mem = std.mem; -pub const ZigAllocator = mem.Allocator; -pub const equal = mem.eql; -pub const length = mem.len; -pub const startsWith = mem.startsWith; -pub const endsWith = mem.endsWith; -pub 
const indexOf = mem.indexOf; -// Ideal for small inputs -pub const indexOfPosLinear = mem.indexOfPosLinear; -pub const lastIndexOf = mem.lastIndexOf; -pub const asBytes = mem.asBytes; -pub const readIntBig = mem.readIntBig; -pub const readIntSliceBig = mem.readIntSliceBig; -pub const concat = mem.concat; -pub const sliceAsBytes = mem.sliceAsBytes; -pub const bytesAsSlice = mem.bytesAsSlice; -pub const alignForward = mem.alignForward; -pub const alignBackward = mem.alignBackward; -pub const isAligned = mem.isAligned; -pub const isAlignedGeneric = mem.isAlignedGeneric; -pub const reverse = mem.reverse; -pub const tokenize = mem.tokenize; -pub const containsAtLeast = mem.containsAtLeast; -pub const sliceTo = mem.sliceTo; -pub const swap = mem.swap; - -pub const random = std.rand; - -pub const testing = std.testing; - -pub const sort = std.sort; - -pub fn fieldSize(comptime T: type, field_name: []const u8) comptime_int { - var foo: T = undefined; - return @sizeOf(@TypeOf(@field(foo, field_name))); -} - -const DiffError = error{ - diff, -}; - -pub fn diff(file1: []const u8, file2: []const u8) !void { - assert(file1.len == file2.len); - var different_bytes: u64 = 0; - for (file1, 0..) |byte1, index| { - const byte2 = file2[index]; - const is_different_byte = byte1 != byte2; - different_bytes += @intFromBool(is_different_byte); - if (is_different_byte) { - log.debug("Byte [0x{x}] is different: 0x{x} != 0x{x}", .{ index, byte1, byte2 }); - } - } - - if (different_bytes != 0) { - log.debug("Total different bytes: 0x{x}", .{different_bytes}); - return DiffError.diff; - } -} - -pub fn zeroes(comptime T: type) T { - var result: T = undefined; - const slice = asBytes(&result); - @memset(slice, 0); - return result; -} - -const ascii = std.ascii; -pub const upperString = ascii.upperString; -pub const isUpper = ascii.isUpper; -pub const isAlphabetic = ascii.isAlphabetic; - -const std_builtin = std.builtin; -pub const AtomicRmwOp = std_builtin.AtomicRmwOp; -pub const AtomicOrder = std_builtin.AtomicOrder; -pub const Type = std_builtin.Type; -pub const StackTrace = std_builtin.StackTrace; -pub const SourceLocation = std_builtin.SourceLocation; - -pub fn FieldType(comptime T: type, comptime name: []const u8) type { - return @TypeOf(@field(@as(T, undefined), name)); -} - -// META PROGRAMMING -pub const AutoEnumArray = std.enums.EnumArray; -pub const fields = std.meta.fields; -pub const IntType = std.meta.Int; -pub const enumFromInt = std.meta.enumFromInt; -pub const stringToEnum = std.meta.stringToEnum; -pub const Tag = std.meta.Tag; - -const math = std.math; -pub const maxInt = math.maxInt; -pub const min = math.min; -pub const divCeil = math.divCeil; -pub const clamp = math.clamp; -pub const isPowerOfTwo = math.isPowerOfTwo; -pub const mul = math.mul; -pub const cast = math.cast; - -pub const unicode = std.unicode; - -pub const uefi = std.os.uefi; - -pub const DiskType = enum(u32) { - virtio = 0, - nvme = 1, - ahci = 2, - ide = 3, - memory = 4, - bios = 5, - - pub const count = enumCount(@This()); -}; - -pub const FilesystemType = enum(u32) { - birth = 0, - ext2 = 1, - fat32 = 2, - - pub const count = enumCount(@This()); -}; - -pub fn enumFields(comptime E: type) []const Type.EnumField { - return @typeInfo(E).Enum.fields; -} - -pub const enumValues = std.enums.values; - -pub fn enumCount(comptime E: type) usize { - return enumFields(E).len; -} - -pub const PartitionTableType = enum { - mbr, - gpt, -}; - -pub const supported_architectures = [_]Cpu.Arch{ - .x86_64, - //.aarch64, - //.riscv64, -}; - -pub fn 
architectureIndex(comptime arch: Cpu.Arch) comptime_int { - inline for (supported_architectures, 0..) |architecture, index| { - if (arch == architecture) return index; - } - - @compileError("Architecture not found"); -} - -pub const architecture_bootloader_map = blk: { - var array: [supported_architectures.len][]const ArchitectureBootloader = undefined; - - array[architectureIndex(.x86_64)] = &.{ - .{ - .id = .birth, - .protocols = &.{ .bios, .uefi }, - }, - .{ - .id = .limine, - .protocols = &.{ .bios, .uefi }, - }, - }; - - // array[architectureIndex(.aarch64)] = &.{ - // .{ - // .id = .birth, - // .protocols = &.{.uefi}, - // }, - // .{ - // .id = .limine, - // .protocols = &.{.uefi}, - // }, - // }; - - // array[architectureIndex(.riscv64)] = &.{ - // .{ - // .id = .birth, - // .protocols = &.{.uefi}, - // }, - // }; - - break :blk array; +pub const Configuration = struct { + architecture: Cpu.Arch, + bootloader: Bootloader, + boot_protocol: Bootloader.Protocol, + execution_environment: ExecutionEnvironment, + optimize_mode: OptimizeMode, + execution_type: ExecutionType, + executable_kind: std.Build.CompileStep.Kind, }; pub const Bootloader = enum(u32) { @@ -261,147 +32,18 @@ pub const Bootloader = enum(u32) { }; }; -pub const ArchitectureBootloader = struct { - id: Bootloader, - protocols: []const Bootloader.Protocol, -}; - -pub const TraditionalExecutionMode = enum(u1) { - privileged = 0, - user = 1, -}; - pub const ExecutionEnvironment = enum { qemu, }; -pub const ImageConfig = struct { - sector_count: u64, - sector_size: u16, - partition_table: PartitionTableType, - partition: PartitionConfig, - - pub const default_path = "config/image_config.json"; - - pub fn get(allocator: ZigAllocator, path: []const u8) !ImageConfig { - const image_config_file = try std.fs.cwd().readFileAlloc(allocator, path, maxInt(usize)); - const parsed_image_configuration = try std.json.parseFromSlice(ImageConfig, allocator, image_config_file, .{}); - return parsed_image_configuration.value; - } -}; - -pub const PartitionConfig = struct { - name: []const u8, - filesystem: FilesystemType, - first_lba: u64, -}; - -pub const QEMU = extern struct { - pub const isa_debug_exit = ISADebugExit{}; - - pub const ISADebugExit = extern struct { - io_base: u8 = 0xf4, - io_size: u8 = @sizeOf(u32), - }; - - pub const ExitCode = enum(u32) { - success = 0x10, - failure = 0x11, - _, - }; -}; - -pub const OptimizeMode = std.builtin.OptimizeMode; - -pub const Configuration = struct { - architecture: Cpu.Arch, - bootloader: Bootloader, - boot_protocol: Bootloader.Protocol, - execution_environment: ExecutionEnvironment, - optimize_mode: OptimizeMode, - execution_type: ExecutionType, - executable_kind: std.Build.CompileStep.Kind, -}; - -pub const QEMUOptions = packed struct { - is_test: bool, - is_debug: bool, -}; - pub const ExecutionType = enum { emulated, accelerated, }; -pub const Suffix = enum { - bootloader, - cpu_driver, - image, - complete, - - pub fn fromConfiguration(suffix: Suffix, allocator: ZigAllocator, configuration: Configuration, prefix: ?[]const u8) ![]const u8 { - const cpu_driver_suffix = [_][]const u8{ - @tagName(configuration.optimize_mode), - "_", - @tagName(configuration.architecture), - "_", - @tagName(configuration.executable_kind), - }; - - const bootloader_suffix = [_][]const u8{ - @tagName(configuration.architecture), - "_", - @tagName(configuration.bootloader), - "_", - @tagName(configuration.boot_protocol), - }; - - const image_suffix = [_][]const u8{ - @tagName(configuration.optimize_mode), - "_", - 
} ++ bootloader_suffix ++ [_][]const u8{ - "_", - @tagName(configuration.executable_kind), - }; - - const complete_suffix = image_suffix ++ [_][]const u8{ - "_", - @tagName(configuration.execution_type), - "_", - @tagName(configuration.execution_environment), - }; - - return try std.mem.concat(allocator, u8, &switch (suffix) { - .cpu_driver => if (prefix) |pf| [1][]const u8{pf} ++ cpu_driver_suffix else cpu_driver_suffix, - .bootloader => if (prefix) |pf| [1][]const u8{pf} ++ bootloader_suffix else bootloader_suffix, - .image => if (prefix) |pf| [1][]const u8{pf} ++ image_suffix else image_suffix, - .complete => if (prefix) |pf| [1][]const u8{pf} ++ complete_suffix else complete_suffix, - }); - } -}; - -pub const Module = struct { - program: UserProgram, - name: []const u8, -}; -pub const UserProgram = struct { - kind: Kind, - dependencies: []const Dependency, - - pub const Kind = enum { - zig_exe, - }; - - pub const Dependency = struct { - foo: u64 = 0, - }; -}; - -pub const BirthProgram = enum { - bootloader, - cpu, - user, - host, +pub const TraditionalExecutionMode = enum(u1) { + privileged = 0, + user = 1, }; pub fn canVirtualizeWithQEMU(architecture: Cpu.Arch, ci: bool) bool { @@ -418,9 +60,6 @@ pub fn canVirtualizeWithQEMU(architecture: Cpu.Arch, ci: bool) bool { }; } -pub const default_cpu_name = "/cpu"; -pub const default_init_file = "/init"; - pub const ArgumentParser = struct { pub const null_specifier = "-"; @@ -501,5 +140,45 @@ pub const ArgumentParser = struct { }; }; -pub const default_disk_size = 64 * 1024 * 1024; -pub const default_sector_size = 0x200; +fn enumCount(comptime E: type) comptime_int { + return @typeInfo(E).Enum.fields.len; +} + +pub const QEMUOptions = packed struct { + is_test: bool, + is_debug: bool, +}; + +pub const PartitionTableType = enum { + mbr, + gpt, +}; + +pub const ImageConfig = struct { + sector_count: u64, + sector_size: u16, + partition_table: PartitionTableType, + partition: PartitionConfig, + + pub const default_path = "config/image_config.json"; + + pub fn get(allocator: Allocator, path: []const u8) !ImageConfig { + const image_config_file = try std.fs.cwd().readFileAlloc(allocator, path, maxInt(usize)); + const parsed_image_configuration = try std.json.parseFromSlice(ImageConfig, allocator, image_config_file, .{}); + return parsed_image_configuration.value; + } +}; + +pub const PartitionConfig = struct { + name: []const u8, + filesystem: FilesystemType, + first_lba: u64, +}; + +pub const FilesystemType = enum(u32) { + birth = 0, + ext2 = 1, + fat32 = 2, + + pub const count = enumCount(@This()); +}; diff --git a/src/cpu.zig b/src/cpu.zig index c38ae9a..441603e 100644 --- a/src/cpu.zig +++ b/src/cpu.zig @@ -13,64 +13,44 @@ const PhysicalAddress = lib.PhysicalAddress; const PhysicalAddressSpace = lib.PhysicalAddressSpace; const PhysicalMemoryRegion = lib.PhysicalMemoryRegion; const stopCPU = privileged.arch.stopCPU; -const VirtualAddress = privileged.VirtualAddress; -const VirtualMemoryRegion = privileged.VirtualMemoryRegion; +const VirtualAddress = lib.VirtualAddress; +const VirtualMemoryRegion = lib.VirtualMemoryRegion; +const paging = privileged.arch.paging; const birth = @import("birth"); -pub const test_runner = @import("cpu/test_runner.zig"); pub const arch = @import("cpu/arch.zig"); -pub const capabilities = @import("cpu/capabilities.zig"); +pub const interface = @import("cpu/interface.zig"); +pub const init = @import("cpu/init.zig"); + +const PageTableRegions = arch.init.PageTableRegions; pub export var stack: [0x8000]u8 align(0x1000) 
= undefined; -pub export var page_allocator = PageAllocator{ - .head = null, - .list_allocator = .{ - .u = .{ - .primitive = .{ - .backing_4k_page = undefined, - .allocated = 0, - }, - }, - .primitive = true, - }, -}; pub var bundle: []const u8 = &.{}; pub var bundle_files: []const u8 = &.{}; +pub export var page_allocator = PageAllocator{}; pub export var user_scheduler: *UserScheduler = undefined; -pub export var driver: *align(lib.arch.valid_page_sizes[0]) Driver = undefined; +pub export var heap = HeapImplementation(false){}; +pub var debug_info: lib.ModuleDebugInfo = undefined; pub export var page_tables: CPUPageTables = undefined; pub var file: []align(lib.default_sector_size) const u8 = undefined; pub export var core_id: u32 = 0; pub export var bsp = false; var panic_lock = lib.Spinlock.released; -/// This data structure holds the information needed to run a core -pub const Driver = extern struct { - init_root_capability: capabilities.RootDescriptor, - valid: bool, - padding: [padding_byte_count]u8 = .{0} ** padding_byte_count, - const padding_byte_count = lib.arch.valid_page_sizes[0] - @sizeOf(bool) - @sizeOf(capabilities.RootDescriptor); - - pub inline fn getRootCapability(drv: *Driver) *capabilities.Root { - return drv.init_root_capability.value; - } - - comptime { - // @compileLog(@sizeOf(Driver)); - assert(lib.isAligned(@sizeOf(Driver), lib.arch.valid_page_sizes[0])); - } -}; - /// This data structure holds the information needed to run a program in a core (cpu side) pub const UserScheduler = extern struct { - capability_root_node: capabilities.Root, - common: *birth.UserScheduler, + s: S, padding: [padding_byte_count]u8 = .{0} ** padding_byte_count, - const total_size = @sizeOf(capabilities.Root) + @sizeOf(*birth.UserScheduler); + const S = extern struct { + capability_root_node: interface.Root, + common: *birth.Scheduler.Common, + }; + + const total_size = @sizeOf(S); const aligned_size = lib.alignForward(usize, total_size, lib.arch.valid_page_sizes[0]); const padding_byte_count = aligned_size - total_size; @@ -81,7 +61,7 @@ pub const UserScheduler = extern struct { } }; -const print_stack_trace = false; +const print_stack_trace = true; var panic_count: usize = 0; inline fn panicPrologue(comptime format: []const u8, arguments: anytype) !void { @@ -92,9 +72,9 @@ inline fn panicPrologue(comptime format: []const u8, arguments: anytype) !void { try writer.writeAll(lib.Color.get(.bold)); try writer.writeAll(lib.Color.get(.red)); try writer.writeAll("[CPU DRIVER] [PANIC] "); - try writer.writeAll(lib.Color.get(.reset)); try writer.print(format, arguments); try writer.writeByte('\n'); + try writer.writeAll(lib.Color.get(.reset)); } inline fn panicEpilogue() noreturn { @@ -103,64 +83,68 @@ inline fn panicEpilogue() noreturn { shutdown(.failure); } -// inline fn printStackTrace(maybe_stack_trace: ?*lib.StackTrace) !void { -// if (maybe_stack_trace) |stack_trace| { -// var debug_info = try getDebugInformation(); -// try writer.writeAll("Stack trace:\n"); -// var frame_index: usize = 0; -// var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len); -// -// while (frames_left != 0) : ({ -// frames_left -= 1; -// frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len; -// }) { -// const return_address = stack_trace.instruction_addresses[frame_index]; -// try writer.print("[{}] ", .{frame_index}); -// try printSourceAtAddress(&debug_info, return_address); -// } -// } else { -// try writer.writeAll("Stack trace not available\n"); -// } -// } 
+inline fn printStackTrace(maybe_stack_trace: ?*lib.StackTrace) !void { + if (maybe_stack_trace) |stack_trace| { + try writer.writeAll("Stack trace:\n"); + var frame_index: usize = 0; + var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len); -// inline fn printStackTraceFromStackIterator(return_address: usize, frame_address: usize) !void { -// var debug_info = try getDebugInformation(); -// var stack_iterator = lib.StackIterator.init(return_address, frame_address); -// var frame_index: usize = 0; -// try writer.writeAll("Stack trace:\n"); -// -// try printSourceAtAddress(&debug_info, return_address); -// while (stack_iterator.next()) |address| : (frame_index += 1) { -// try writer.print("[{}] ", .{frame_index}); -// try printSourceAtAddress(&debug_info, address); -// } -// } + while (frames_left != 0) : ({ + frames_left -= 1; + frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len; + }) { + const return_address = stack_trace.instruction_addresses[frame_index]; + try writer.print("[{}] ", .{frame_index}); + try printSourceAtAddress(return_address); + try writer.writeByte('\n'); + } + } else { + try writer.writeAll("Stack trace not available\n"); + } +} -// fn printSourceAtAddress(debug_info: *lib.ModuleDebugInfo, address: usize) !void { -// if (debug_info.findCompileUnit(address)) |compile_unit| { -// const symbol = .{ -// .symbol_name = debug_info.getSymbolName(address) orelse "???", -// .compile_unit_name = compile_unit.die.getAttrString(debug_info, lib.dwarf.AT.name, debug_info.debug_str, compile_unit.*) catch "???", -// .line_info = debug_info.getLineNumberInfo(heap_allocator.toZig(), compile_unit.*, address) catch null, -// }; -// try writer.print("0x{x}: {s}!{s} {s}:{}:{}\n", .{ address, symbol.symbol_name, symbol.compile_unit_name, symbol.line_info.?.file_name, symbol.line_info.?.line, symbol.line_info.?.column }); -// } else |err| { -// return err; -// } -// } +inline fn printStackTraceFromStackIterator(return_address: usize, frame_address: usize) !void { + var stack_iterator = lib.StackIterator.init(return_address, frame_address); + var frame_index: usize = 0; + try writer.writeAll("Stack trace:\n"); + + while (stack_iterator.next()) |address| : (frame_index += 1) { + if (address == 0) break; + try writer.print("[{}] ", .{frame_index}); + try printSourceAtAddress(address); + try writer.writeByte('\n'); + } +} + +fn printSourceAtAddress(address: usize) !void { + const compile_unit = debug_info.findCompileUnit(address) catch { + try writer.print("0x{x}: ???", .{address}); + return; + }; + const symbol_name = debug_info.getSymbolName(address) orelse "???"; + const compile_unit_name = compile_unit.die.getAttrString(&debug_info, lib.dwarf.AT.name, debug_info.section(.debug_str), compile_unit.*) catch "???"; + const line_info = debug_info.getLineNumberInfo(heap.allocator.zigUnwrap(), compile_unit.*, address) catch null; + const symbol = .{ + .symbol_name = symbol_name, + .compile_unit_name = compile_unit_name, + .line_info = line_info, + }; + + const file_name = if (symbol.line_info) |li| li.file_name else "???"; + const line = if (symbol.line_info) |li| li.line else 0; + const column = if (symbol.line_info) |li| li.column else 0; + try writer.print("0x{x}: {s}!{s} {s}:{}:{}", .{ address, symbol.symbol_name, symbol.compile_unit_name, file_name, line, column }); +} pub fn panicWithStackTrace(stack_trace: ?*lib.StackTrace, comptime format: []const u8, arguments: anytype) noreturn { - _ = stack_trace; panicPrologue(format, arguments) catch 
{}; - // if (print_stack_trace) printStackTrace(stack_trace) catch {}; + if (print_stack_trace) printStackTrace(stack_trace) catch {}; panicEpilogue(); } pub fn panicFromInstructionPointerAndFramePointer(return_address: usize, frame_address: usize, comptime format: []const u8, arguments: anytype) noreturn { - _ = frame_address; - _ = return_address; panicPrologue(format, arguments) catch {}; - //if (print_stack_trace) printStackTraceFromStackIterator(return_address, frame_address) catch {}; + if (print_stack_trace) printStackTraceFromStackIterator(return_address, frame_address) catch {}; panicEpilogue(); } @@ -168,268 +152,465 @@ pub fn panic(comptime format: []const u8, arguments: anytype) noreturn { @call(.always_inline, panicFromInstructionPointerAndFramePointer, .{ @returnAddress(), @frameAddress(), format, arguments }); } -pub var syscall_count: usize = 0; +pub var command_count: usize = 0; pub inline fn shutdown(exit_code: lib.QEMU.ExitCode) noreturn { log.debug("Printing stats...", .{}); - log.debug("Syscall count: {}", .{syscall_count}); + log.debug("System call count: {}", .{interface.system_call_count}); privileged.shutdown(exit_code); } -pub const PageAllocator = extern struct { - head: ?*Entry, - list_allocator: ListAllocator, - total_allocated_size: u32 = 0, +pub const RegionList = extern struct { + regions: [list_region_count]PhysicalMemoryRegion = .{PhysicalMemoryRegion.invalid()} ** list_region_count, + metadata: Metadata = .{}, - fn getPageAllocatorInterface(pa: *PageAllocator) PageAllocatorInterface { + pub const Metadata = extern struct { + reserved: usize = 0, + bitset: lib.BitsetU64(list_region_count) = .{}, + previous: ?*RegionList = null, + next: ?*RegionList = null, + + comptime { + assert(@sizeOf(Metadata) == expected_size); + assert(@bitSizeOf(usize) - list_region_count < 8); + } + + const expected_size = 4 * @sizeOf(usize); + }; + + const Error = error{ + OutOfMemory, + no_space, + misalignment_page_size, + }; + + pub fn allocateAligned(list: *RegionList, size: usize, alignment: usize) Error!PhysicalMemoryRegion { + assert(alignment % lib.arch.valid_page_sizes[0] == 0); + + for (&list.regions, 0..) |*region, _index| { + const index: u6 = @intCast(_index); + assert(lib.isAligned(region.size, lib.arch.valid_page_sizes[0])); + assert(lib.isAligned(region.address.value(), lib.arch.valid_page_sizes[0])); + + if (list.metadata.bitset.isSet(index)) { + if (lib.isAligned(region.address.value(), alignment)) { + if (region.size >= size) { + const result = region.takeSlice(size) catch unreachable; + if (region.size == 0) { + list.remove(@intCast(index)); + } + + return result; + } + } + } + } + + return Error.OutOfMemory; + } + + pub fn remove(list: *RegionList, index: u6) void { + list.metadata.bitset.clear(index); + } + + pub const UnalignedAllocationResult = extern struct { + wasted: PhysicalMemoryRegion, + allocated: PhysicalMemoryRegion, + }; + + /// Slow path + pub fn allocateAlignedSplitting(list: *RegionList, size: usize, alignment: usize) !UnalignedAllocationResult { + for (&list.regions, 0..) 
|*region, _index| { + const index: u6 = @intCast(_index); + const aligned_region_address = lib.alignForward(usize, region.address.value(), alignment); + const wasted_space = aligned_region_address - region.address.value(); + + if (list.metadata.bitset.isSet(index)) { + const target_size = wasted_space + size; + if (region.size >= target_size) { + const wasted_region = try region.takeSlice(wasted_space); + const allocated_region = try region.takeSlice(size); + + if (region.size == 0) { + list.remove(index); + } + + return UnalignedAllocationResult{ + .wasted = wasted_region, + .allocated = allocated_region, + }; + } + } + } + + log.err("allocateAlignedSplitting", .{}); + return error.OutOfMemory; + } + + pub fn allocate(list: *RegionList, size: usize) Error!PhysicalMemoryRegion { + return list.allocateAligned(size, lib.arch.valid_page_sizes[0]); + } + + pub fn append(list: *RegionList, region: PhysicalMemoryRegion) Error!birth.interface.Memory { + var block_count: usize = 0; + while (true) : (block_count += 1) { + if (!list.metadata.bitset.isFull()) { + const region_index = list.metadata.bitset.allocate() catch continue; + const block_index = block_count; + + list.regions[region_index] = region; + + return .{ + .block = @intCast(block_index), + .region = region_index, + }; + } else { + return Error.no_space; + } + } + } + + const cache_line_count = 16; + const list_region_count = @divExact((cache_line_count * lib.cache_line_size) - Metadata.expected_size, @sizeOf(PhysicalMemoryRegion)); + + comptime { + assert(@sizeOf(RegionList) % lib.cache_line_size == 0); + } +}; + +const UseCase = extern struct { + reason: Reason, + const Reason = enum(u8) { + heap, + privileged, + wasted, + user_protected, + user, + bootloader, + }; +}; + +// TODO: make this more cache friendly +const UsedRegionList = extern struct { + region: PhysicalMemoryRegion, + use_case: UseCase, + next: ?*UsedRegionList = null, +}; + +pub const PageAllocator = extern struct { + free_regions: ?*RegionList = null, + used_regions: ?*UsedRegionList = null, + used_region_buffer: ?*UsedRegionList = null, + free_byte_count: u64 = 0, + used_byte_count: u64 = 0, + + pub fn allocate(allocator: *PageAllocator, size: usize, use_case: UseCase) lib.Allocator.Allocate.Error!PhysicalMemoryRegion { + const allocation = try allocator.allocateRaw(size); + try allocator.appendUsedRegion(allocation, use_case); + return allocation; + } + + fn allocateRaw(allocator: *PageAllocator, size: usize) !PhysicalMemoryRegion { + var iterator = allocator.free_regions; + while (iterator) |region_list| : (iterator = region_list.metadata.next) { + const allocation = region_list.allocate(size) catch continue; + allocator.free_byte_count -= size; + allocator.used_byte_count += size; + + return allocation; + } + + log.err("allocateRaw: out of memory. Used: 0x{x}. 
Free: 0x{x}", .{ allocator.used_byte_count, allocator.free_byte_count }); + return error.OutOfMemory; + } + + /// The only purpose this serves is to do the trick when switching cr3 + pub fn allocateAligned(allocator: *PageAllocator, size: usize, alignment: usize, use_case: UseCase) lib.Allocator.Allocate.Error!PhysicalMemoryRegion { + var iterator = allocator.free_regions; + while (iterator) |region_list| : (iterator = region_list.metadata.next) { + const unaligned_allocation = region_list.allocateAlignedSplitting(size, alignment) catch continue; + // TODO: do something with the wasted space + const total_allocation_size = unaligned_allocation.wasted.size + unaligned_allocation.allocated.size; + log.err("ALLOCATED: 0x{x}. WASTED: 0x{x}. TOTAL: 0x{x}", .{ unaligned_allocation.allocated.size, unaligned_allocation.wasted.size, total_allocation_size }); + + try allocator.appendUsedRegion(unaligned_allocation.allocated, use_case); + try allocator.appendUsedRegion(unaligned_allocation.wasted, .{ .reason = .wasted }); + + allocator.free_byte_count -= total_allocation_size; + allocator.used_byte_count += total_allocation_size; + + return unaligned_allocation.allocated; + } + + @panic("TODO: PageAllocator.allocateAligned"); + } + + pub fn appendUsedRegion(allocator: *PageAllocator, physical_region: PhysicalMemoryRegion, use_case: UseCase) lib.Allocator.Allocate.Error!void { + const need_allocation = blk: { + var result: bool = true; + var iterator = allocator.used_region_buffer; + while (iterator) |it| : (iterator = it.next) { + result = it.region.size < @sizeOf(UsedRegionList); + if (!result) { + break; + } + } + + break :blk result; + }; + + if (need_allocation) { + const allocation = try allocator.allocateRaw(lib.arch.valid_page_sizes[0]); + const new_buffer = allocation.address.toHigherHalfVirtualAddress().access(*UsedRegionList); + new_buffer.* = .{ + .region = allocation, + .use_case = undefined, + }; + _ = new_buffer.region.takeSlice(@sizeOf(UsedRegionList)) catch unreachable; + const used_region_allocation = new_buffer.region.takeSlice(@sizeOf(UsedRegionList)) catch unreachable; + const new_used_region = used_region_allocation.address.toHigherHalfVirtualAddress().access(*UsedRegionList); + new_used_region.* = .{ + .region = allocation, + .use_case = .{ .reason = .privileged }, + }; + + if (allocator.used_regions) |_| { + var iterator = allocator.used_regions; + _ = iterator; + @panic("TODO: iterate"); + } else { + allocator.used_regions = new_used_region; + } + + if (allocator.used_region_buffer) |_| { + var iterator = allocator.used_region_buffer; + _ = iterator; + @panic("TODO: iterate 2"); + } else { + allocator.used_region_buffer = new_buffer; + } + + assert(new_buffer.region.size < allocation.size); + } + + var iterator = allocator.used_region_buffer; + while (iterator) |it| : (iterator = it.next) { + if (it.region.size >= @sizeOf(UsedRegionList)) { + const new_used_region_allocation = it.region.takeSlice(@sizeOf(UsedRegionList)) catch unreachable; + const new_used_region = new_used_region_allocation.address.toHigherHalfVirtualAddress().access(*UsedRegionList); + new_used_region.* = .{ + .region = physical_region, + .use_case = use_case, + }; + + iterator = allocator.used_regions; + + while (iterator) |i| : (iterator = i.next) { + if (i.next == null) { + i.next = new_used_region; + return; + } + } + } + } + + if (true) @panic("TODO: PageAllocator.appendUsedRegion"); + return error.OutOfMemory; + } + + pub fn getPageTableAllocatorInterface(allocator: *PageAllocator) 
privileged.PageAllocator { return .{ - .allocate = callbackAllocate, - .context = pa, + .allocate = pageTableAllocateCallback, + .context = allocator, .context_type = .cpu, }; } - fn callbackAllocate(context: ?*anyopaque, size: u64, alignment: u64, options: PageAllocatorInterface.AllocateOptions) Allocator.Allocate.Error!PhysicalMemoryRegion { - _ = options; - const pa = @as(?*PageAllocator, @ptrCast(@alignCast(context))) orelse return Allocator.Allocate.Error.OutOfMemory; - const result = try pa.allocate(size, alignment); - return result; + fn pageTableAllocateCallback(context: ?*anyopaque, size: u64, alignment: u64, options: privileged.PageAllocator.AllocateOptions) error{OutOfMemory}!lib.PhysicalMemoryRegion { + const allocator: *PageAllocator = @alignCast(@ptrCast(context orelse return error.OutOfMemory)); + assert(alignment == lib.arch.valid_page_sizes[0]); + assert(size == lib.arch.valid_page_sizes[0]); + assert(options.count == 1); + assert(options.level_valid); + + const page_table_allocation = try allocator.allocate(size, .{ .reason = .user_protected }); + // log.debug("Page table allocation: 0x{x}", .{page_table_allocation.address.value()}); + + // TODO: is this right? + if (options.user) { + const user_page_tables = &user_scheduler.s.capability_root_node.dynamic.page_table; + const user_allocator = &user_scheduler.s.capability_root_node.heap.allocator; + const new_page_table_ref = try user_page_tables.appendPageTable(user_allocator, .{ + .region = page_table_allocation, + .mapping = page_table_allocation.address.toHigherHalfVirtualAddress(), + .flags = .{ .level = options.level }, + }); + + const indexed = options.virtual_address; + const indices = indexed.toIndices(); + + var page_table_ref = user_page_tables.user; + log.debug("Level: {s}", .{@tagName(options.level)}); + + for (0..@intFromEnum(options.level) - 1) |level_index| { + log.debug("Fetching {s} page table", .{@tagName(@as(paging.Level, @enumFromInt(level_index)))}); + const page_table = user_page_tables.getPageTable(page_table_ref) catch @panic("WTF"); + page_table_ref = page_table.children[indices[level_index]]; + } + + const parent_page_table = user_page_tables.getPageTable(page_table_ref) catch @panic("WTF"); + parent_page_table.children[indices[@intFromEnum(options.level) - 1]] = new_page_table_ref; + } + + return page_table_allocation; } - - pub fn allocate(pa: *PageAllocator, size: u64, alignment: u64) Allocator.Allocate.Error!PhysicalMemoryRegion { - if (pa.head == null) { - @panic("head null"); - } - - const allocation = blk: { - var ptr = pa.head; - while (ptr) |entry| : (ptr = entry.next) { - if (lib.isAligned(entry.region.address.value(), alignment) and entry.region.size > size) { - const result = PhysicalMemoryRegion{ - .address = entry.region.address, - .size = size, - }; - entry.region.address = entry.region.address.offset(size); - entry.region.size -= size; - - pa.total_allocated_size += @as(u32, @intCast(size)); - // log.debug("Allocated 0x{x}", .{size}); - - break :blk result; - } - } - - ptr = pa.head; - - while (ptr) |entry| : (ptr = entry.next) { - const aligned_address = lib.alignForward(entry.region.address.value(), alignment); - const top = entry.region.top().value(); - if (aligned_address < top and top - aligned_address > size) { - // log.debug("Found region which we should be splitting: (0x{x}, 0x{x})", .{ entry.region.address.value(), entry.region.size }); - // log.debug("User asked for 0x{x} bytes with alignment 0x{x}", .{ size, alignment }); - // Split the addresses to obtain the desired 
result - const first_region_size = aligned_address - entry.region.address.value(); - const first_region_address = entry.region.address; - const first_region_next = entry.next; - - const second_region_address = aligned_address + size; - const second_region_size = top - aligned_address + size; - - const result = PhysicalMemoryRegion{ - .address = PhysicalAddress.new(aligned_address), - .size = size, - }; - - // log.debug("\nFirst region: (Address: 0x{x}. Size: 0x{x}).\nRegion in the middle (allocated): (Address: 0x{x}. Size: 0x{x}).\nSecond region: (Address: 0x{x}. Size: 0x{x})", .{ first_region_address, first_region_size, result.address.value(), result.size, second_region_address, second_region_size }); - - const new_entry = pa.list_allocator.get(); - entry.* = .{ - .region = .{ - .address = first_region_address, - .size = first_region_size, - }, - .next = new_entry, - }; - - new_entry.* = .{ - .region = .{ - .address = PhysicalAddress.new(second_region_address), - .size = second_region_size, - }, - .next = first_region_next, - }; - // log.debug("First entry: (Address: 0x{x}. Size: 0x{x})", .{ entry.region.address.value(), entry.region.size }); - // log.debug("Second entry: (Address: 0x{x}. Size: 0x{x})", .{ new_entry.region.address.value(), new_entry.region.size }); - - // pa.total_allocated_size += @intCast(u32, size); - // log.debug("Allocated 0x{x}", .{size}); - - break :blk result; - } - } - - log.err("Allocate error. Size: 0x{x}. Alignment: 0x{x}. Total allocated size: 0x{x}", .{ size, alignment, pa.total_allocated_size }); - return Allocator.Allocate.Error.OutOfMemory; - }; - - //log.debug("Physical allocation: 0x{x}, 0x{x}", .{ allocation.address.value(), allocation.size }); - - @memset(allocation.toHigherHalfVirtualAddress().access(u8), 0); - - return allocation; - } - - pub inline fn fromBSP(bootloader_information: *bootloader.Information) InitializationError!PageAllocator { - const memory_map_entries = bootloader_information.getMemoryMapEntries(); - const page_counters = bootloader_information.getPageCounters(); - - var total_size: usize = 0; - const page_shifter = lib.arch.page_shifter(lib.arch.valid_page_sizes[0]); - - for (memory_map_entries, page_counters) |entry, page_counter| { - if (entry.type != .usable or !lib.isAligned(entry.region.size, lib.arch.valid_page_sizes[0]) or entry.region.address.value() < lib.mb) { - continue; - } - - total_size += entry.region.size - (page_counter << page_shifter); - } - - const cpu_count = bootloader_information.smp.cpu_count; - const total_memory_to_take = total_size / cpu_count; - - // Look for a 4K page to host the memory map - const backing_4k_page = for (memory_map_entries, page_counters) |entry, *page_counter| { - const occupied_size = page_counter.* << page_shifter; - const entry_size_left = entry.region.size - occupied_size; - if (entry_size_left != 0) { - if (entry.type != .usable or !lib.isAligned(entry.region.size, lib.arch.valid_page_sizes[0]) or entry.region.address.value() < lib.mb) continue; - - assert(lib.isAligned(entry_size_left, lib.arch.valid_page_sizes[0])); - page_counter.* += 1; - break entry.region.address.offset(occupied_size); - } - } else return InitializationError.bootstrap_region_not_found; - - var memory_taken: usize = 0; - var backing_4k_page_memory_allocated: usize = 0; - - var last_entry: ?*Entry = null; - var first_entry: ?*Entry = null; - - for (memory_map_entries, page_counters) |entry, *page_counter| { - if (entry.type != .usable or !lib.isAligned(entry.region.size, lib.arch.valid_page_sizes[0]) or 
entry.region.address.value() < lib.mb) continue; - - const occupied_size = page_counter.* << page_shifter; - - if (occupied_size < entry.region.size) { - const entry_size_left = entry.region.size - occupied_size; - - var memory_taken_from_region: usize = 0; - while (memory_taken + memory_taken_from_region < total_memory_to_take) { - if (memory_taken_from_region == entry_size_left) break; - - const size_to_take = @min(2 * lib.mb, entry_size_left); - memory_taken_from_region += size_to_take; - } - - memory_taken += memory_taken_from_region; - - page_counter.* += @as(u32, @intCast(memory_taken_from_region >> page_shifter)); - const region_descriptor = .{ - .address = entry.region.offset(occupied_size).address, - .size = memory_taken_from_region, - }; - - if (backing_4k_page_memory_allocated >= lib.arch.valid_page_sizes[0]) return InitializationError.memory_exceeded; - const entry_address = backing_4k_page.offset(backing_4k_page_memory_allocated); - const new_entry = entry_address.toHigherHalfVirtualAddress().access(*Entry); - backing_4k_page_memory_allocated += @sizeOf(Entry); - - new_entry.* = .{ - .region = .{ - .address = region_descriptor.address, - .size = region_descriptor.size, - }, - .next = null, - }; - - if (last_entry) |e| { - e.next = new_entry; - } else { - first_entry = new_entry; - } - - last_entry = new_entry; - - if (memory_taken >= total_memory_to_take) break; - } - } - - const result = .{ - .head = first_entry, - .list_allocator = .{ - .u = .{ - .primitive = .{ - .backing_4k_page = backing_4k_page, - .allocated = backing_4k_page_memory_allocated, - }, - }, - .primitive = true, - }, - }; - - return result; - } - - const ListAllocator = extern struct { - u: extern union { - primitive: extern struct { - backing_4k_page: PhysicalAddress, - allocated: u64, - }, - normal: extern struct { - foo: u64, - }, - }, - primitive: bool, - - pub fn get(list_allocator: *ListAllocator) *Entry { - switch (list_allocator.primitive) { - true => { - if (list_allocator.u.primitive.allocated < 0x1000) { - const result = list_allocator.u.primitive.backing_4k_page.offset(list_allocator.u.primitive.allocated).toHigherHalfVirtualAddress().access(*Entry); - list_allocator.u.primitive.backing_4k_page = list_allocator.u.primitive.backing_4k_page.offset(@sizeOf(Entry)); - return result; - } else { - @panic("reached limit"); - } - }, - false => { - @panic("not primitive allocator not implemented"); - }, - } - } - }; - - pub const Entry = extern struct { - region: PhysicalMemoryRegion, - next: ?*Entry, - }; - - const InitializationError = error{ - bootstrap_region_not_found, - memory_exceeded, - }; }; -// fn getDebugInformation() !lib.ModuleDebugInfo { -// const debug_info = lib.getDebugInformation(heap_allocator.toZig(), file) catch |err| { -// try writer.print("Failed to get debug information: {}", .{err}); -// return err; -// }; -// -// return debug_info; -// } +pub const HeapRegion = extern struct { + region: VirtualMemoryRegion, + previous: ?*HeapRegion = null, + next: ?*HeapRegion = null, +}; + +pub fn HeapImplementation(comptime user: bool) type { + const use_case = .{ .reason = if (user) .user_protected else .heap }; + _ = use_case; + return extern struct { + allocator: lib.Allocator = .{ + .callbacks = .{ + .allocate = callbackAllocate, + }, + }, + regions: ?*Region = null, + + const Heap = @This(); + const Region = HeapRegion; + + pub fn create(heap_allocator: *Heap, comptime T: type) lib.Allocator.Allocate.Error!*T { + const result = try heap_allocator.allocate(@sizeOf(T), @alignOf(T)); + return 
@ptrFromInt(result.address);
+        }
+
+        pub fn addBootstrapingRegion(heap_allocator: *Heap, region: VirtualMemoryRegion) !void {
+            assert(heap_allocator.regions == null);
+
+            var region_splitter = region;
+            const new_region_vmr = try region_splitter.takeSlice(@sizeOf(Region));
+            const new_region = new_region_vmr.address.access(*Region);
+            new_region.* = Region{
+                .region = region_splitter,
+            };
+
+            heap_allocator.regions = new_region;
+        }
+
+        // TODO: turn the other way around: make the callback call this function
+        pub fn allocate(heap_allocator: *Heap, size: u64, alignment: u64) lib.Allocator.Allocate.Error!lib.Allocator.Allocate.Result {
+            var iterator = heap_allocator.regions;
+            while (iterator) |region| : (iterator = region.next) {
+                if (region.region.address.isAligned(alignment)) {
+                    if (region.region.size >= size) {
+                        const virtual_region = region.region.takeSlice(size) catch unreachable;
+                        const should_remove = region.region.size == 0;
+                        if (should_remove) {
+                            // TODO: actually remove and reuse
+                            if (region.previous) |prev| prev.next = region.next;
+                        }
+
+                        return @bitCast(virtual_region);
+                    }
+                }
+            }
+
+            const new_size = lib.alignForward(usize, size + @sizeOf(HeapRegion), lib.arch.valid_page_sizes[0]);
+            assert(alignment <= lib.arch.valid_page_sizes[0]);
+            var new_physical_region = try page_allocator.allocate(new_size, .{ .reason = .heap });
+            const new_alloc = new_physical_region.takeSlice(@sizeOf(HeapRegion)) catch unreachable;
+            const new_heap_region = new_alloc.toHigherHalfVirtualAddress().address.access(*HeapRegion);
+            new_heap_region.* = .{
+                .region = new_physical_region.toHigherHalfVirtualAddress(),
+            };
+
+            iterator = heap_allocator.regions;
+            if (iterator) |_| {
+                while (iterator) |heap_region| : (iterator = heap_region.next) {
+                    if (heap_region.next == null) {
+                        heap_region.next = new_heap_region;
+                        break;
+                    }
+                }
+            } else {
+                @panic("NO");
+            }
+
+            // Retry the allocation now that the new region is linked into this heap's list
+            return heap_allocator.allocate(size, alignment);
+        }
+
+        fn callbackAllocate(allocator: *Allocator, size: u64, alignment: u64) lib.Allocator.Allocate.Error!lib.Allocator.Allocate.Result {
+            // This assert is triggered by the Zig std library
+            //assert(lib.isAligned(size, alignment));
+            const heap_allocator = @fieldParentPtr(Heap, "allocator", allocator);
+            return heap_allocator.allocate(size, alignment);
+        }
+    };
+}
 pub const writer = privileged.E9Writer{ .context = {} };
+
+pub fn SparseArray(comptime T: type) type {
+    return extern struct {
+        ptr: [*]T,
+        len: usize,
+        capacity: usize,
+
+        const Array = @This();
+
+        pub const Error = error{
+            index_out_of_bounds,
+        };
+
+        pub fn append(array: *Array, allocator: *Allocator, element: T) !void {
+            try array.ensureCapacity(allocator, array.len + 1);
+            const index = array.len;
+            array.len += 1;
+            const slice = array.ptr[0..array.len];
+            slice[index] = element;
+        }
+
+        fn ensureCapacity(array: *Array, allocator: *Allocator, desired_capacity: usize) !void {
+            if (array.capacity < desired_capacity) {
+                // Allocate a new array
+                const new_slice = try allocator.allocate(T, desired_capacity);
+                if (array.capacity == 0) {
+                    array.ptr = new_slice.ptr;
+                    array.capacity = new_slice.len;
+                } else {
+                    // Reallocate
+                    if (array.len > 0) {
+                        @memcpy(new_slice[0..array.len], array.ptr[0..array.len]);
+                    }
+
+                    // TODO: free
+
+                    array.ptr = new_slice.ptr;
+                    array.capacity = new_slice.len;
+                }
+            }
+        }
+
+        pub inline fn get(array: *Array, index: usize) T {
+            assert(array.len > index);
+            const slice = array.ptr[0..array.len];
+            return slice[index];
+        }
+
+        pub inline fn getChecked(array: *Array, index: usize) !T {
+            if
(array.len > index) { + return array.get(index); + } else { + return error.index_out_of_bounds; + } + } + }; +} diff --git a/src/cpu/arch/x86/64/init.zig b/src/cpu/arch/x86/64/init.zig index fdcdd57..ce4182f 100644 --- a/src/cpu/arch/x86/64/init.zig +++ b/src/cpu/arch/x86/64/init.zig @@ -13,7 +13,10 @@ const PhysicalMemoryRegion = lib.PhysicalMemoryRegion; const VirtualAddress = lib.VirtualAddress; const VirtualMemoryRegion = lib.VirtualMemoryRegion; +const Leaf = cpu.interface.Leaf; +const PageTable = cpu.interface.PageTable; const panic = cpu.panic; +const RegionList = cpu.RegionList; const x86_64 = cpu.arch.current; const paging = privileged.arch.paging; @@ -31,9 +34,6 @@ const IA32_FMASK = privileged.arch.x86_64.registers.IA32_FMASK; const IA32_LSTAR = privileged.arch.x86_64.registers.IA32_LSTAR; const IA32_STAR = privileged.arch.x86_64.registers.IA32_STAR; -const user_scheduler_memory_start_virtual_address = VirtualAddress.new(0x200_000); -const user_scheduler_virtual_address = user_scheduler_memory_start_virtual_address; - pub fn entryPoint() callconv(.Naked) noreturn { asm volatile ( \\lea stack(%rip), %rsp @@ -46,45 +46,23 @@ pub fn entryPoint() callconv(.Naked) noreturn { [main] "{rax}" (&main), : "rsp", "rbp" ); - - unreachable; } -const InitializationError = error{ - feature_requested_and_not_available, - no_files, - cpu_file_not_found, - init_file_not_found, -}; - noinline fn main(bootloader_information: *bootloader.Information) callconv(.C) noreturn { log.info("Initializing...\n\n\t[BUILD MODE] {s}\n\t[BOOTLOADER] {s}\n\t[BOOT PROTOCOL] {s}\n", .{ @tagName(lib.build_mode), @tagName(bootloader_information.bootloader), @tagName(bootloader_information.protocol) }); - archInitialize(bootloader_information) catch |err| { + cpu.init.initialize(bootloader_information) catch |err| { cpu.panicWithStackTrace(@errorReturnTrace(), "Failed to initialize CPU: {}", .{err}); }; } -fn archInitialize(bootloader_information: *bootloader.Information) !noreturn { - // bootloader_information.draw_context.clearScreen(0xffff7f50); - // Do an integrity check so that the bootloader information is in perfect state and there is no weird memory behavior. - // This is mainly due to the transition from a 32-bit bootloader to a 64-bit CPU driver in the x86-64 architecture. 
- try bootloader_information.checkIntegrity(); - // Informing the bootloader information struct that we have reached the CPU driver and any bootloader - // functionality is not available anymore - bootloader_information.stage = .cpu; - // Check that the bootloader has loaded some files as the CPU driver needs them to go forward - cpu.bundle = bootloader_information.getSlice(.bundle); - if (cpu.bundle.len == 0) return InitializationError.no_files; - cpu.bundle_files = bootloader_information.getSlice(.file_list); - if (cpu.bundle_files.len == 0) return InitializationError.no_files; - +pub inline fn initialize() !void { const cpuid = lib.arch.x86_64.cpuid; if (x86_64.pcid) { - if (cpuid(1).ecx & (1 << 17) == 0) return InitializationError.feature_requested_and_not_available; + if (cpuid(1).ecx & (1 << 17) == 0) return error.feature_requested_and_not_available; } if (x86_64.invariant_tsc) { - if (cpuid(0x80000007).edx & (1 << 8) == 0) return InitializationError.feature_requested_and_not_available; + if (cpuid(0x80000007).edx & (1 << 8) == 0) return error.feature_requested_and_not_available; } // Initialize GDT @@ -198,22 +176,16 @@ fn archInitialize(bootloader_information: *bootloader.Information) !noreturn { efer.SCE = true; efer.write(); - // TODO: AVX - - const avx_xsave_cpuid = cpuid(1); - const avx_support = avx_xsave_cpuid.ecx & (1 << 28) != 0; + const avx_xsave_cpuid = cpuid(1, 0); const xsave_support = avx_xsave_cpuid.ecx & (1 << 26) != 0; - const avx2_support = cpuid(7).ebx & (1 << 5) != 0; - log.debug("AVX: {}. AVX2: {}. XSAVE: {}. Can't enable them yet", .{ avx_support, avx2_support, xsave_support }); - - comptime { - assert(lib.arch.valid_page_sizes[0] == 0x1000); - } + // TODO: AVX var my_cr4 = cr4.read(); my_cr4.OSFXSR = true; my_cr4.OSXMMEXCPT = true; - //my_cr4.OSXSAVE = true; + if (xsave_support) { + // my_cr4.OSXSAVE = true; + } my_cr4.page_global_enable = true; my_cr4.performance_monitoring_counter_enable = true; my_cr4.write(); @@ -225,6 +197,14 @@ fn archInitialize(bootloader_information: *bootloader.Information) !noreturn { my_cr0.task_switched = false; my_cr0.write(); + const avx_support = avx_xsave_cpuid.ecx & (1 << 28) != 0; + // const avx2_support = cpuid(7).ebx & (1 << 5) != 0; + log.debug("AVX: {}. AVX2: {}. XSAVE: {}. 
Can't enable them yet", .{ avx_support, false, xsave_support }); + + comptime { + assert(lib.arch.valid_page_sizes[0] == 0x1000); + } + // The bootloader already mapped APIC, so it's not necessary to map it here var ia32_apic_base = IA32_APIC_BASE.read(); cpu.bsp = ia32_apic_base.bsp; @@ -252,161 +232,10 @@ fn archInitialize(bootloader_information: *bootloader.Information) !noreturn { :: //[mxcsr] "m" (@as(u32, 0x1f80)), : "memory"); - // Write user TLS base address - IA32_FS_BASE.write(user_scheduler_virtual_address.value()); - // TODO: configure PAT - - try initialize(bootloader_information); -} - -fn initialize(bootloader_information: *bootloader.Information) !noreturn { - const memory_map_entries = bootloader_information.getMemoryMapEntries(); - const page_counters = bootloader_information.getPageCounters(); - - var free_size: usize = 0; - var free_region_count: usize = 0; - - for (memory_map_entries, page_counters) |mmap_entry, page_counter| { - if (mmap_entry.type == .usable) { - const free_region = mmap_entry.getFreeRegion(page_counter); - free_size += free_region.size; - free_region_count += @intFromBool(free_region.size > 0); - } - } - - const total_to_allocate = @sizeOf(cpu.Driver) + @sizeOf(cpu.capabilities.Root) + lib.arch.valid_page_sizes[0]; - - const total_physical: struct { - region: PhysicalMemoryRegion, - free_size: u64, - index: usize, - } = for (memory_map_entries, page_counters, 0..) |mmap_entry, page_counter, index| { - if (mmap_entry.type == .usable) { - const free_region = mmap_entry.getFreeRegion(page_counter); - if (free_region.size >= total_to_allocate) { - break .{ - .region = PhysicalMemoryRegion.new(.{ - .address = free_region.address, - .size = total_to_allocate, - }), - .free_size = free_region.size - total_to_allocate, - .index = index, - }; - } - } - } else @panic("Total physical region not found"); - - var offset: usize = 0; - - cpu.driver = total_physical.region.offset(offset).address.toHigherHalfVirtualAddress().access(*align(lib.arch.valid_page_sizes[0]) cpu.Driver); - offset += @sizeOf(cpu.Driver); - - const root_capability = total_physical.region.offset(offset).address.toHigherHalfVirtualAddress().access(*cpu.capabilities.Root); - offset += @sizeOf(cpu.capabilities.Root); - - var heap_offset: usize = 0; - const heap_region = total_physical.region.offset(offset); - assert(heap_region.size == lib.arch.valid_page_sizes[0]); - const host_free_ram = heap_region.offset(heap_offset).address.toHigherHalfVirtualAddress().access(*cpu.capabilities.RAM.Region); - host_free_ram.* = .{ - .region = PhysicalMemoryRegion.new(.{ - .address = total_physical.region.offset(total_to_allocate).address, - .size = total_physical.free_size, - }), - }; - heap_offset += @sizeOf(cpu.capabilities.RAM.Region); - const privileged_cpu_memory = heap_region.offset(heap_offset).address.toHigherHalfVirtualAddress().access(*cpu.capabilities.RAM.Region); - privileged_cpu_memory.* = .{ - .region = total_physical.region, - }; - - heap_offset += @sizeOf(cpu.capabilities.RAM); - - var previous_free_ram = host_free_ram; - for (memory_map_entries, page_counters, 0..) 
|memory_map_entry, page_counter, index| { - if (index == total_physical.index) continue; - - if (memory_map_entry.type == .usable) { - const region = memory_map_entry.getFreeRegion(page_counter); - if (region.size > 0) { - const new_free_ram = heap_region.offset(heap_offset).address.toHigherHalfVirtualAddress().access(*cpu.capabilities.RAM.Region); - heap_offset += @sizeOf(cpu.capabilities.RAM.Region); - new_free_ram.* = .{ - .region = region, - }; - previous_free_ram.next = new_free_ram; - previous_free_ram = new_free_ram; - } - } - } - - root_capability.* = .{ - .static = .{ - .cpu = true, - .boot = true, - .process = true, - }, - .dynamic = .{ - .io = .{ - .debug = true, - }, - .ram = .{ - .lists = blk: { - var lists = [1]?*cpu.capabilities.RAM.Region{null} ** lib.arch.reverse_valid_page_sizes.len; - var free_ram_iterator: ?*cpu.capabilities.RAM.Region = host_free_ram; - while (free_ram_iterator) |free_ram| { - comptime assert(lib.arch.reverse_valid_page_sizes.len == 3); - const next = free_ram.next; - - if (free_ram.region.size >= lib.arch.reverse_valid_page_sizes[0]) { - const previous_first = lists[0]; - lists[0] = free_ram; - free_ram.next = previous_first; - } else if (free_ram.region.size >= lib.arch.reverse_valid_page_sizes[1]) { - const previous_first = lists[1]; - lists[1] = free_ram; - free_ram.next = previous_first; - } else if (free_ram.region.size >= lib.arch.reverse_valid_page_sizes[2]) { - const previous_first = lists[2]; - lists[2] = free_ram; - free_ram.next = previous_first; - } else unreachable; - - free_ram_iterator = next; - } - - break :blk lists; - }, - }, - .cpu_memory = .{ - .flags = .{ - .allocate = true, - }, - }, - .page_table = .{}, - }, - .scheduler = .{ - .memory = undefined, - }, - .heap = cpu.capabilities.Root.Heap.new(heap_region, heap_offset), - }; - - cpu.driver.* = .{ - .valid = true, - .init_root_capability = .{ - .value = root_capability, - }, - }; - - switch (cpu.bsp) { - true => { - const init_module_descriptor = try bootloader_information.getFileDescriptor("init"); - try spawnInitBSP(init_module_descriptor.content, bootloader_information.cpu_page_tables); - }, - false => @panic("Implement APP"), - } } +// TODO: +// Write user TLS base address export var interrupt_stack: [0x1000]u8 align(lib.arch.stack_alignment) = undefined; export var gdt = x86_64.GDT{}; @@ -436,7 +265,6 @@ pub fn InterruptHandler(comptime interrupt_number: u64, comptime has_error_code: asm volatile ( \\cld ::: "memory"); - if (!has_error_code) { asm volatile ("pushq $0" ::: "memory"); } @@ -490,8 +318,6 @@ pub fn InterruptHandler(comptime interrupt_number: u64, comptime has_error_code: \\iretq \\int3 ::: "memory"); - - unreachable; } }.handler; } @@ -520,6 +346,7 @@ const Interrupt = enum(u5) { CP = 0x15, _, }; + const interrupt_handlers = [256]*const fn () callconv(.Naked) noreturn{ InterruptHandler(@intFromEnum(Interrupt.DE), false), InterruptHandler(@intFromEnum(Interrupt.DB), false), @@ -779,714 +606,186 @@ const interrupt_handlers = [256]*const fn () callconv(.Naked) noreturn{ InterruptHandler(0xff, false), }; -const BSPEarlyAllocator = extern struct { - base: PhysicalAddress, - size: usize, - offset: usize, - allocator: Allocator = .{ - .callbacks = .{ - .allocate = callbackAllocate, - }, - }, - heap_first: ?*BSPHeapEntry = null, - - const BSPHeapEntry = extern struct { - virtual_memory_region: VirtualMemoryRegion, - offset: usize = 0, - next: ?*BSPHeapEntry = null, - - // pub fn create(heap: *BSPHeapEntry, comptime T: type) !*T { - // _ = heap; - // @panic("TODO: 
create"); - // } - - pub fn allocateBytes(heap: *BSPHeapEntry, size: u64, alignment: u64) ![]u8 { - assert(alignment < lib.arch.valid_page_sizes[0]); - assert(heap.virtual_memory_region.size > size); - if (!lib.isAligned(heap.virtual_memory_region.address.value(), alignment)) { - const misalignment = lib.alignForward(usize, heap.virtual_memory_region.address.value(), alignment) - heap.virtual_memory_region.address.value(); - _ = heap.virtual_memory_region.takeSlice(misalignment); - } - - return heap.virtual_memory_region.takeByteSlice(size); - } - }; - - pub fn createPageAligned(allocator: *BSPEarlyAllocator, comptime T: type) AllocatorError!*align(lib.arch.valid_page_sizes[0]) T { - return @as(*align(lib.arch.valid_page_sizes[0]) T, @ptrCast(try allocator.allocateBytes(@sizeOf(T), lib.arch.valid_page_sizes[0]))); - } - - pub fn allocateBytes(allocator: *BSPEarlyAllocator, size: u64, alignment: u64) AllocatorError![]align(lib.arch.valid_page_sizes[0]) u8 { - if (!lib.isAligned(size, lib.arch.valid_page_sizes[0])) return AllocatorError.bad_alignment; - if (allocator.offset + size > allocator.size) return AllocatorError.out_of_memory; - - // TODO: don't trash memory - if (!lib.isAligned(allocator.base.offset(allocator.offset).value(), alignment)) { - const aligned = lib.alignForward(usize, allocator.base.offset(allocator.offset).value(), alignment); - allocator.offset += aligned - allocator.base.offset(allocator.offset).value(); - } - - const physical_address = allocator.base.offset(allocator.offset); - allocator.offset += size; - const slice = physical_address.toHigherHalfVirtualAddress().access([*]align(lib.arch.valid_page_sizes[0]) u8)[0..size]; - @memset(slice, 0); - - return slice; - } - - pub fn callbackAllocate(allocator: *Allocator, size: u64, alignment: u64) Allocator.Allocate.Error!Allocator.Allocate.Result { - const early_allocator = @fieldParentPtr(BSPEarlyAllocator, "allocator", allocator); - if (alignment == lib.arch.valid_page_sizes[0] or size % lib.arch.valid_page_sizes[0] == 0) { - const result = early_allocator.allocateBytes(size, alignment) catch return Allocator.Allocate.Error.OutOfMemory; - return .{ - .address = @intFromPtr(result.ptr), - .size = result.len, - }; - } else if (alignment > lib.arch.valid_page_sizes[0]) { - @panic("WTF"); - } else { - assert(size < lib.arch.valid_page_sizes[0]); - const heap_entry_allocation = early_allocator.allocateBytes(lib.arch.valid_page_sizes[0], lib.arch.valid_page_sizes[0]) catch return Allocator.Allocate.Error.OutOfMemory; - const heap_entry_region = VirtualMemoryRegion.fromByteSlice(.{ - .slice = heap_entry_allocation, - }); - const heap_entry = try early_allocator.addHeapRegion(heap_entry_region); - const result = try heap_entry.allocateBytes(size, alignment); - return .{ - .address = @intFromPtr(result.ptr), - .size = result.len, - }; - } - } - - inline fn addHeapRegion(early_allocator: *BSPEarlyAllocator, region: VirtualMemoryRegion) !*BSPHeapEntry { - const heap_entry = region.address.access(*BSPHeapEntry); - const offset = @sizeOf(BSPHeapEntry); - heap_entry.* = .{ - .offset = offset, - .virtual_memory_region = region.offset(offset), - .next = early_allocator.heap_first, - }; - - early_allocator.heap_first = heap_entry; - - return heap_entry; - } - const AllocatorError = error{ - out_of_memory, - bad_alignment, - }; -}; - const half_page_table_entry_count = @divExact(paging.page_table_entry_count, 2); -fn spawnInitBSP(init_file: []const u8, cpu_page_tables: paging.CPUPageTables) !noreturn { - const spawn_init = try 
spawnInitCommon(cpu_page_tables); - const init_scheduler = spawn_init.scheduler; - const page_table_regions = spawn_init.page_table_regions; - - // TODO: make this the right one - const address_space = page_table_regions.getAddressSpace(); - const init_elf = try ELF.Parser.init(init_file); - const entry_point = init_elf.getEntryPoint(); - const program_headers = init_elf.getProgramHeaders(); - const scheduler_common = init_scheduler.common; - - for (program_headers) |program_header| { - if (program_header.type == .load) { - const aligned_size = lib.alignForward(usize, program_header.size_in_memory, lib.arch.valid_page_sizes[0]); - const segment_virtual_address = VirtualAddress.new(program_header.virtual_address); - const indexed_virtual_address = @as(paging.IndexedVirtualAddress, @bitCast(program_header.virtual_address)); - _ = indexed_virtual_address; - const segment_flags = .{ - .execute_disable = !program_header.flags.executable, - .write = program_header.flags.writable, - .user = true, - }; - - const segment_physical_region = try cpu.driver.getRootCapability().allocatePages(aligned_size); - try page_table_regions.map(segment_virtual_address, segment_physical_region.address, segment_physical_region.size, segment_flags); - - const src = init_file[program_header.offset..][0..program_header.size_in_file]; - const dst = segment_physical_region.toHigherHalfVirtualAddress().access(u8)[0..program_header.size_in_file]; - @memcpy(dst, src); - } - } - - // Once all page tables are set up, copy lower half of the address space to the cpu page table - const cpu_pml4 = page_table_regions.getCpuPML4(); - const user_pml4 = page_table_regions.getUserPML4(); - @memcpy(cpu_pml4[0..half_page_table_entry_count], user_pml4[0..half_page_table_entry_count]); - - cpu.user_scheduler = init_scheduler; - address_space.cr3.write(); - - scheduler_common.self = scheduler_common; - - const scheduler_common_arch = scheduler_common.architectureSpecific(); - - // Set arguments - - // First argument - scheduler_common_arch.disabled_save_area.registers.rdi = user_scheduler_virtual_address.value(); - // Second argument - const is_init = true; - scheduler_common_arch.disabled_save_area.registers.rsi = @intFromBool(is_init); - - scheduler_common_arch.disabled_save_area.registers.rip = entry_point; // Set entry point - scheduler_common_arch.disabled_save_area.registers.rsp = user_scheduler_virtual_address.offset(@offsetOf(birth.UserScheduler, "setup_stack")).value() + scheduler_common_arch.generic.setup_stack.len; - scheduler_common.setup_stack_lock.value = true; - scheduler_common_arch.disabled_save_area.registers.rflags = .{ .IF = true }; // Set RFLAGS - - scheduler_common_arch.disabled_save_area.fpu.fcw = 0x037f; // Set FPU - scheduler_common_arch.disabled_save_area.fpu.mxcsr = 0x1f80; - - scheduler_common_arch.disabled_save_area.contextSwitch(); -} - -const UserMemory = extern struct { - root: PhysicalMemoryRegion, - pdpt: PhysicalMemoryRegion, - pdt: PhysicalMemoryRegion, - pt: PhysicalMemoryRegion, -}; - -const PageTableRegions = extern struct { - regions: [region_count]PhysicalMemoryRegion, - total: PhysicalMemoryRegion, - base_virtual_address: VirtualAddress, - - fn mapQuick(page_table_regions: PageTableRegions, virtual_address: VirtualAddress, physical_address: PhysicalAddress, size: usize, flags: paging.MemoryFlags) void { - const ptes = page_table_regions.getPageTables(.{ .index = .pt }); - // log.debug("PTE base: 0x{x}", .{@ptrToInt(ptes.ptr)}); - assert(lib.isAligned(size, lib.arch.valid_page_sizes[0])); - const 
indexed = @as(paging.IndexedVirtualAddress, @bitCast(virtual_address.value())); - const base_indexed = @as(paging.IndexedVirtualAddress, @bitCast(page_table_regions.base_virtual_address.value())); - const physical_base = physical_address.value(); - var physical_iterator = physical_base; - const physical_top = physical_base + size; - const pd_offset_index = indexed.PD - base_indexed.PD; - // log.debug("PD index: {}. PD offset index: {}", .{ indexed.PD, pd_offset_index }); - var index = @as(usize, pd_offset_index) * paging.page_table_entry_count + indexed.PT; - // log.debug("Virtual address: 0x{x}. Size: 0x{x}. Index: {}. PD: {}. PT: {}", .{ virtual_address.value(), size, index, indexed.PD, indexed.PT }); - - while (physical_iterator < physical_top) : ({ - physical_iterator += lib.arch.valid_page_sizes[0]; - index += 1; - }) { - ptes[index] = paging.getPageEntry(paging.PTE, physical_iterator, flags); - } - } - - fn map(page_table_regions: PageTableRegions, virtual_address: VirtualAddress, physical_address: PhysicalAddress, size: usize, flags: paging.MemoryFlags) !void { - // log.debug("Mapping 0x{x} -> 0x{x} for 0x{x} bytes", .{ virtual_address.value(), physical_address.value(), size }); - assert(page_table_regions.regions[@intFromEnum(Index.pml4)].size == 2 * lib.arch.valid_page_sizes[0]); - assert(page_table_regions.regions[@intFromEnum(Index.pdp)].size == lib.arch.valid_page_sizes[0]); - assert(page_table_regions.regions[@intFromEnum(Index.pd)].size == lib.arch.valid_page_sizes[0]); - - page_table_regions.mapQuick(virtual_address, physical_address, size, flags); - - const address_space = page_table_regions.getAddressSpace(); - const virtual_address_top = virtual_address.offset(size).value(); - var index: usize = 0; - - while (virtual_address.offset(index * lib.arch.valid_page_sizes[0]).value() < virtual_address_top) : (index += 1) { - const offset = index * lib.arch.valid_page_sizes[0]; - const expected_pa = physical_address.offset(offset); - const va = virtual_address.offset(offset); - - const translated_physical_address = address_space.translateAddress(va, flags) catch |err| { - panic("Mapping of 0x{x} failed: {}", .{ va.value(), err }); - }; - - if (translated_physical_address.value() != expected_pa.value()) { - @panic("Mapping failed"); - } - } - } - - const region_count = lib.enumCount(Index); - const Index = enum(u2) { - pml4, - pdp, - pd, - pt, - }; - - const sizes = blk: { - const shifter = lib.arch.page_shifter(lib.arch.valid_page_sizes[0]); - var result: [region_count]comptime_int = undefined; - - for (&result, entry_count_array) |*size, entry_count| { - size.* = @divExact(entry_count, paging.page_table_entry_count) << shifter; - } - - break :blk result; - }; - - const total_size = blk: { - var result: comptime_int = 0; - - for (sizes) |size| { - result += size; - } - - break :blk result; - }; - - const entry_count_array = blk: { - var result: [region_count]comptime_int = undefined; - - result[@intFromEnum(Index.pml4)] = 2 * paging.page_table_entry_count; - result[@intFromEnum(Index.pdp)] = init_vas_pdpe_count; - result[@intFromEnum(Index.pd)] = init_vas_pde_count; - result[@intFromEnum(Index.pt)] = init_vas_pte_count; - - break :blk result; - }; - - const EntryType = blk: { - var result: [region_count]type = undefined; - result[@intFromEnum(Index.pml4)] = paging.PML4TE; - result[@intFromEnum(Index.pdp)] = paging.PDPTE; - result[@intFromEnum(Index.pd)] = paging.PDTE; - result[@intFromEnum(Index.pt)] = paging.PTE; - break :blk result; - }; - - const init_vas_size = 128 * lib.mb; 
- const init_vas_page_count = @divExact(init_vas_size, lib.arch.valid_page_sizes[0]); - - const init_vas_pte_count = init_vas_page_count; - const init_vas_pde_count = lib.alignForward(usize, @divExact(init_vas_pte_count, paging.page_table_entry_count), paging.page_table_entry_count); - const init_vas_pdpe_count = lib.alignForward(usize, @divExact(init_vas_pde_count, paging.page_table_entry_count), paging.page_table_entry_count); - - const AccessOptions = packed struct { - index: Index, - user: bool = true, - }; - - pub inline fn getPhysicalRegion(regions: PageTableRegions, comptime options: AccessOptions) PhysicalMemoryRegion { - const index = @intFromEnum(options.index); - const result = regions.regions[index].offset(switch (index) { - 0 => switch (options.user) { - true => paging.page_table_size, - false => 0, - }, - else => 0, - }); - - return switch (index) { - 0 => PhysicalMemoryRegion.new(.{ .address = result.address, .size = paging.page_table_size }), - else => result, - }; - } - - pub inline fn getPageTables(regions: PageTableRegions, comptime options: AccessOptions) []EntryType[@intFromEnum(options.index)] { - return regions.getPhysicalRegion(options).toHigherHalfVirtualAddress().access(EntryType[@intFromEnum(options.index)]); - } - - pub inline fn getAddressSpace(regions: PageTableRegions) paging.Specific { - const address_space = paging.Specific{ .cr3 = cr3.fromAddress(regions.getPhysicalRegion(.{ .index = .pml4, .user = true }).address) }; - return address_space; - } - - pub inline fn getPrivilegedAddressSpace(regions: PageTableRegions) paging.Specific { - const address_space = paging.Specific{ .cr3 = cr3.fromAddress(regions.getPhysicalRegion(.{ .index = .pml4, .user = false }).address) }; - return address_space; - } - - pub inline fn getCpuPML4(regions: PageTableRegions) *paging.PML4Table { - return regions.getPageTables(.{ .index = .pml4, .user = false })[0..paging.page_table_entry_count]; - } - - pub inline fn getUserPML4(regions: PageTableRegions) *paging.PML4Table { - return regions.getPageTables(.{ .index = .pml4, .user = true })[0..paging.page_table_entry_count]; - } -}; - -const SpawnInitCommonResult = extern struct { - page_table_regions: PageTableRegions, - scheduler: *cpu.UserScheduler, -}; - -const scheduler_memory_size = 1 << 19; -const dispatch_count = x86_64.IDT.entry_count; var once: bool = false; -fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult { - assert(!once); - once = true; - // TODO: delete in the future - assert(cpu.bsp); - cpu.driver.valid = true; +fn map(address_space: paging.Specific, virtual: VirtualAddress, physical: PhysicalAddress, size: usize, flags: privileged.Mapping.Flags) !void { + try address_space.map(physical, virtual, size, flags, cpu.page_allocator.getPageTableAllocatorInterface()); + if (flags.user) { + const indexed: paging.IndexedVirtualAddress = @bitCast(virtual.value()); + const indices = indexed.toIndices(); + const top_indexed: paging.IndexedVirtualAddress = @bitCast(virtual.offset(size).value() - lib.arch.valid_page_sizes[0]); + const top_indices = top_indexed.toIndices(); + _ = top_indices; + // TODO: make this fast or not care, depending on how many times this is going to be executed + // + const user_page_tables = &cpu.user_scheduler.s.capability_root_node.dynamic.page_table; - const allocation: extern struct { - page_table_regions: PageTableRegions, - cpu_page_table_physical_region: PhysicalMemoryRegion, - } = blk: { - const page_table_regions_total_size = PageTableRegions.total_size; - const 
cpu_page_table_size = (paging.Level.count - 1) * paging.page_table_size; - const allocation_size = page_table_regions_total_size + cpu_page_table_size; - const allocation_alignment = 2 * paging.page_table_alignment; - const total_region = try cpu.driver.getRootCapability().allocatePageCustomAlignment(allocation_size, allocation_alignment); - //log.debug("Total region: (0x{x}, 0x{x})", .{ total_region.address.value(), total_region.top().value() }); - var region_slicer = total_region; - var page_table_regions = PageTableRegions{ - .regions = undefined, - .total = total_region, - .base_virtual_address = user_scheduler_virtual_address, - }; + var page_table_ref = user_page_tables.user; - inline for (&page_table_regions.regions, 0..) |*region, index| { - region.* = region_slicer.takeSlice(PageTableRegions.sizes[index]); - } - - assert(lib.isAligned(page_table_regions.regions[0].address.value(), 2 * paging.page_table_alignment)); - - assert(region_slicer.size == cpu_page_table_size); - - const cpu_page_table_physical_region = region_slicer; - - break :blk .{ - .page_table_regions = page_table_regions, - .cpu_page_table_physical_region = cpu_page_table_physical_region, - }; - }; - - const page_table_regions = allocation.page_table_regions; - const cpu_page_table_physical_region = allocation.cpu_page_table_physical_region; - - const indexed_start = @as(paging.IndexedVirtualAddress, @bitCast(user_scheduler_virtual_address.value())); - const indexed_end = @as(paging.IndexedVirtualAddress, @bitCast(user_scheduler_virtual_address.offset(PageTableRegions.init_vas_size).value())); - // log.debug("Indexed start: {}", .{indexed_start}); - // log.debug("Indexed end: {}", .{indexed_end}); - page_table_regions.getPageTables(.{ - .index = .pml4, - .user = true, - })[indexed_start.PML4] = .{ - .present = true, - .write = true, - .user = true, - .address = paging.packAddress(paging.PML4TE, page_table_regions.getPhysicalRegion(.{ .index = .pdp }).address.value()), - }; - - page_table_regions.getPageTables(.{ .index = .pdp })[indexed_start.PDP] = .{ - .present = true, - .write = true, - .user = true, - .address = paging.packAddress(paging.PDPTE, page_table_regions.getPhysicalRegion(.{ .index = .pd }).address.value()), - }; - - const pdes = page_table_regions.getPageTables(.{ .index = .pd }); - // log.debug("PDE count: {}", .{pdes.len}); - //log.debug("PTE base: 0x{x}. PTE count: {}", .{ page_table_regions.get(.{ .index = .pt }).address.value(), page_table_regions.getPageTables(.{ .index = .pt }).len }); - - for (pdes[indexed_start.PD .. indexed_start.PD + indexed_end.PD], 0..) |*pde, pde_offset| { - const pte_index = paging.page_table_entry_count * pde_offset; - const pte_offset = pte_index * paging.page_table_entry_size; - const pte_address = page_table_regions.getPhysicalRegion(.{ .index = .pt }).offset(pte_offset).address.value(); - // log.debug("Linking PDE[{}] 0x{x} with PTE base address: 0x{x} (pte index: {}. 
pte offset: 0x{x})", .{ pde_offset, @ptrToInt(pde), pte_address, pte_index, pte_offset }); - pde.* = paging.PDTE{ - .present = true, - .write = true, - .user = true, - .address = paging.packAddress(paging.PDTE, pte_address), - }; - } - - const scheduler_memory_physical_region = try cpu.driver.getRootCapability().allocatePages(scheduler_memory_size); - const scheduler_memory_map_flags = .{ - .present = true, - .write = true, - .user = true, - .execute_disable = true, - }; - - try page_table_regions.map(user_scheduler_memory_start_virtual_address, scheduler_memory_physical_region.address, scheduler_memory_physical_region.size, scheduler_memory_map_flags); - - const root_page_tables = [2]PhysicalMemoryRegion{ - page_table_regions.getPhysicalRegion(.{ .index = .pml4, .user = false }), - page_table_regions.getPhysicalRegion(.{ .index = .pml4, .user = true }), - }; - // log.debug("Root page tables: {any}", .{root_page_tables}); - assert(root_page_tables[0].size == lib.arch.valid_page_sizes[0]); - - // Map CPU driver into the CPU page table - var cpu_page_table_physical_region_iterator = cpu_page_table_physical_region; - // log.debug("CPU page table physical region: 0x{x} - 0x{x}", .{ cpu_page_table_physical_region.address.value(), cpu_page_table_physical_region.top().value() }); - - const cpu_pte_count = paging.page_table_entry_count - paging.CPUPageTables.left_ptables; - const cpu_ptes = cpu_page_tables.p_table.toHigherHalfVirtualAddress().access(*paging.PTable)[0..cpu_pte_count]; - const user_mapped_cpu_pte_offset = (paging.Level.count - 2) * paging.page_table_size; - // log.debug("[OFFSET] 0x{x}", .{user_mapped_cpu_pte_offset}); - const user_mapped_cpu_ptes = cpu_page_table_physical_region.offset(user_mapped_cpu_pte_offset).toHigherHalfVirtualAddress().access(paging.PTE)[0..cpu_pte_count]; - @memcpy(user_mapped_cpu_ptes, cpu_ptes); - - const user_root_page_table_region = root_page_tables[1]; - const RootPageTableEntryType = paging.EntryTypeMap(lib.arch.valid_page_sizes[1])[@intFromEnum(x86_64.root_page_table_entry)]; - user_root_page_table_region.toHigherHalfVirtualAddress().access(paging.PML4TE)[paging.CPUPageTables.pml4_index] = paging.PML4TE{ - .present = true, - .write = true, - .execute_disable = false, - .address = paging.packAddress(RootPageTableEntryType, cpu_page_table_physical_region.offset(0).address.value()), - }; - - const current_address_space = paging.Specific{ .cr3 = cr3.read() }; - const src_half = (try current_address_space.getPML4TableUnchecked())[half_page_table_entry_count..][0..half_page_table_entry_count]; - @memcpy(root_page_tables[0].toHigherHalfVirtualAddress().access(paging.PML4TE)[half_page_table_entry_count..][0..half_page_table_entry_count], src_half); - - const pdp = cpu_page_table_physical_region_iterator.takeSlice(paging.page_table_size); - const pd = cpu_page_table_physical_region_iterator.takeSlice(paging.page_table_size); - const pt = cpu_page_table_physical_region_iterator.takeSlice(paging.page_table_size); - assert(cpu_page_table_physical_region_iterator.size == 0); - - const pdp_table = pdp.toHigherHalfVirtualAddress().access(paging.PDPTE); - // log.debug("pdp index: {}. 
pdp table: 0x{x}", .{ paging.CPUPageTables.pdp_index, @ptrToInt(pdp_table.ptr) }); - pdp_table[paging.CPUPageTables.pdp_index] = paging.PDPTE{ - .present = true, - .write = true, - .execute_disable = false, - .address = paging.packAddress(paging.PDPTE, pd.address.value()), - }; - - const pd_table = pd.toHigherHalfVirtualAddress().access(paging.PDTE); - pd_table[paging.CPUPageTables.pd_index] = paging.PDTE{ - .present = true, - .write = true, - .execute_disable = false, - .address = paging.packAddress(paging.PDTE, pt.address.value()), - }; - - const supporting_page_table_size = PageTableRegions.total_size; - _ = supporting_page_table_size; - const indexed_base = @as(paging.IndexedVirtualAddress, @bitCast(page_table_regions.total.address.toHigherHalfVirtualAddress().value())); - const indexed_top = @as(paging.IndexedVirtualAddress, @bitCast(page_table_regions.total.top().toHigherHalfVirtualAddress().value())); - const diff = @as(u64, @bitCast(indexed_top)) - @as(u64, @bitCast(indexed_base)); - // log.debug("Mapping 0x{x} - 0x{x} to higher half", .{ page_table_regions.total.address.value(), page_table_regions.total.top().value() }); - // log.debug("supporting_page_table_size: {}", .{supporting_page_table_size}); - // log.debug("\nBASE: {}\n\nTOP: {}\n\n", .{ indexed_base, indexed_top }); - - assert(indexed_base.PML4 == indexed_top.PML4); - assert(indexed_base.PDP == indexed_top.PDP); - const ptable_count = indexed_top.PD - indexed_base.PD + 1; - - const cpu_indexed_base = @as(paging.IndexedVirtualAddress, @bitCast(cpu_page_table_physical_region.toHigherHalfVirtualAddress().address.value())); - const cpu_indexed_top = @as(paging.IndexedVirtualAddress, @bitCast(cpu_page_table_physical_region.toHigherHalfVirtualAddress().top().value())); - const cpu_diff = @as(u64, @bitCast(cpu_indexed_top)) - @as(u64, @bitCast(cpu_indexed_base)); - // log.debug("\nCPU BASE: {}\n\nCPU TOP: {}\n\n", .{ cpu_indexed_base, cpu_indexed_top }); - assert(cpu_indexed_base.PML4 == cpu_indexed_top.PML4); - assert(cpu_indexed_base.PDP == cpu_indexed_top.PDP); - assert(cpu_indexed_base.PDP == indexed_base.PDP); - assert(cpu_indexed_base.PD == cpu_indexed_top.PD); - assert(cpu_indexed_base.PT < cpu_indexed_top.PT); - assert(cpu_indexed_base.PML4 == indexed_base.PML4); - assert(cpu_indexed_base.PDP == indexed_base.PDP); - const cpu_ptable_count = cpu_indexed_top.PD - cpu_indexed_base.PD + 1; - assert(cpu_ptable_count <= ptable_count); - - const support_pdp_table_count = 1; - const support_pd_table_count = 1; - const min = @min(@as(u64, @bitCast(indexed_base)), @as(u64, @bitCast(cpu_indexed_base))); - const max = @max(@as(u64, @bitCast(indexed_top)), @as(u64, @bitCast(cpu_indexed_top))); - const min_indexed = @as(paging.IndexedVirtualAddress, @bitCast(min)); - const general_diff = max - min; - const pte_count = @divExact(general_diff, lib.arch.valid_page_sizes[0]); - const support_p_table_count = 1 + pte_count / paging.page_table_entry_count + @intFromBool(@as(usize, paging.page_table_entry_count) - min_indexed.PT < pte_count); - // log.debug("Support p table count: {}", .{support_p_table_count}); - // log.debug("indexed base: 0x{x}. top: 0x{x}", .{ @bitCast(u64, indexed_base), @bitCast(u64, indexed_top) }); - // log.debug("cpu indexed base: 0x{x}. 
top: 0x{x}", .{ @bitCast(u64, cpu_indexed_base), @bitCast(u64, cpu_indexed_top) }); - - const support_page_table_count = @as(usize, support_pdp_table_count + support_pd_table_count + support_p_table_count); - const support_page_table_physical_region = try cpu.driver.getRootCapability().allocatePages(support_page_table_count * paging.page_table_size); - // log.debug("Support page tables: 0x{x} - 0x{x}", .{ support_page_table_physical_region.address.value(), support_page_table_physical_region.top().value() }); - // log.debug("PD table count: {}. P table count: {}", .{ support_pd_table_count, support_p_table_count }); - - const support_pdp_offset = 0; - const support_pd_offset = support_pdp_table_count * paging.page_table_size; - const support_pt_offset = support_pd_offset + support_pd_table_count * paging.page_table_size; - - const support_pml4 = page_table_regions.getPageTables(.{ .user = true, .index = .pml4 }); - const support_pdp_region = support_page_table_physical_region.offset(support_pdp_offset); - const support_pd_region = support_page_table_physical_region.offset(support_pd_offset); - const support_pt_region = support_page_table_physical_region.offset(support_pt_offset); - - assert(!support_pml4[indexed_base.PML4].present); - assert(support_pdp_table_count == 1); - - support_pml4[indexed_base.PML4] = paging.PML4TE{ - .present = true, - .write = true, - .address = paging.packAddress(paging.PML4TE, support_pdp_region.address.value()), - }; - - const support_pdp = support_pdp_region.toHigherHalfVirtualAddress().access(paging.PDPTE); - assert(!support_pdp[indexed_base.PDP].present); - assert(support_pd_table_count == 1); - - support_pdp[indexed_base.PDP] = paging.PDPTE{ - .present = true, - .write = true, - .address = paging.packAddress(paging.PDPTE, support_pd_region.address.value()), - }; - - const support_pd = support_pd_region.toHigherHalfVirtualAddress().access(paging.PDTE); - assert(!support_pd[indexed_base.PD].present); - assert(indexed_base.PD <= cpu_indexed_base.PD); - - for (0..support_p_table_count) |i| { - const pd_index = indexed_base.PD + i; - const p_table_physical_region = support_pt_region.offset(i * paging.page_table_size); - support_pd[pd_index] = paging.PDTE{ - .present = true, - .write = true, - .address = paging.packAddress(paging.PDTE, p_table_physical_region.address.value()), - }; - } - - const support_ptes = support_pt_region.toHigherHalfVirtualAddress().access(paging.PTE); - for (0..@divExact(diff, lib.arch.valid_page_sizes[0])) |page_index| { - support_ptes[indexed_base.PT + page_index] = paging.getPageEntry(paging.PTE, page_table_regions.total.offset(page_index * lib.arch.valid_page_sizes[0]).address.value(), .{ - .present = true, - .write = true, - }); - } - - for (0..@divExact(cpu_diff, lib.arch.valid_page_sizes[0])) |page_index| { - support_ptes[cpu_indexed_base.PT + page_index] = paging.getPageEntry(paging.PTE, cpu_page_table_physical_region.offset(page_index * lib.arch.valid_page_sizes[0]).address.value(), .{ - .present = true, - .write = true, - }); - } - - { - const privileged_stack_physical_region = try cpu.driver.getRootCapability().allocatePages(x86_64.capability_address_space_stack_size); - const indexed_privileged_stack = @as(paging.IndexedVirtualAddress, @bitCast(x86_64.capability_address_space_stack_address.value())); - const stack_last_page = x86_64.capability_address_space_stack_address.offset(x86_64.capability_address_space_stack_size - lib.arch.valid_page_sizes[0]); - const indexed_privileged_stack_last_page = 
@as(paging.IndexedVirtualAddress, @bitCast(stack_last_page.value())); - assert(indexed_privileged_stack.PD == indexed_privileged_stack_last_page.PD); - assert(indexed_privileged_stack.PT < indexed_privileged_stack_last_page.PT); - - const pml4te = &page_table_regions.getPageTables(.{ .index = .pml4, .user = false })[indexed_privileged_stack.PML4]; - assert(pml4te.present); - - const pdpte = &(try paging.accessPageTable(PhysicalAddress.new(paging.unpackAddress(pml4te)), *paging.PDPTable))[indexed_privileged_stack.PDP]; - assert(!pdpte.present); - const pd_table_physical_region = try cpu.driver.getRootCapability().allocatePages(paging.page_table_size); - pdpte.* = paging.PDPTE{ - .present = true, - .write = true, - .address = paging.packAddress(paging.PDTE, pd_table_physical_region.address.value()), - }; - - const pdte = &(try paging.accessPageTable(PhysicalAddress.new(paging.unpackAddress(pdpte)), *paging.PDTable))[indexed_privileged_stack.PD]; - assert(!pdte.present); - const p_table_physical_region = try cpu.driver.getRootCapability().allocatePages(paging.page_table_size); - pdte.* = paging.PDTE{ - .present = true, - .write = true, - .address = paging.packAddress(paging.PDTE, p_table_physical_region.address.value()), - }; - - const p_table = try paging.accessPageTable(PhysicalAddress.new(paging.unpackAddress(pdte)), *paging.PTable); - for (p_table[indexed_privileged_stack.PT .. @as(usize, indexed_privileged_stack_last_page.PT) + 1], 0..) |*pte, index| { - const physical_address = privileged_stack_physical_region.offset(index * paging.page_table_size).address; - pte.* = paging.getPageEntry(paging.PTE, physical_address.value(), .{ - .present = true, - .write = true, - }); - } - } - - const init_cpu_scheduler_physical_region = try cpu.driver.getRootCapability().allocatePages(@sizeOf(cpu.UserScheduler)); - const init_cpu_scheduler_virtual_region = init_cpu_scheduler_physical_region.toHigherHalfVirtualAddress(); - const init_cpu_scheduler = init_cpu_scheduler_virtual_region.address.access(*cpu.UserScheduler); - // log.debug("Init scheduler: 0x{x}", .{init_cpu_scheduler_virtual_region.address.value()}); - const cpu_scheduler_indexed = @as(paging.IndexedVirtualAddress, @bitCast(init_cpu_scheduler_virtual_region.address.value())); - // log.debug("CPU scheduler indexed: {}", .{cpu_scheduler_indexed}); - - assert(cpu_scheduler_indexed.PML4 == cpu_indexed_base.PML4); - - const scheduler_pml4te = &page_table_regions.getPageTables(.{ .index = .pml4, .user = true })[cpu_scheduler_indexed.PML4]; - assert(scheduler_pml4te.present); - - const scheduler_pdpte = &(try paging.accessPageTable(PhysicalAddress.new(paging.unpackAddress(scheduler_pml4te)), *paging.PDPTable))[cpu_scheduler_indexed.PDP]; - - // Sanity checks - - const scheduler_pdte = blk: { - const pdp_is_inside = cpu_scheduler_indexed.PDP >= cpu_indexed_base.PDP and cpu_scheduler_indexed.PDP <= cpu_indexed_top.PDP; - // log.debug("PDP inside: {}", .{pdp_is_inside}); - assert(scheduler_pdpte.present == pdp_is_inside); - - if (!scheduler_pdpte.present) { - const pdte_allocation = try cpu.driver.getRootCapability().allocatePages(paging.page_table_size); - scheduler_pdpte.* = .{ - .present = true, - .write = true, - .address = paging.packAddress(@TypeOf(scheduler_pdpte.*), pdte_allocation.address.value()), + for (0..paging.Level.count - 1) |level_index| { + const page_table = user_page_tables.getPageTable(page_table_ref) catch |err| { + log.err("Error {s} at level {} when trying to map 0x{x} to 0x{x}", .{ @errorName(err), level_index, 
virtual.value(), physical.value() });
+                const physical_address = address_space.translateAddress(virtual, .{
+                    .execute_disable = !flags.execute,
+                    .write = flags.write,
+                    .user = flags.user,
+                }) catch @panic("Could not translate address");
+                if (physical_address.value() != physical.value()) {
+                    @panic("Address mismatch");
+                } else {
+                    @panic("Address match");
+                }
            };
+            page_table_ref = page_table.children[indices[level_index]];
+        }
-        break :blk &(try paging.accessPageTable(PhysicalAddress.new(paging.unpackAddress(scheduler_pdpte)), *paging.PDTable))[cpu_scheduler_indexed.PD];
-    };
+        assert(indexed.PML4 == top_indexed.PML4);
+        assert(indexed.PDP == top_indexed.PDP);
+        assert(indexed.PD == top_indexed.PD);
+        assert(indexed.PT <= top_indexed.PT);
-    const scheduler_pte = blk: {
-        const is_inside_cpu_page_table_limits = cpu_scheduler_indexed.PD >= cpu_indexed_base.PD and cpu_scheduler_indexed.PD <= cpu_indexed_top.PD;
-        assert(is_inside_cpu_page_table_limits == scheduler_pdte.present);
-        if (!scheduler_pdte.present) {
-            const pte_allocation = try cpu.driver.getRootCapability().allocatePages(paging.page_table_size);
-            scheduler_pdte.* = .{
-                .present = true,
-                .write = true,
-                .address = paging.packAddress(@TypeOf(scheduler_pdte.*), pte_allocation.address.value()),
+        const page_table = try user_page_tables.getPageTable(page_table_ref);
+        var index: u10 = indexed.PT;
+        while (index <= top_indexed.PT) : (index += 1) {
+            const leaf = Leaf{
+                // Each leaf covers one 4KB page, so advance the physical address by whole pages (offset is in bytes)
+                .physical = physical.offset(@as(u64, index - indexed.PT) * lib.arch.valid_page_sizes[0]),
+                .flags = .{
+                    .size = .@"4KB",
+                },
            };
+            const leaf_ref = try user_page_tables.appendLeaf(&cpu.user_scheduler.s.capability_root_node.heap.allocator, leaf);
+            page_table.children[index] = leaf_ref;
        }
+    }
+}
-        break :blk &(try paging.accessPageTable(PhysicalAddress.new(paging.unpackAddress(scheduler_pdte)), *paging.PTable))[cpu_scheduler_indexed.PT];
-    };
-
-    scheduler_pte.* = paging.getPageEntry(paging.PTE, init_cpu_scheduler_physical_region.address.value(), .{
-        .present = true,
-        .write = true,
-    });
-
-    init_cpu_scheduler.* = cpu.UserScheduler{
-        .common = user_scheduler_virtual_address.access(*birth.UserScheduler),
-        .capability_root_node = cpu.capabilities.Root{
-            .static = .{
-                .cpu = true,
-                .boot = true,
-                .process = true,
-            },
-            .dynamic = .{
-                .io = .{
-                    .debug = true,
-                },
-                .ram = cpu.driver.getRootCapability().dynamic.ram,
-                .cpu_memory = .{
-                    .flags = .{
-                        .allocate = true,
-                    },
-                },
-                .page_table = .{},
-            },
-            .scheduler = .{
-                .handle = init_cpu_scheduler,
-                .memory = scheduler_memory_physical_region,
-            },
+const CPUPageTables = privileged.arch.CPUPageTables;
+// TODO: construct scheduler virtual memory tree
+pub fn setupMapping(scheduler: *cpu.UserScheduler, user_virtual_region: VirtualMemoryRegion, cpu_page_tables: CPUPageTables, init_file: cpu.init.InitFile, regions: extern struct {
+    scheduler: cpu.init.MappingArgument,
+    heap: cpu.init.MappingArgument,
+}) !void {
+    // INFO: Need this hack for page table allocation callback to work
+    cpu.user_scheduler = scheduler;
+    _ = user_virtual_region;
+    const page_tables = &scheduler.s.capability_root_node.dynamic.page_table;
+    const heap_allocator = &scheduler.s.capability_root_node.heap.allocator;
+    const page_table_size = paging.page_table_entry_count * paging.page_table_entry_size;
+    log.debug("Root page table allocation", .{});
+    const root_page_table_allocation = try cpu.page_allocator.allocateAligned(2 * page_table_size, cpu.arch.user_root_page_table_alignment, .{ .reason = .user_protected });
+    const root_page_tables =
root_page_table_allocation.split(2); + log.debug("R priv: 0x{x}. R user: 0x{x}", .{ root_page_tables[0].address.value(), root_page_tables[1].address.value() }); + page_tables.privileged = .{ + .region = root_page_tables[0], + .mapping = root_page_tables[0].address.toHigherHalfVirtualAddress(), + .flags = .{ + .level = .PML4, }, }; - - const higher_half_scheduler_common = scheduler_memory_physical_region.address.toHigherHalfVirtualAddress().access(*birth.UserScheduler); - // log.debug("Higher half: 0x{x}", .{@ptrToInt(higher_half_scheduler_common)}); - higher_half_scheduler_common.disabled = true; - higher_half_scheduler_common.core_id = cpu.core_id; - - // log.debug("cpu scheduler: 0x{x}", .{@ptrToInt(init_cpu_scheduler)}); - - return SpawnInitCommonResult{ - .page_table_regions = page_table_regions, - .scheduler = init_cpu_scheduler, + const root_user_page_table = cpu.interface.PageTable{ + .region = root_page_tables[1], + .mapping = root_page_tables[1].address.toHigherHalfVirtualAddress(), + .flags = .{ + .level = .PML4, + }, }; + log.debug("Appending user root page table", .{}); + page_tables.user = try page_tables.appendPageTable(heap_allocator, root_user_page_table); + + log.debug("Copying higher half", .{}); + { + // Copy the higher half into the user protected address space + const current_address_space = paging.Specific{ .cr3 = cr3.read() }; + const src_half = (try current_address_space.getPML4TableUnchecked())[half_page_table_entry_count..][0..half_page_table_entry_count]; + const dst_half = page_tables.privileged.region.toHigherHalfVirtualAddress().access(paging.PML4TE)[half_page_table_entry_count..][0..half_page_table_entry_count]; + @memcpy(dst_half, src_half); + + // Map CPU driver into the CPU page table + const cpu_pte_count = paging.page_table_entry_count - paging.CPUPageTables.left_ptables; + const cpu_support_page_table_size = (paging.Level.count - 1) * paging.page_table_size; + const cpu_support_page_table_allocation = try cpu.page_allocator.allocate(cpu_support_page_table_size, .{ .reason = .user_protected }); + var cpu_support_page_table_allocator = cpu_support_page_table_allocation; + const pdp = try cpu_support_page_table_allocator.takeSlice(paging.page_table_size); + const pd = try cpu_support_page_table_allocator.takeSlice(paging.page_table_size); + const pt = try cpu_support_page_table_allocator.takeSlice(paging.page_table_size); + assert(cpu_support_page_table_allocator.size == 0); + + // Copy CPU driver PTEs to user protected address space + const cpu_ptes = cpu_page_tables.p_table.toHigherHalfVirtualAddress().access(*paging.PTable)[0..cpu_pte_count]; + const user_mapped_cpu_ptes = pt.toHigherHalfVirtualAddress().access(paging.PTE)[0..cpu_pte_count]; + @memcpy(user_mapped_cpu_ptes, cpu_ptes); + + // Fill the PML4 entry + root_user_page_table.region.toHigherHalfVirtualAddress().access(paging.PML4TE)[paging.CPUPageTables.pml4_index] = paging.PML4TE{ + .present = true, + .write = true, + .execute_disable = false, + .address = paging.packAddress(paging.PML4TE, pdp.address.value()), + }; + + // Fill the PDP entry + pdp.toHigherHalfVirtualAddress().access(paging.PDPTE)[paging.CPUPageTables.pdp_index] = paging.PDPTE{ + .present = true, + .write = true, + .execute_disable = false, + .address = paging.packAddress(paging.PDPTE, pd.address.value()), + }; + + // Fill the PD entry + pd.toHigherHalfVirtualAddress().access(paging.PDTE)[paging.CPUPageTables.pd_index] = paging.PDTE{ + .present = true, + .write = true, + .execute_disable = false, + .address = 
paging.packAddress(paging.PDTE, pt.address.value()), + }; + } + + const privileged_address_space = paging.Specific.fromPhysicalRegion(root_page_tables[0]); + const user_address_space = paging.Specific.fromPhysicalRegion(root_page_tables[1]); + + const scheduler_memory_map_flags = .{ + .write = true, + .user = true, + }; + + try map(user_address_space, regions.scheduler.virtual, regions.scheduler.physical, regions.scheduler.size, scheduler_memory_map_flags); + try map(user_address_space, regions.heap.virtual, regions.heap.physical, regions.heap.size, scheduler_memory_map_flags); + for (init_file.segments) |segment| { + try map(user_address_space, segment.virtual, segment.physical, segment.memory_size, segment.flags); + } + + // Map protected stack + const privileged_stack_physical_region = try cpu.page_allocator.allocate(x86_64.capability_address_space_stack_size, .{ .reason = .user_protected }); + try map(privileged_address_space, x86_64.capability_address_space_stack_address, privileged_stack_physical_region.address, x86_64.capability_address_space_stack_size, .{ + .write = true, + .execute = false, + .user = false, + }); + + const cpu_pml4 = try privileged_address_space.getPML4TableUnchecked(); + const user_pml4 = try user_address_space.getPML4TableUnchecked(); + @memcpy(cpu_pml4[0..cpu.arch.init.half_page_table_entry_count], user_pml4[0..cpu.arch.init.half_page_table_entry_count]); + + scheduler.s.capability_root_node.dynamic.page_table.switchPrivileged(); +} + +pub fn setupSchedulerCommon(scheduler_common: *birth.Scheduler.Common, entry_point: usize) void { + const user_scheduler_virtual_address = @intFromPtr(scheduler_common); + IA32_FS_BASE.write(user_scheduler_virtual_address); + // Set arguments + // First argument + scheduler_common.disabled_save_area.registers.rdi = user_scheduler_virtual_address; + // Second argument + const is_init = true; + scheduler_common.disabled_save_area.registers.rsi = @intFromBool(is_init); + + scheduler_common.disabled_save_area.registers.rip = entry_point; + scheduler_common.disabled_save_area.registers.rsp = user_scheduler_virtual_address + @offsetOf(birth.Scheduler.Common, "setup_stack") + scheduler_common.setup_stack.len; + scheduler_common.setup_stack_lock.value = true; + scheduler_common.disabled_save_area.registers.rflags = .{ .IF = true }; // Set RFLAGS + + scheduler_common.disabled_save_area.fpu.fcw = 0x037f; // Set FPU + scheduler_common.disabled_save_area.fpu.mxcsr = 0x1f80; } diff --git a/src/cpu/arch/x86/64/syscall.zig b/src/cpu/arch/x86/64/syscall.zig index 8465d74..2f77dd9 100644 --- a/src/cpu/arch/x86/64/syscall.zig +++ b/src/cpu/arch/x86/64/syscall.zig @@ -24,76 +24,10 @@ const pcid_mask = 1 << pcid_bit; /// - R10: argument 3 /// - R8: argument 4 /// - R9: argument 5 -fn birthSyscall(comptime Syscall: type, raw_arguments: birth.syscall.Arguments) Syscall.ErrorSet.Error!Syscall.Result { - cpu.syscall_count += 1; - comptime assert(Syscall == birth.capabilities.Syscall(Syscall.capability, Syscall.command)); - const capability: birth.capabilities.Type = Syscall.capability; - const command: birth.capabilities.Command(capability) = Syscall.command; - const arguments = try Syscall.toArguments(raw_arguments); - - return if (cpu.user_scheduler.capability_root_node.hasPermissions(capability, command)) switch (capability) { - .io => switch (command) { - .copy, .mint, .retype, .delete, .revoke, .create => unreachable, - .log => blk: { - const message = arguments; - cpu.writer.writeAll(message) catch unreachable; - comptime assert(Syscall.Result == 
usize); - break :blk message.len; - }, - }, - .cpu => switch (command) { - .copy, .mint, .retype, .delete, .revoke, .create => unreachable, - .get_core_id => cpu.core_id, - .shutdown => cpu.shutdown(.success), - .get_command_buffer => { - const command_buffer = arguments; - _ = command_buffer; - @panic("TODO: get_command_buffer"); - }, - }, - .cpu_memory => switch (command) { - .allocate => blk: { - comptime assert(@TypeOf(arguments) == usize); - const size = arguments; - const physical_region = try cpu.user_scheduler.capability_root_node.allocatePages(size); - try cpu.user_scheduler.capability_root_node.allocateCPUMemory(physical_region, .{ .privileged = false }); - break :blk physical_region.address; - }, - else => @panic(@tagName(command)), - }, - .ram => unreachable, - .boot => switch (command) { - .get_bundle_size => cpu.bundle.len, - .get_bundle_file_list_size => cpu.bundle_files.len, - else => @panic(@tagName(command)), - }, - .process => switch (command) { - .exit => switch (arguments) { - true => cpu.shutdown(.success), - false => cpu.panic("User process panicked", .{}), - }, - else => @panic(@tagName(command)), - }, - .page_table => @panic("TODO: page_table"), - } else error.forbidden; -} - -export fn syscall(registers: *const Registers) callconv(.C) birth.syscall.Result { - const options = @as(birth.syscall.Options, @bitCast(registers.syscall_number)); - const arguments = birth.syscall.Arguments{ registers.rdi, registers.rsi, registers.rdx, registers.r10, registers.r8, registers.r9 }; - - return switch (options.general.convention) { - .birth => switch (options.birth.type) { - inline else => |capability| switch (@as(birth.capabilities.Command(capability), @enumFromInt(options.birth.command))) { - inline else => |command| blk: { - const Syscall = birth.capabilities.Syscall(capability, command); - const result: Syscall.Result = birthSyscall(Syscall, arguments) catch |err| break :blk Syscall.errorToRaw(err); - break :blk Syscall.resultToRaw(result); - }, - }, - }, - .linux => @panic("linux syscall"), - }; +export fn syscall(registers: *const Registers) callconv(.C) birth.interface.Raw.Result { + const options = @as(birth.interface.Raw.Options, @bitCast(registers.syscall_number)); + const arguments = birth.interface.Raw.Arguments{ registers.rdi, registers.rsi, registers.rdx, registers.r10, registers.r8, registers.r9 }; + return cpu.interface.processFromRaw(options, arguments); } /// SYSCALL documentation @@ -107,7 +41,7 @@ export fn syscall(registers: *const Registers) callconv(.C) birth.syscall.Result /// - R10: argument 3 /// - R8: argument 4 /// - R9: argument 5 -pub fn entryPoint() callconv(.Naked) void { +pub fn entryPoint() callconv(.Naked) noreturn { asm volatile ( \\endbr64 \\swapgs @@ -252,8 +186,6 @@ pub fn entryPoint() callconv(.Naked) void { asm volatile ( \\int3 ::: "memory"); - - unreachable; } pub const Registers = extern struct { diff --git a/src/cpu/arch/x86_64.zig b/src/cpu/arch/x86_64.zig index 564da80..8a379b3 100644 --- a/src/cpu/arch/x86_64.zig +++ b/src/cpu/arch/x86_64.zig @@ -22,7 +22,7 @@ const VirtualMemoryRegion = lib.VirtualMemoryRegion; const cpu = @import("cpu"); const Heap = cpu.Heap; -const init = @import("./x86/64/init.zig"); +pub const init = @import("./x86/64/init.zig"); pub const syscall = @import("./x86/64/syscall.zig"); pub const entryPoint = init.entryPoint; @@ -56,13 +56,33 @@ pub const Registers = extern struct { const interrupt_kind: u32 = 0; +const PageFaultFlags = packed struct(u32) { + present: bool, + write: bool, + user: bool, + 
reserved_write: bool, + instruction_fetch: bool, + protection_key: bool, + shadow_stack: bool, + reserved0: u8 = 0, + software_guard_extensions: bool, + reserved: u16 = 0, +}; + export fn interruptHandler(regs: *const InterruptRegisters, interrupt_number: u8) void { switch (interrupt_number) { local_timer_vector => { APIC.write(.eoi, 0); nextTimer(10); }, - else => cpu.panicFromInstructionPointerAndFramePointer(regs.rip, regs.rbp, "Exception: 0x{x}", .{interrupt_number}), + else => { + if (interrupt_number == 0xe) { + const pagefault_flags: PageFaultFlags = @bitCast(@as(u32, @intCast(regs.error_code))); + const fault_address = privileged.arch.x86_64.registers.cr2.read(); + log.err("Page 0x{x} not mapped at IP 0x{x}. Flags: {}", .{ fault_address, regs.rip, pagefault_flags }); + } + cpu.panicFromInstructionPointerAndFramePointer(regs.rip, regs.rbp, "Exception 0x{x} at IP 0x{x}", .{ interrupt_number, regs.rip }); + }, } } @@ -103,7 +123,7 @@ pub const invariant_tsc = false; pub const capability_address_space_size = 1 * lib.gb; pub const capability_address_space_start = capability_address_space_stack_top - capability_address_space_size; pub const capability_address_space_stack_top = 0xffff_ffff_8000_0000; -pub const capability_address_space_stack_size = privileged.default_stack_size; +pub const capability_address_space_stack_size = 10 * privileged.default_stack_size; pub const capability_address_space_stack_alignment = lib.arch.valid_page_sizes[0]; pub const capability_address_space_stack_address = VirtualAddress.new(capability_address_space_stack_top - capability_address_space_stack_size); pub const code_64 = @offsetOf(GDT, "code_64"); @@ -263,3 +283,5 @@ pub const root_page_table_entry = @as(cpu.arch.PageTableEntry, @enumFromInt(0)); pub const IOMap = extern struct { debug: bool, }; + +pub const user_root_page_table_alignment = 2 * lib.arch.valid_page_sizes[0]; diff --git a/src/cpu/capabilities.zig b/src/cpu/capabilities.zig deleted file mode 100644 index ca151a2..0000000 --- a/src/cpu/capabilities.zig +++ /dev/null @@ -1,419 +0,0 @@ -const lib = @import("lib"); -const assert = lib.assert; -const Allocator = lib.Allocator; -const enumCount = lib.enumCount; -const log = lib.log.scoped(.capabilities); - -const privileged = @import("privileged"); -const PhysicalAddress = lib.PhysicalAddress; -const PhysicalMemoryRegion = lib.PhysicalMemoryRegion; -const birth = @import("birth"); -const cpu = @import("cpu"); - -pub const RootDescriptor = extern struct { - value: *Root, -}; - -pub const Static = enum { - cpu, - boot, - process, - - pub const count = lib.enumCount(@This()); - - pub const Bitmap = @Type(.{ - .Struct = blk: { - const full_bit_size = @max(@as(comptime_int, 1 << 3), @as(u8, @sizeOf(Static)) << 3); - break :blk .{ - .layout = .Packed, - .backing_integer = lib.IntType(.unsigned, full_bit_size), - .fields = fields: { - var fields: []const lib.Type.StructField = &.{}; - inline for (lib.enumFields(Static)) |static_field| { - fields = fields ++ [1]lib.Type.StructField{.{ - .name = static_field.name, - .type = bool, - .default_value = null, - .is_comptime = false, - .alignment = 0, - }}; - } - - assert(Static.count > 0); - assert(@sizeOf(Static) > 0 or Static.count == 1); - - const padding_type = lib.IntType(.unsigned, full_bit_size - Static.count); - - fields = fields ++ [1]lib.Type.StructField{.{ - .name = "reserved", - .type = padding_type, - .default_value = &@as(padding_type, 0), - .is_comptime = false, - .alignment = 0, - }}; - break :fields fields; - }, - .decls = &.{}, - .is_tuple = 
false, - }; - }, - }); -}; - -pub const Dynamic = enum { - io, - ram, // Barrelfish equivalent: RAM (no PhysAddr) - cpu_memory, // Barrelfish equivalent: Frame - page_table, // Barrelfish equivalent: VNode - // irq_table, - // device_memory, - // scheduler, - - pub const Map = extern struct { - io: IO, - ram: RAM, - cpu_memory: CPUMemory, - page_table: PageTables, - - comptime { - inline for (lib.fields(Dynamic.Map), lib.fields(Dynamic)) |struct_field, enum_field| { - assert(lib.equal(u8, enum_field.name, struct_field.name)); - } - } - }; -}; - -pub const RAM = extern struct { - lists: [lib.arch.reverse_valid_page_sizes.len]?*Region = .{null} ** lib.arch.valid_page_sizes.len, - - const AllocateError = error{ - OutOfMemory, - }; - - inline fn getListIndex(size: usize) usize { - inline for (lib.arch.reverse_valid_page_sizes, 0..) |reverse_page_size, reverse_index| { - if (size >= reverse_page_size) return reverse_index; - } - - unreachable; - } - - pub const Region = extern struct { - region: PhysicalMemoryRegion, - next: ?*@This() = null, - - const UnalignedAllocationResult = extern struct { - wasted: PhysicalMemoryRegion, - allocated: PhysicalMemoryRegion, - }; - - inline fn allocateUnaligned(free_ram: *Region, size: usize, alignment: usize) ?UnalignedAllocationResult { - const aligned_region_address = lib.alignForward(usize, free_ram.region.address.value(), alignment); - const wasted_space = aligned_region_address - free_ram.region.address.value(); - if (free_ram.region.size >= wasted_space + size) { - const wasted_region = free_ram.region.takeSlice(wasted_space); - const allocated_region = free_ram.region.takeSlice(size); - return UnalignedAllocationResult{ - .wasted = wasted_region, - .allocated = allocated_region, - }; - } - - return null; - } - }; -}; - -pub const CPUMemory = extern struct { - privileged: RAM = .{}, - user: RAM = .{}, - flags: Flags, - - const Flags = packed struct(u64) { - allocate: bool, - reserved: u63 = 0, - }; -}; - -pub const PageTables = extern struct { - foo: u32 = 0, -}; - -pub const IO = extern struct { - debug: bool, -}; - -pub const Scheduler = extern struct { - handle: ?*cpu.UserScheduler = null, - memory: PhysicalMemoryRegion, -}; - -comptime { - assert(enumCount(Dynamic) + enumCount(Static) == enumCount(birth.capabilities.Type)); -} - -pub const Root = extern struct { - static: Static.Bitmap, - dynamic: Dynamic.Map, - scheduler: Scheduler, - heap: Heap = .{}, - padding: [padding_byte_count]u8 = .{0} ** padding_byte_count, - - const max_alignment = @max(@alignOf(Static.Bitmap), @alignOf(Dynamic.Map), @alignOf(Scheduler), @alignOf(Heap)); - const total_size = lib.alignForward(usize, @sizeOf(Static.Bitmap) + @sizeOf(Dynamic.Map) + @sizeOf(Scheduler) + @sizeOf(Heap), max_alignment); - const page_aligned_size = lib.alignForward(usize, total_size, lib.arch.valid_page_sizes[0]); - const padding_byte_count = page_aligned_size - total_size; - - comptime { - assert(@sizeOf(Root) % lib.arch.valid_page_sizes[0] == 0); - } - - pub fn copy(root: *Root, other: *Root) void { - other.static = root.static; - // TODO: - other.dynamic = root.dynamic; - } - - pub inline fn hasPermissions(root: *const Root, comptime capability_type: birth.capabilities.Type, command: birth.capabilities.Command(capability_type)) bool { - return switch (capability_type) { - // static capabilities - inline .cpu, - .boot, - .process, - => |static_capability| @field(root.static, @tagName(static_capability)), - // dynamic capabilities - .io => switch (command) { - .copy, .mint, .retype, .delete, 
.revoke, .create => unreachable, - .log => root.dynamic.io.debug, - }, - .cpu_memory => root.dynamic.cpu_memory.flags.allocate, - .ram => unreachable, - .page_table => unreachable, - }; - } - - pub const AllocateError = error{ - OutOfMemory, - }; - - // Fast path - pub fn allocatePages(root: *Root, size: usize) AllocateError!PhysicalMemoryRegion { - assert(size != 0); - assert(lib.isAligned(size, lib.arch.valid_page_sizes[0])); - var index = RAM.getListIndex(size); - - const result = blk: { - while (true) : (index -= 1) { - const list = root.dynamic.ram.lists[index]; - var iterator = list; - - while (iterator) |free_ram| : (iterator = free_ram.next) { - if (free_ram.region.size >= size) { - if (free_ram.region.size >= size) { - const result = free_ram.region.takeSlice(size); - break :blk result; - } else { - @panic("TODO: cnsume all reigon"); - } - } - } - - if (index == 0) break; - } - - return error.OutOfMemory; - }; - - @memset(result.toHigherHalfVirtualAddress().access(u8), 0); - - return result; - } - - // Slow uncommon path. Use cases: - // 1. CR3 switch. This is assumed to be privileged, so this function assumes privileged use of the memory - pub fn allocatePageCustomAlignment(root: *Root, size: usize, alignment: usize) AllocateError!PhysicalMemoryRegion { - assert(alignment > lib.arch.valid_page_sizes[0] and alignment < lib.arch.valid_page_sizes[1]); - - comptime assert(lib.arch.valid_page_sizes.len == 3); - var index = RAM.getListIndex(size); - - while (true) : (index -= 1) { - if (root.dynamic.ram.lists[index]) |smallest_region_list| { - var iterator: ?*cpu.capabilities.RAM.Region = smallest_region_list; - while (iterator) |free_ram| : (iterator = free_ram.next) { - if (lib.isAligned(free_ram.region.address.value(), alignment)) { - if (free_ram.region.size >= size) { - const allocated_region = free_ram.region.takeSlice(size); - return allocated_region; - } - } else if (free_ram.allocateUnaligned(size, alignment)) |unaligned_allocation| { - try root.addRegion(&root.dynamic.ram, unaligned_allocation.wasted); - return unaligned_allocation.allocated; - } - } - } - - if (index == 0) break; - } - - return AllocateError.OutOfMemory; - } - - fn allocateSingle(root: *Root, comptime T: type) AllocateError!*T { - var iterator = root.heap.first; - while (iterator) |heap_region| : (iterator = heap_region.next) { - if (heap_region.alignmentFits(@alignOf(T))) { - if (heap_region.sizeFits(@sizeOf(T))) { - const allocated_region = heap_region.takeRegion(@sizeOf(T)); - const result = &allocated_region.toHigherHalfVirtualAddress().access(T)[0]; - return result; - } - } else { - @panic("ELSE"); - } - } - - const physical_region = try root.allocatePages(lib.arch.valid_page_sizes[0]); - const heap_region = physical_region.toHigherHalfVirtualAddress().address.access(*Heap.Region); - const first = root.heap.first; - heap_region.* = .{ - .descriptor = physical_region.offset(@sizeOf(Heap.Region)), - .allocated_size = @sizeOf(Heap.Region), - .next = first, - }; - - root.heap.first = heap_region; - - return try root.allocateSingle(T); - } - - fn allocateMany(root: *Root, comptime T: type, count: usize) AllocateError![]T { - _ = count; - _ = root; - - @panic("TODO many"); - } - - fn addRegion(root: *Root, ram: *RAM, physical_region: PhysicalMemoryRegion) !void { - const index = RAM.getListIndex(physical_region.size); - const new_region = try root.allocateSingle(RAM.Region); - new_region.* = RAM.Region{ - .region = physical_region, - .next = root.dynamic.ram.lists[index], - }; - - ram.lists[index] = 
new_region; - } - - pub const AllocateCPUMemoryOptions = packed struct { - privileged: bool, - }; - - pub fn allocateCPUMemory(root: *Root, physical_region: PhysicalMemoryRegion, options: AllocateCPUMemoryOptions) !void { - const ram_region = switch (options.privileged) { - true => &root.dynamic.cpu_memory.privileged, - false => &root.dynamic.cpu_memory.user, - }; - - try root.addRegion(ram_region, physical_region); - } - - pub const Heap = extern struct { - first: ?*Region = null, - - const AllocateError = error{ - OutOfMemory, - }; - - pub fn new(physical_region: PhysicalMemoryRegion, previous_allocated_size: usize) Heap { - const allocated_size = previous_allocated_size + @sizeOf(Region); - assert(physical_region.size > allocated_size); - const region = physical_region.offset(previous_allocated_size).address.toHigherHalfVirtualAddress().access(*Region); - region.* = .{ - .descriptor = physical_region, - .allocated_size = allocated_size, - }; - return Heap{ - .first = region, - }; - } - - fn create(heap: *Heap, comptime T: type) Heap.AllocateError!*T { - const result = try heap.allocate(T, 1); - return &result[0]; - } - - fn allocate(heap: *Heap, comptime T: type, count: usize) Heap.AllocateError![]T { - var iterator = heap.first; - while (iterator) |heap_region| { - const allocation = heap_region.allocate(T, count) catch continue; - return allocation; - } - @panic("TODO: allocate"); - } - - const Region = extern struct { - descriptor: PhysicalMemoryRegion, - allocated_size: usize, - next: ?*Region = null, - - inline fn getFreeRegion(region: Region) PhysicalMemoryRegion { - const free_region = region.descriptor.offset(region.allocated_size); - assert(free_region.size > 0); - return free_region; - } - - const AllocateError = error{ - OutOfMemory, - }; - - fn takeRegion(region: *Region, size: usize) PhysicalMemoryRegion { - var free_region = region.getFreeRegion(); - assert(free_region.size >= size); - const allocated_region = free_region.takeSlice(size); - region.allocated_size += size; - return allocated_region; - } - - fn allocate(region: *Region, comptime T: type, count: usize) Region.AllocateError![]T { - const free_region = region.getFreeRegion(); - _ = free_region; - _ = count; - @panic("TODO: region allocate"); - } - - fn create(region: *Region, comptime T: type) Region.AllocateError!*T { - const result = try region.allocate(T, 1); - return &result[0]; - } - - inline fn canAllocateDirectly(region: Region, size: usize, alignment: usize) bool { - const alignment_fits = region.alignmentFits(alignment); - const size_fits = region.sizeFits(size); - return alignment_fits and size_fits; - } - - inline fn canAllocateSplitting(region: Region, size: usize, alignment: usize) bool { - const free_region = region.getFreeRegion(); - const aligned_region_address = lib.alignForward(usize, free_region.address.value(), alignment); - const wasted_space = aligned_region_address - free_region.address.value(); - log.warn("Wasted space: {} bytes", .{wasted_space}); - _ = size; - @panic("TODO: canAllocateSplitting"); - } - - inline fn sizeFits(region: Region, size: usize) bool { - return region.descriptor.size - region.allocated_size >= size; - } - - inline fn alignmentFits(region: Region, alignment: usize) bool { - const result = lib.isAligned(region.getFreeRegion().address.value(), alignment); - return result; - } - }; - }; -}; - -pub const RootPageTableEntry = extern struct { - address: PhysicalAddress, -}; diff --git a/src/cpu/init.zig b/src/cpu/init.zig new file mode 100644 index 0000000..0bef328 --- 
/dev/null +++ b/src/cpu/init.zig @@ -0,0 +1,364 @@ +const birth = @import("birth"); +const bootloader = @import("bootloader"); +const cpu = @import("cpu"); +const lib = @import("lib"); +const privileged = @import("privileged"); + +const assert = lib.assert; +const log = lib.log; +const PhysicalAddress = lib.PhysicalAddress; +const PhysicalMemoryRegion = lib.PhysicalMemoryRegion; +const VirtualAddress = lib.VirtualAddress; +const VirtualMemoryRegion = lib.VirtualMemoryRegion; + +const RegionList = cpu.RegionList; +const PageTableRegions = cpu.arch.init.PageTableRegions; + +const paging = privileged.arch.paging; +pub const Error = error{ + feature_requested_and_not_available, + no_files, + cpu_file_not_found, + init_file_not_found, + no_space_for_bootstrap_region, +}; + +pub fn initialize(bootloader_information: *bootloader.Information) !noreturn { + // bootloader_information.draw_context.clearScreen(0xffff7f50); + // Do an integrity check so that the bootloader information is in perfect state and there is no weird memory behavior. + // This is mainly due to the transition from a 32-bit bootloader to a 64-bit CPU driver in the x86-64 architecture. + try bootloader_information.checkIntegrity(); + // Informing the bootloader information struct that we have reached the CPU driver and any bootloader + // functionality is not available anymore + bootloader_information.stage = .cpu; + // Check that the bootloader has loaded some files as the CPU driver needs them to go forward + cpu.bundle = bootloader_information.getSlice(.bundle); + if (cpu.bundle.len == 0) { + return Error.no_files; + } + cpu.bundle_files = bootloader_information.getSlice(.file_list); + if (cpu.bundle_files.len == 0) { + return Error.no_files; + } + + try cpu.arch.init.initialize(); + + const memory_map_entries = bootloader_information.getMemoryMapEntries(); + const page_counters = bootloader_information.getPageCounters(); + + const first_heap_allocation_size = 2 * lib.arch.valid_page_sizes[0]; + + var heap_region_metadata: struct { + region: PhysicalMemoryRegion, + free_size: u64, + index: usize, + } = for (memory_map_entries, page_counters, 0..) |mmap_entry, page_counter, index| { + if (mmap_entry.type == .usable) { + const free_region = mmap_entry.getFreeRegion(page_counter); + if (free_region.size >= first_heap_allocation_size) { + break .{ + .region = PhysicalMemoryRegion.new(.{ + .address = free_region.address, + .size = free_region.size, + }), + .free_size = free_region.size - first_heap_allocation_size, + .index = index, + }; + } + } + } else return error.no_space_for_bootstrap_region; + + const heap_region = try heap_region_metadata.region.takeSlice(first_heap_allocation_size); + try cpu.heap.addBootstrapingRegion(heap_region.toHigherHalfVirtualAddress()); + + const host_free_region_list = try cpu.heap.create(RegionList); + + var free_size: u64 = 0; + _ = try host_free_region_list.append(heap_region_metadata.region); + free_size += heap_region_metadata.region.size; + + var region_list_iterator = host_free_region_list; + + for (memory_map_entries, page_counters, 0..) 
|memory_map_entry, page_counter, index| { + if (index == heap_region_metadata.index) continue; + + if (memory_map_entry.type == .usable) { + const free_region = memory_map_entry.getFreeRegion(page_counter); + + if (free_region.size > 0) { + assert(lib.isAligned(free_region.size, lib.arch.valid_page_sizes[0])); + _ = region_list_iterator.append(free_region) catch { + const new_region_list = try cpu.heap.create(RegionList); + region_list_iterator.metadata.next = new_region_list; + new_region_list.metadata.previous = region_list_iterator; + region_list_iterator = new_region_list; + _ = try region_list_iterator.append(free_region); + }; + + free_size += free_region.size; + } + } + } + + cpu.page_allocator.free_regions = host_free_region_list; + cpu.page_allocator.free_byte_count = free_size; + + // Add used regions by the bootloader to the physical memory manager + for (memory_map_entries, page_counters) |memory_map_entry, page_counter| { + if (memory_map_entry.type == .usable) { + const used_region = memory_map_entry.getUsedRegion(page_counter); + if (used_region.size > 0) { + assert(lib.isAligned(used_region.size, lib.arch.valid_page_sizes[0])); + try cpu.page_allocator.appendUsedRegion(used_region, .{ .reason = .bootloader }); + } + } + } + + var used_regions = cpu.page_allocator.used_regions; + var used_memory_by_bootloader: usize = 0; + while (used_regions) |used_region| : (used_regions = used_region.next) { + if (used_region.use_case.reason == .bootloader) { + used_memory_by_bootloader += used_region.region.size; + } + } + + log.debug("Used memory by the bootloader: 0x{x} bytes", .{used_memory_by_bootloader}); + + try cpu.page_allocator.appendUsedRegion(heap_region, .{ .reason = .heap }); + + switch (cpu.bsp) { + true => { + // Setup kernel debug information + cpu.debug_info = blk: { + const cpu_driver_executable_descriptor = try bootloader_information.getFileDescriptor("cpu_driver"); + const elf_file = file: { + const aligned_file_len = lib.alignForward(usize, cpu_driver_executable_descriptor.content.len, lib.arch.valid_page_sizes[0]); + const elf_file_physical_allocation = try cpu.page_allocator.allocate(aligned_file_len, .{ .reason = .privileged }); + break :file elf_file_physical_allocation.toHigherHalfVirtualAddress().address.access([*]align(lib.arch.valid_page_sizes[0]) u8)[0..elf_file_physical_allocation.size]; + }; + lib.memcpy(elf_file[0..cpu_driver_executable_descriptor.content.len], cpu_driver_executable_descriptor.content); + const result = try lib.getDebugInformation(cpu.heap.allocator.zigUnwrap(), elf_file); + break :blk result; + }; + + const init_module_descriptor = try bootloader_information.getFileDescriptor("init"); + + try spawnInitBSP(init_module_descriptor.content, bootloader_information.cpu_page_tables); + }, + false => @panic("TODO: implement APP"), + } +} + +const ELF = lib.ELF(64); + +const SpawnInitCommonResult = extern struct { + scheduler: *cpu.UserScheduler, + entry_point: u64, +}; + +pub const MappingArgument = extern struct { + virtual: VirtualAddress, + physical: PhysicalAddress, + size: u64, +}; + +pub const InitFile = struct { + content: []const u8, + segments: []const Segment, +}; + +pub const Segment = extern struct { + virtual: VirtualAddress, + physical: PhysicalAddress, + memory_size: usize, + flags: privileged.Mapping.Flags, + file_offset: usize, + file_size: usize, +}; + +var once: bool = false; + +fn spawnInitCommon(init_file: []const u8, cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult { + assert(!once); + once = true; + // TODO: 
delete in the future + assert(cpu.bsp); + + const init_elf = try ELF.Parser.init(init_file); + const entry_point = init_elf.getEntryPoint(); + const program_headers = init_elf.getProgramHeaders(); + + var segment_buffer: [20]Segment = undefined; + var segment_count: usize = 0; + var segment_total_size: usize = 0; + var first_address: ?u64 = null; + + for (program_headers) |program_header| { + if (program_header.type == .load) { + if (first_address == null) { + first_address = program_header.virtual_address; + } + + const segment_size = lib.alignForward(usize, program_header.size_in_memory, lib.arch.valid_page_sizes[0]); + segment_total_size += segment_size; + + const segment_virtual = VirtualAddress.new(program_header.virtual_address); + const segment_physical_region = try cpu.page_allocator.allocate(segment_size, .{ .reason = .user }); + + const segment = &segment_buffer[segment_count]; + segment.* = .{ + .physical = segment_physical_region.address, + .virtual = segment_virtual, + .memory_size = segment_size, + .flags = .{ + .execute = program_header.flags.executable, + .write = program_header.flags.writable, + .user = true, + }, + .file_offset = program_header.offset, + .file_size = program_header.size_in_file, + }; + + const src = init_file[segment.file_offset..][0..segment.file_size]; + // It's necessary to use the higher half address here since the user mapping is not applied yet + const dst = segment_physical_region.toHigherHalfVirtualAddress().access(u8)[0..src.len]; + lib.memcpy(dst, src); + + segment_count += 1; + } + } + + const init_start_address = first_address orelse @panic("WTF"); + const init_top_address = init_start_address + segment_total_size; + const user_scheduler_virtual_address = VirtualAddress.new(init_top_address); + const user_scheduler_virtual_region = VirtualMemoryRegion.new(.{ + .address = user_scheduler_virtual_address, + .size = lib.alignForward(usize, @sizeOf(birth.Scheduler), lib.arch.valid_page_sizes[0]), + }); + // Align to 2MB + const user_initial_heap_top = lib.alignForward(usize, user_scheduler_virtual_region.top().value(), lib.arch.valid_page_sizes[1]); + + const segments = segment_buffer[0..segment_count]; + + const user_virtual_region = VirtualMemoryRegion.new(.{ + .address = VirtualAddress.new(init_start_address), + .size = user_initial_heap_top - init_start_address, + }); + // const page_table_regions = try PageTableRegions.create(user_virtual_region, cpu_page_tables); + log.debug("Scheduler region", .{}); + const scheduler_physical_region = try cpu.page_allocator.allocate(user_scheduler_virtual_region.size, .{ .reason = .user }); + + log.debug("Heap scheduler", .{}); + const init_cpu_scheduler = try cpu.heap.create(cpu.UserScheduler); + init_cpu_scheduler.* = cpu.UserScheduler{ + .s = .{ + .common = user_scheduler_virtual_address.access(*birth.Scheduler.Common), + .capability_root_node = cpu.interface.Root{ + .static = .{ + .cpu = true, + .boot = true, + .process = true, + }, + .dynamic = .{ + .io = .{ + .debug = true, + }, + .memory = .{}, + .cpu_memory = .{ + .flags = .{ + .allocate = true, + }, + }, + .page_table = cpu.interface.PageTables{ + .privileged = undefined, + .user = birth.interface.PageTable{ + .index = 0, + .entry_type = .page_table, + }, + // .vmm = try cpu.interface.VMM.new(), + .can_map_page_tables = true, + .page_tables = .{ + .ptr = undefined, + .len = 0, + .capacity = 0, + }, + .leaves = .{ + .ptr = undefined, + .len = 0, + .capacity = 0, + }, + }, + .command_buffer_submission = .{ .region = PhysicalMemoryRegion.invalid() }, + 
.command_buffer_completion = .{ .region = PhysicalMemoryRegion.invalid() }, + .memory_mapping = .{}, + .page_table_mapping = .{}, + }, + .scheduler = .{ + .memory = scheduler_physical_region, + }, + }, + }, + }; + + const scheduler_virtual_region = VirtualMemoryRegion.new(.{ + .address = user_scheduler_virtual_address, + .size = scheduler_physical_region.size, + }); + + scheduler_physical_region.address.toHigherHalfVirtualAddress().access(*birth.Scheduler.Common).self = user_scheduler_virtual_address.access(*birth.Scheduler.Common); + + const heap_virtual_region = VirtualMemoryRegion.new(.{ + .address = scheduler_virtual_region.top(), + .size = lib.alignForward(usize, scheduler_virtual_region.top().value(), lib.arch.valid_page_sizes[1]) - scheduler_virtual_region.top().value(), + }); + + log.debug("Heap region", .{}); + const heap_physical_region = try cpu.page_allocator.allocate(heap_virtual_region.size, .{ .reason = .user }); + @memset(heap_physical_region.toHigherHalfVirtualAddress().access(u8), 0); + + assert(scheduler_physical_region.size == scheduler_virtual_region.size); + assert(heap_physical_region.size == heap_virtual_region.size); + // Setup common variables + const higher_half_scheduler_common = scheduler_physical_region.address.toHigherHalfVirtualAddress().access(*birth.Scheduler.Common); + higher_half_scheduler_common.disabled = true; + higher_half_scheduler_common.core_id = cpu.core_id; + higher_half_scheduler_common.heap = VirtualMemoryRegion.new(.{ + .address = heap_virtual_region.address, + .size = heap_virtual_region.size, + }); + + try cpu.arch.init.setupMapping(init_cpu_scheduler, user_virtual_region, cpu_page_tables, .{ + .content = init_file, + .segments = segments, + }, .{ + .scheduler = .{ + .physical = scheduler_physical_region.address, + .virtual = scheduler_virtual_region.address, + .size = scheduler_virtual_region.size, + }, + .heap = .{ + .physical = heap_physical_region.address, + .virtual = heap_virtual_region.address, + .size = heap_virtual_region.size, + }, + }); + + return SpawnInitCommonResult{ + // .page_table_regions = page_table_regions, + .scheduler = init_cpu_scheduler, + .entry_point = entry_point, + }; +} + +fn spawnInitBSP(init_file: []const u8, cpu_page_tables: paging.CPUPageTables) !noreturn { + const spawn_init = try spawnInitCommon(init_file, cpu_page_tables); + const init_scheduler = spawn_init.scheduler; + // const page_table_regions = spawn_init.page_table_regions; + const entry_point = spawn_init.entry_point; + const scheduler_common = init_scheduler.s.common; + + cpu.user_scheduler = init_scheduler; + + cpu.arch.init.setupSchedulerCommon(scheduler_common, entry_point); + scheduler_common.disabled_save_area.contextSwitch(); +} diff --git a/src/cpu/interface.zig b/src/cpu/interface.zig new file mode 100644 index 0000000..2b5fd68 --- /dev/null +++ b/src/cpu/interface.zig @@ -0,0 +1,698 @@ +const lib = @import("lib"); +const assert = lib.assert; +const Allocator = lib.Allocator; +const enumCount = lib.enumCount; +const log = lib.log.scoped(.capabilities); +const VirtualAddress = lib.VirtualAddress; + +const privileged = @import("privileged"); +const paging = privileged.arch.paging; +const PhysicalAddress = lib.PhysicalAddress; +const PhysicalMemoryRegion = lib.PhysicalMemoryRegion; +const VirtualMemoryRegion = lib.VirtualMemoryRegion; +const birth = @import("birth"); +const cpu = @import("cpu"); +const RegionList = cpu.RegionList; +const SparseArray = cpu.SparseArray; + +pub var system_call_count: usize = 0; + +pub fn processFromRaw(options: 
birth.interface.Raw.Options, arguments: birth.interface.Raw.Arguments) birth.interface.Raw.Result { + defer system_call_count += 1; + return switch (options.general.convention) { + .birth => switch (options.birth.type) { + inline else => |capability| switch (@as(birth.interface.Command.fromCapability(capability), @enumFromInt(options.birth.command))) { + inline else => |command| blk: { + const Interface = birth.interface.Descriptor(capability, command); + const result = processCommand(Interface, arguments) catch |err| { + lib.log.err("Syscall ({s}, {s}) ended up in error: {}", .{ @tagName(capability), @tagName(command), err }); + break :blk Interface.fromError(err); + }; + break :blk Interface.fromResult(result); + }, + }, + }, + .emulated => @panic("TODO: emulated"), + }; +} + +pub fn processCommand(comptime Descriptor: type, raw_arguments: birth.interface.Raw.Arguments) Descriptor.Error!Descriptor.Result { + defer cpu.command_count += 1; + const capability = Descriptor.Capability; + const command = Descriptor.Command; + const arguments = try Descriptor.toArguments(raw_arguments); + + const root = &cpu.user_scheduler.s.capability_root_node; + // log.err("\n========\nSyscall received: {s}, {s}\n========\n", .{ @tagName(capability), @tagName(command) }); + + assert(root.static.process); + const has_permissions = root.hasPermissions(Descriptor, arguments); + + return if (has_permissions) switch (capability) { + .io => switch (command) { + .copy, .mint, .retype, .delete, .revoke, .create => unreachable, + .log => blk: { + const message = arguments; + cpu.writer.writeAll(message) catch unreachable; + comptime assert(Descriptor.Result == usize); + break :blk message.len; + }, + }, + .cpu => switch (command) { + .copy, .mint, .retype, .delete, .revoke, .create => unreachable, + .get_core_id => cpu.core_id, + .shutdown => cpu.shutdown(.success), + .get_command_buffer => { + const command_buffer = arguments; + _ = command_buffer; + @panic("TODO: get_command_buffer"); + }, + }, + .cpu_memory => switch (command) { + else => @panic(@tagName(command)), + }, + .command_buffer_completion, .command_buffer_submission => switch (command) { + .map => { + const region = @field(root.dynamic, @tagName(capability)).region; + assert(region.address.value() != 0); + assert(region.size != 0); + @panic("TODO: map"); + }, // TODO + else => @panic(@tagName(command)), + }, + .memory => switch (command) { + .allocate => blk: { + comptime assert(@TypeOf(arguments) == usize); + const size = arguments; + // TODO: we want more fine-grained control of the reason if we want more than a simple statistic + const physical_region = try cpu.page_allocator.allocate(size, .{ .reason = .user }); + const result = try root.dynamic.memory.appendRegion(physical_region); + break :blk result; + }, + .retype => blk: { + const source = arguments.source; + const destination = arguments.destination; + const region_ptr = root.dynamic.memory.find(source) orelse unreachable; + const region_copy = region_ptr.*; + root.dynamic.memory.remove(source); + switch (destination) { + .cpu_memory => { + // TODO: delete properly + const new_ref = root.dynamic.cpu_memory.allocated.append(region_copy) catch |err| { + log.err("Error: {}", .{err}); + return error.OutOfMemory; + }; + // TODO: delete properly + + break :blk @bitCast(new_ref); + }, + .command_buffer_submission, .command_buffer_completion => { + switch (destination) { + inline .command_buffer_completion, .command_buffer_submission => |dst| @field(root.dynamic, @tagName(dst)).region = region_copy, + 
else => @panic("WTF"), + } + // TODO: better value + break :blk .{ .integer = 0 }; + }, + else => @panic("WTF"), + } + if (true) @panic("TODO: retype"); + break :blk undefined; + }, + else => @panic(@tagName(command)), + }, + .boot => switch (command) { + .get_bundle_size => cpu.bundle.len, + .get_bundle_file_list_size => cpu.bundle_files.len, + else => @panic(@tagName(command)), + }, + .process => switch (command) { + .exit => cpu.shutdown(switch (arguments) { + true => .success, + false => .failure, + }), + .panic => cpu.panic("User process panicked with exit code 0x{x}:\n==========\n{s}\n==========", .{ arguments.exit_code, arguments.message }), + else => @panic(@tagName(command)), + }, + .page_table => switch (command) { + .get => { + const descriptor = arguments.descriptor; + assert(descriptor.entry_type == .page_table); + + const block = try root.dynamic.page_table.page_tables.getChecked(descriptor.block); + const page_table = &block.array[descriptor.index]; + log.debug("Page table: {}", .{page_table.flags.level}); + @memcpy(arguments.buffer, &page_table.children); + }, + else => @panic("TODO: page_table other"), + }, + .memory_mapping => { + @panic("TODO: memory_mapping"); + }, + .page_table_mapping => { + @panic("TODO: page_table_mapping"); + }, + } else error.forbidden; +} + +pub const RootDescriptor = extern struct { + value: *Root, +}; + +pub const Static = enum { + cpu, + boot, + process, + + pub const count = lib.enumCount(@This()); + + pub const Bitmap = @Type(.{ + .Struct = blk: { + const full_bit_size = @max(@as(comptime_int, 1 << 3), @as(u8, @sizeOf(Static)) << 3); + break :blk .{ + .layout = .Packed, + .backing_integer = @Type(.{ + .Int = .{ + .signedness = .unsigned, + .bits = full_bit_size, + }, + }), + .fields = fields: { + var fields: []const lib.Type.StructField = &.{}; + inline for (lib.enumFields(Static)) |static_field| { + fields = fields ++ [1]lib.Type.StructField{.{ + .name = static_field.name, + .type = bool, + .default_value = null, + .is_comptime = false, + .alignment = 0, + }}; + } + + assert(Static.count > 0); + assert(@sizeOf(Static) > 0 or Static.count == 1); + + const padding_type = @Type(.{ + .Int = .{ + .signedness = .unsigned, + .bits = full_bit_size - Static.count, + }, + }); + + fields = fields ++ [1]lib.Type.StructField{.{ + .name = "reserved", + .type = padding_type, + .default_value = &@as(padding_type, 0), + .is_comptime = false, + .alignment = 0, + }}; + break :fields fields; + }, + .decls = &.{}, + .is_tuple = false, + }; + }, + }); +}; + +pub const CommandBufferMemory = extern struct { + region: PhysicalMemoryRegion, +}; + +pub const Dynamic = enum { + io, + memory, // Barrelfish equivalent: Memory (no PhysAddr) + cpu_memory, // Barrelfish equivalent: Frame + page_table, // Barrelfish equivalent: VNode + command_buffer_submission, + command_buffer_completion, + memory_mapping, // Barrelfish equivalent: Frame mapping, Device Frame Mapping + page_table_mapping, // Barrelfish equivalent: VNode mapping + // irq_table, + // device_memory, + // scheduler, + + pub const Map = extern struct { + io: IO, + memory: Memory, + cpu_memory: CPUMemory, + page_table: PageTables, + command_buffer_submission: CommandBufferMemory, + command_buffer_completion: CommandBufferMemory, + memory_mapping: Memory.Mapping, + page_table_mapping: PageTables.Mapping, + + comptime { + inline for (lib.fields(Dynamic.Map), lib.fields(Dynamic)) |struct_field, enum_field| { + assert(lib.equal(u8, enum_field.name, struct_field.name)); + } + } + }; +}; + +pub const Memory = extern 
struct {
+    allocated: RegionList = .{},
+    allocate: bool = true,
+
+    pub const Mapping = extern struct {
+        foo: u32 = 0,
+    };
+
+    const AllocateError = error{
+        OutOfMemory,
+    };
+
+    fn find(memory: *Memory, memory_descriptor: birth.interface.Memory) ?*PhysicalMemoryRegion {
+        var iterator: ?*RegionList = &memory.allocated;
+        var block_index: usize = 0;
+
+        return blk: while (iterator) |list| : ({
+            iterator = list.metadata.next;
+            block_index += 1;
+        }) {
+            if (block_index == memory_descriptor.block) {
+                @panic("TODO: find");
+                // if (memory_descriptor.region < list.metadata.count) {
+                // const region = &list.regions[memory_descriptor.region];
+                // if (region.size != 0 and region.address.value() != 0) {
+                // assert(lib.isAligned(region.size, lib.arch.valid_page_sizes[0]));
+                // assert(lib.isAligned(region.address.value(), lib.arch.valid_page_sizes[0]));
+                // break :blk region;
+                // }
+                // }
+                //
+                // break :blk null;
+            } else if (block_index > memory_descriptor.block) {
+                break :blk null;
+            } else {
+                continue;
+            }
+        } else break :blk null;
+    }
+
+    inline fn getListIndex(size: usize) usize {
+        inline for (lib.arch.reverse_valid_page_sizes, 0..) |reverse_page_size, reverse_index| {
+            if (size >= reverse_page_size) return reverse_index;
+        }
+
+        @panic("WTF");
+    }
+
+    pub fn appendRegion(memory: *Memory, region: PhysicalMemoryRegion) !birth.interface.Memory {
+        var iterator: ?*RegionList = &memory.allocated;
+        while (iterator) |region_list| : (iterator = region_list.metadata.next) {
+            const result = region_list.append(region) catch continue;
+            return result;
+        }
+
+        return error.OutOfMemory;
+    }
+
+    pub fn remove(memory: *Memory, ref: birth.interface.Memory) void {
+        const region_index: u6 = @intCast(ref.region);
+        var block_index: u32 = 0;
+        var iterator: ?*RegionList = &memory.allocated;
+        while (iterator) |region_list| : ({
+            iterator = region_list.metadata.next;
+            block_index += 1;
+        }) {
+            if (block_index == ref.block) {
+                region_list.remove(region_index);
+                break;
+            } else if (block_index > ref.block) {
+                @panic("WTF");
+            } else continue;
+        } else {
+            @panic("WTF");
+        }
+    }
+};
+
+pub const CPUMemory = extern struct {
+    allocated: RegionList = .{},
+    flags: Flags = .{},
+
+    const Flags = packed struct(u64) {
+        allocate: bool = true,
+        reserved: u63 = 0,
+    };
+};
+
+pub const PageTable = extern struct {
+    region: PhysicalMemoryRegion,
+    mapping: VirtualAddress,
+    flags: Flags,
+    children: Children = .{.{}} ** children_count,
+
+    pub const Children = [children_count]birth.interface.PageTable;
+    pub const children_count = paging.page_table_entry_count;
+
+    pub const Flags = packed struct(u64) {
+        level: paging.Level,
+        reserved: u62 = 0,
+    };
+
+    pub const Array = extern struct {
+        array: [count]PageTable,
+        bitset: lib.BitsetU64(count),
+        next: ?*Array = null,
+
+        pub const count = 32;
+
+        pub fn get(array: *Array, index: u6) !*PageTable {
+            if (array.bitset.isSet(index)) {
+                return &array.array[index];
+            } else {
+                return error.index_out_of_bounds;
+            }
+        }
+    };
+};
+
+pub const Leaf = extern struct {
+    physical: PhysicalAddress,
+    flags: Flags,
+
+    pub const Flags = packed struct(u64) {
+        size: Size,
+        reserved: u62 = 0,
+    };
+
+    pub const Size = enum(u2) {
+        @"4KB",
+        @"2MB",
+        @"1GB",
+    };
+
+    pub const Array = extern struct {
+        array: [count]Leaf,
+        bitset: lib.BitsetU64(count),
+        next: ?*Array = null,
+        pub const count = 32;
+        // Returns a pointer into the Leaf array, so the element type is Leaf.
+        pub fn get(array: *Array, index: u6) !*Leaf {
+            if (array.bitset.isSet(index)) {
+                return &array.array[index];
+            } else {
+                return 
error.index_out_of_bounds; + } + } + }; +}; + +pub const PageTables = extern struct { + // This one has the kernel mapped + privileged: PageTable, // This one is separate as cannot be mapped + user: birth.interface.PageTable, + page_tables: SparseArray(*PageTable.Array), + leaves: SparseArray(*Leaf.Array), + // vmm: VMM, + can_map_page_tables: bool, + + pub const Mapping = extern struct { + foo: u32 = 0, + }; + + const end = privileged.arch.paging.user_address_space_end; + + fn getUser(page_tables: *const PageTables) ?PhysicalMemoryRegion { + if (page_tables.user.address.value() == 0) { + return null; + } + + if (page_tables.user.size == 0) { + return null; + } + + return page_tables.user; + } + + pub fn switchPrivileged(page_tables: *const PageTables) void { + paging.Specific.fromPhysicalRegion(page_tables.privileged.region).makeCurrentPrivileged(); + } + + pub fn appendPageTable(page_tables: *PageTables, allocator: *Allocator, page_table: PageTable) !birth.interface.PageTable { + if (page_tables.page_tables.len > 0) { + const slice = page_tables.page_tables.ptr[0..page_tables.page_tables.len]; + for (slice, 0..) |block, block_index| { + const index = block.bitset.allocate() catch continue; + block.array[index] = page_table; + return .{ + .index = index, + .block = @intCast(block_index), + .entry_type = .page_table, + .present = true, + }; + } + } + + const page_table_array = try allocator.create(PageTable.Array); + try page_tables.page_tables.append(allocator, page_table_array); + return appendPageTable(page_tables, allocator, page_table); + } + + pub fn appendLeaf(page_tables: *PageTables, allocator: *Allocator, leaf: Leaf) !birth.interface.PageTable { + if (page_tables.leaves.len > 0) { + const slice = page_tables.leaves.ptr[0..page_tables.leaves.len]; + for (slice, 0..) 
|block, block_index| { + const index = block.bitset.allocate() catch continue; + block.array[index] = leaf; + + return .{ + .index = index, + .block = @intCast(block_index), + .entry_type = .leaf, + .present = true, + }; + } + } + + const leaf_array = try allocator.create(Leaf.Array); + try page_tables.leaves.append(allocator, leaf_array); + return appendLeaf(page_tables, allocator, leaf); + } + + pub fn getPageTable(page_tables: *PageTables, page_table: birth.interface.PageTable) !*PageTable { + assert(page_table.entry_type == .page_table); + if (page_table.present) { + const page_table_block = try page_tables.page_tables.getChecked(page_table.block); + const result = try page_table_block.get(@intCast(page_table.index)); + return result; + } else { + return error.not_present; + } + } +}; + +pub const IO = extern struct { + debug: bool, +}; + +pub const Scheduler = extern struct { + memory: PhysicalMemoryRegion, +}; + +comptime { + const dynamic_count = enumCount(Dynamic); + const static_count = enumCount(Static); + const total_count = enumCount(birth.interface.Capability); + assert(dynamic_count + static_count == total_count); +} + +pub const Root = extern struct { + static: Static.Bitmap, + dynamic: Dynamic.Map, + scheduler: Scheduler, + heap: Heap = .{}, + padding: [padding_byte_count]u8 = .{0} ** padding_byte_count, + + const Heap = cpu.HeapImplementation(true); + + const max_alignment = @max(@alignOf(Static.Bitmap), @alignOf(Dynamic.Map), @alignOf(Scheduler), @alignOf(Heap)); + const total_size = lib.alignForward(usize, @sizeOf(Static.Bitmap) + @sizeOf(Dynamic.Map) + @sizeOf(Scheduler) + @sizeOf(Heap), max_alignment); + const page_aligned_size = lib.alignForward(usize, total_size, lib.arch.valid_page_sizes[0]); + const padding_byte_count = page_aligned_size - total_size; + + comptime { + assert(@sizeOf(Root) % lib.arch.valid_page_sizes[0] == 0); + } + + pub const AllocateError = error{ + OutOfMemory, + }; + + fn hasPermissions(root: *Root, comptime Descriptor: type, arguments: Descriptor.Arguments) bool { + const capability = Descriptor.Capability; + const command = Descriptor.Command; + + if (command == .retype) { + const can_retype: bool = switch (@TypeOf(arguments)) { + void => @panic("Retype on void"), + else => switch (arguments.destination) { + inline else => |destination| blk: { + const child_types = comptime capability.getChildTypes(); + inline for (child_types) |child_type| { + if (child_type == destination) { + break :blk true; + } + } else { + break :blk false; + } + }, + }, + }; + + if (!can_retype) { + return false; + } + } + + const has_permissions = switch (capability) { + // static capabilities + inline .cpu, + .boot, + => |static_capability| @field(root.static, @tagName(static_capability)), + .process => root.static.process or command == .panic, + // dynamic capabilities + .io => switch (command) { + .copy, .mint, .retype, .delete, .revoke, .create => unreachable, + .log => root.dynamic.io.debug, + }, + .cpu_memory => root.dynamic.cpu_memory.flags.allocate, + .command_buffer_completion, .command_buffer_submission => true, //TODO + .memory => switch (command) { + .allocate => root.dynamic.memory.allocate, + .retype => root.dynamic.memory.find(arguments.source) != null, + else => @panic("TODO: else => memory"), + }, + .page_table => root.dynamic.page_table.can_map_page_tables, // TODO + .memory_mapping => true, // TODO + .page_table_mapping => true, // TODO + }; + + return has_permissions; + } + + // Fast path + fn allocateMemoryRaw(root: *Root, size: usize) 
AllocateError!PhysicalMemoryRegion { + lib.log.err("New allocation demanded: 0x{x} bytes", .{size}); + assert(size != 0); + assert(lib.isAligned(size, lib.arch.valid_page_sizes[0])); + var index = Memory.getListIndex(size); + + const result = blk: { + while (true) : (index -= 1) { + const list = &root.dynamic.memory.lists[index]; + var iterator: ?*cpu.capabilities.RegionList = list; + + // const page_size = @as(u64, switch (index) { + // 0 => lib.arch.reverse_valid_page_sizes[0], + // 1 => lib.arch.reverse_valid_page_sizes[1], + // 2 => lib.arch.reverse_valid_page_sizes[2], + // else => unreachable, + // }); + + var list_count: usize = 0; + while (iterator) |free_memory_list| : ({ + iterator = free_memory_list.metadata.next; + list_count += 1; + }) { + const allocation = free_memory_list.allocate(size) catch continue; + list_count += 1; + break :blk allocation; + } + + if (index == 0) break; + } + + log.err("allocateMemoryRaw", .{}); + return error.OutOfMemory; + }; + + @memset(result.toHigherHalfVirtualAddress().access(u8), 0); + + return result; + } + + pub fn allocateMemory(root: *Root, size: usize) AllocateError!birth.capabilities.memory { + log.debug("Allocating 0x{x} bytes for user (root is 0x{x}", .{ size, @intFromPtr(root) }); + const result = try allocateMemoryRaw(root, size); + const reference = root.dynamic.memory.allocated.append(result) catch |err| { + log.err("err(user): {}", .{err}); + return AllocateError.OutOfMemory; + }; + assert(reference.block == 0); + assert(reference.region == 0); + const region_address = &root.dynamic.memory.allocated.regions[reference.region]; + log.debug("Region address: 0x{x}", .{@intFromPtr(region_address)}); + return reference; + } + + // Slow uncommon path. Use cases: + // 1. CR3 switch. This is assumed to be privileged, so this function assumes privileged use of the memory + pub fn allocatePageCustomAlignment(root: *Root, size: usize, alignment: usize) AllocateError!PhysicalMemoryRegion { + assert(alignment > lib.arch.valid_page_sizes[0] and alignment < lib.arch.valid_page_sizes[1]); + + comptime assert(lib.arch.valid_page_sizes.len == 3); + var index = Memory.getListIndex(size); + + while (true) : (index -= 1) { + const smallest_region_list = &root.dynamic.memory.lists[index]; + var iterator: ?*cpu.capabilities.RegionList = smallest_region_list; + while (iterator) |free_region_list| : (iterator = free_region_list.metadata.next) { + const physical_allocation = free_region_list.allocateAligned(size, alignment) catch blk: { + const splitted_allocation = free_region_list.allocateAlignedSplitting(size, alignment) catch continue; + _ = try root.appendRegion(&root.dynamic.memory, splitted_allocation.wasted); + break :blk splitted_allocation.allocated; + }; + + return physical_allocation; + } + + if (index == 0) break; + } + + log.err("allocatePageCustomAlignment", .{}); + return AllocateError.OutOfMemory; + } + + fn allocateSingle(root: *Root, comptime T: type) AllocateError!*T { + const size = @sizeOf(T); + const alignment = @alignOf(T); + var iterator = root.heap.first; + while (iterator) |heap_region| : (iterator = heap_region.next) { + if (heap_region.alignmentFits(alignment)) { + if (heap_region.sizeFits(size)) { + const allocated_region = heap_region.takeRegion(size); + const result = &allocated_region.toHigherHalfVirtualAddress().access(T)[0]; + return result; + } + } else { + @panic("ELSE"); + } + } + + const physical_region = try root.allocateMemory(lib.arch.valid_page_sizes[0]); + const heap_region = 
physical_region.toHigherHalfVirtualAddress().address.access(*Heap.Region); + const first = root.heap.first; + heap_region.* = .{ + .descriptor = physical_region.offset(@sizeOf(Heap.Region)), + .allocated_size = @sizeOf(Heap.Region), + .next = first, + }; + + root.heap.first = heap_region; + + return try root.allocateSingle(T); + } + + fn allocateMany(root: *Root, comptime T: type, count: usize) AllocateError![]T { + _ = count; + _ = root; + + @panic("TODO many"); + } + + pub const AllocateCPUMemoryOptions = packed struct { + privileged: bool, + }; +}; diff --git a/src/cpu/main.zig b/src/cpu/main.zig index 7d7d9f2..4556d4e 100644 --- a/src/cpu/main.zig +++ b/src/cpu/main.zig @@ -7,11 +7,8 @@ const stopCPU = privileged.arch.stopCPU; const cpu = @import("cpu"); -var lock: lib.Spinlock = .released; - pub const std_options = struct { pub fn logFn(comptime level: lib.log.Level, comptime scope: @TypeOf(.EnumLiteral), comptime format: []const u8, args: anytype) void { - lock.acquire(); cpu.writer.writeAll("[CPU DRIVER] ") catch unreachable; cpu.writer.writeByte('[') catch unreachable; cpu.writer.writeAll(@tagName(scope)) catch unreachable; @@ -21,8 +18,6 @@ pub const std_options = struct { cpu.writer.writeAll("] ") catch unreachable; lib.format(cpu.writer, format, args) catch unreachable; cpu.writer.writeByte('\n') catch unreachable; - - lock.release(); } pub const log_level = lib.log.Level.debug; diff --git a/src/cpu/test_runner.zig b/src/cpu/test_runner.zig deleted file mode 100644 index b80a220..0000000 --- a/src/cpu/test_runner.zig +++ /dev/null @@ -1,38 +0,0 @@ -const lib = @import("lib"); -const assert = lib.assert; -const log = lib.log.scoped(.TEST); -const privileged = @import("privileged"); -const QEMU = lib.QEMU; - -const cpu = @import("cpu"); - -const RunAllTestResult = error{ - failure, -}; - -pub fn runAllTests() RunAllTestResult!void { - comptime assert(lib.is_test); - const test_functions = @import("builtin").test_functions; - var failed_test_count: usize = 0; - for (test_functions) |test_function| { - test_function.func() catch |err| { - log.err("Test failed: {}", .{err}); - failed_test_count += 1; - }; - } - - const test_count = test_functions.len; - assert(QEMU.isa_debug_exit.io_size == @sizeOf(u32)); - const exit_code = switch (failed_test_count) { - 0 => blk: { - log.info("All {} tests passed.", .{test_count}); - break :blk .success; - }, - else => blk: { - log.info("Run {} tests. 
Failed {}.", .{ test_count, failed_test_count }); - break :blk .failure; - }, - }; - - cpu.shutdown(exit_code); -} diff --git a/src/host/runner/main.zig b/src/host/runner/main.zig index 2b1befc..ea32e54 100644 --- a/src/host/runner/main.zig +++ b/src/host/runner/main.zig @@ -251,12 +251,20 @@ pub fn main() anyerror!void { if (arguments.log) |log_configuration| { var log_what = host.ArrayList(u8).init(wrapped_allocator.zigUnwrap()); - if (log_configuration.guest_errors) try log_what.appendSlice("guest_errors,"); - if (log_configuration.interrupts) try log_what.appendSlice("int,"); - if (!arguments_result.ci and log_configuration.assembly) try log_what.appendSlice("in_asm,"); + if (log_configuration.guest_errors) { + try log_what.appendSlice("guest_errors,"); + } + + if (log_configuration.interrupts) { + try log_what.appendSlice("int,"); + } + + if (!arguments_result.ci and log_configuration.assembly) { + try log_what.appendSlice("in_asm,"); + } if (log_what.items.len > 0) { - // Delete the last comma + //Delete the last comma _ = log_what.pop(); try argument_list.append("-d"); @@ -284,7 +292,7 @@ pub fn main() anyerror!void { // GF2, when not found in the PATH, can give problems const use_gf = switch (lib.os) { .macos => false, - .linux => false, + .linux => true, else => false, }; @@ -292,7 +300,8 @@ pub fn main() anyerror!void { if (use_gf) { try command_line_gdb.append("gf2"); } else { - try command_line_gdb.append("kitty"); + const terminal_emulator = "foot"; + try command_line_gdb.append(terminal_emulator); try command_line_gdb.append(switch (lib.os) { .linux => "gdb", .macos => "x86_64-elf-gdb", @@ -331,12 +340,12 @@ pub fn main() anyerror!void { try debugger_process.spawn(); } - var process = host.ChildProcess.init(argument_list.items, wrapped_allocator.zigUnwrap()); + var emulator_process = host.ChildProcess.init(argument_list.items, wrapped_allocator.zigUnwrap()); //process.stdout_behavior = .I; - const result = try process.spawnAndWait(); + const emulator_process_result = try emulator_process.spawnAndWait(); - if (result == .Exited) { - const exit_code = result.Exited; + if (emulator_process_result == .Exited) { + const exit_code = emulator_process_result.Exited; if (exit_code & 1 != 0) { const mask = lib.maxInt(@TypeOf(exit_code)) - 1; const masked_exit_code = exit_code & mask; @@ -354,7 +363,7 @@ pub fn main() anyerror!void { } else log.err("QEMU exited with unexpected code: {}. 
Masked: {}", .{ exit_code, masked_exit_code }); } else log.err("QEMU exited with unexpected code: {}", .{exit_code}); } else { - log.err("QEMU was {s}", .{@tagName(result)}); + log.err("QEMU was {s}", .{@tagName(emulator_process_result)}); } if (debugcon_file_used) { diff --git a/src/lib.zig b/src/lib.zig index 7dbcc0a..7d5ac01 100644 --- a/src/lib.zig +++ b/src/lib.zig @@ -1,6 +1,318 @@ const common = @import("common.zig"); pub usingnamespace common; +const compiler_builtin = @import("builtin"); +pub const cpu = compiler_builtin.cpu; +pub const os = compiler_builtin.os.tag; +pub const build_mode = compiler_builtin.mode; +pub const is_test = compiler_builtin.is_test; + +pub const kb = 1024; +pub const mb = kb * 1024; +pub const gb = mb * 1024; +pub const tb = gb * 1024; + +pub const SizeUnit = enum(u64) { + byte = 1, + kilobyte = 1024, + megabyte = 1024 * 1024, + gigabyte = 1024 * 1024 * 1024, + terabyte = 1024 * 1024 * 1024 * 1024, +}; + +pub const std = @import("std"); +pub const Target = std.Target; +pub const Cpu = Target.Cpu; +pub const CrossTarget = std.zig.CrossTarget; + +pub const log = std.log; + +pub fn BitsetU64(comptime bits: comptime_int) type { + assert(bits <= @bitSizeOf(u64)); + const max_value = maxInt(@Type(.{ + .Int = .{ + .signedness = .unsigned, + .bits = bits, + }, + })); + + return packed struct(u64) { + value: u64 = 0, + + const Error = error{ + block_full, + }; + + pub inline fn allocate(bitset: *@This()) !u6 { + if (bitset.value & max_value != max_value) { + // log.debug("Bitset: 0b{b}", .{bitset.value}); + const result: u6 = @intCast(@ctz(~bitset.value)); + // log.debug("Result: {}", .{result}); + assert(!bitset.isSet(result)); + bitset.set(result); + return result; + } else { + return error.block_full; + } + } + + pub inline fn set(bitset: *@This(), index: u6) void { + assert(index < bits); + bitset.value |= (@as(u64, 1) << index); + } + + pub inline fn clear(bitset: *@This(), index: u6) void { + assert(index < bits); + bitset.value &= ~(@as(u64, 1) << index); + } + + pub inline fn isSet(bitset: @This(), index: u6) bool { + assert(index < bits); + return bitset.value & (@as(u64, 1) << index) != 0; + } + + pub inline fn isFull(bitset: @This()) bool { + return bitset.value == max_value; + } + }; +} + +pub const Atomic = std.atomic.Atomic; + +pub const Reader = std.io.Reader; +pub const Writer = std.io.Writer; + +pub const FixedBufferStream = std.io.FixedBufferStream; +pub const fixedBufferStream = std.io.fixedBufferStream; + +pub fn assert(ok: bool) void { + if (!ok) { + if (@inComptime()) { + @compileError("Assert failed!"); + } else { + @panic("Assert failed!"); + } + } +} + +pub const deflate = std.compress.deflate; + +const debug = std.debug; +pub const print = debug.print; +pub const StackIterator = debug.StackIterator; +pub const dwarf = std.dwarf; +pub const ModuleDebugInfo = std.debug.ModuleDebugInfo; + +pub const elf = std.elf; + +const fmt = std.fmt; +pub const format = std.fmt.format; +pub const FormatOptions = fmt.FormatOptions; +pub const bufPrint = fmt.bufPrint; +pub const allocPrint = fmt.allocPrint; +pub const comptimePrint = fmt.comptimePrint; +pub const parseUnsigned = fmt.parseUnsigned; + +const heap = std.heap; +pub const FixedBufferAllocator = heap.FixedBufferAllocator; + +pub const json = std.json; + +const mem = std.mem; +pub const ZigAllocator = mem.Allocator; +pub const equal = mem.eql; +pub const length = mem.len; +pub const startsWith = mem.startsWith; +pub const endsWith = mem.endsWith; +pub const indexOf = mem.indexOf; +// Ideal for 
small inputs +pub const indexOfPosLinear = mem.indexOfPosLinear; +pub const lastIndexOf = mem.lastIndexOf; +pub const asBytes = mem.asBytes; +pub const readIntBig = mem.readIntBig; +pub const readIntSliceBig = mem.readIntSliceBig; +pub const concat = mem.concat; +pub const sliceAsBytes = mem.sliceAsBytes; +pub const bytesAsSlice = mem.bytesAsSlice; +pub const alignForward = mem.alignForward; +pub const alignBackward = mem.alignBackward; +pub const isAligned = mem.isAligned; +pub const isAlignedGeneric = mem.isAlignedGeneric; +pub const reverse = mem.reverse; +pub const tokenize = mem.tokenize; +pub const containsAtLeast = mem.containsAtLeast; +pub const sliceTo = mem.sliceTo; +pub const swap = mem.swap; + +pub const random = std.rand; + +pub const testing = std.testing; + +pub const sort = std.sort; + +pub fn fieldSize(comptime T: type, field_name: []const u8) comptime_int { + var foo: T = undefined; + return @sizeOf(@TypeOf(@field(foo, field_name))); +} + +const DiffError = error{ + diff, +}; + +pub fn diff(file1: []const u8, file2: []const u8) !void { + assert(file1.len == file2.len); + var different_bytes: u64 = 0; + for (file1, 0..) |byte1, index| { + const byte2 = file2[index]; + const is_different_byte = byte1 != byte2; + different_bytes += @intFromBool(is_different_byte); + if (is_different_byte) { + log.debug("Byte [0x{x}] is different: 0x{x} != 0x{x}", .{ index, byte1, byte2 }); + } + } + + if (different_bytes != 0) { + log.debug("Total different bytes: 0x{x}", .{different_bytes}); + return DiffError.diff; + } +} + +pub fn zeroes(comptime T: type) T { + var result: T = undefined; + const slice = asBytes(&result); + @memset(slice, 0); + return result; +} + +const ascii = std.ascii; +pub const upperString = ascii.upperString; +pub const isUpper = ascii.isUpper; +pub const isAlphabetic = ascii.isAlphabetic; + +const std_builtin = std.builtin; +pub const AtomicRmwOp = std_builtin.AtomicRmwOp; +pub const AtomicOrder = std_builtin.AtomicOrder; +pub const Type = std_builtin.Type; +pub const StackTrace = std_builtin.StackTrace; +pub const SourceLocation = std_builtin.SourceLocation; + +pub fn FieldType(comptime T: type, comptime name: []const u8) type { + return @TypeOf(@field(@as(T, undefined), name)); +} + +// META PROGRAMMING +pub const AutoEnumArray = std.enums.EnumArray; +pub const fields = std.meta.fields; +pub const enumFromInt = std.meta.enumFromInt; +pub const stringToEnum = std.meta.stringToEnum; +pub const Tag = std.meta.Tag; + +const math = std.math; +pub const maxInt = math.maxInt; +pub const min = math.min; +pub const divCeil = math.divCeil; +pub const clamp = math.clamp; +pub const isPowerOfTwo = math.isPowerOfTwo; +pub const mul = math.mul; +pub const cast = math.cast; + +pub const unicode = std.unicode; + +pub const uefi = std.os.uefi; + +pub const DiskType = enum(u32) { + virtio = 0, + nvme = 1, + ahci = 2, + ide = 3, + memory = 4, + bios = 5, + + pub const count = enumCount(@This()); +}; + +pub fn enumFields(comptime E: type) []const Type.EnumField { + return @typeInfo(E).Enum.fields; +} + +pub const enumValues = std.enums.values; + +pub fn enumCount(comptime E: type) usize { + return enumFields(E).len; +} + +pub const QEMU = extern struct { + pub const isa_debug_exit = ISADebugExit{}; + + pub const ISADebugExit = extern struct { + io_base: u8 = 0xf4, + io_size: u8 = @sizeOf(u32), + }; + + pub const ExitCode = enum(u32) { + success = 0x10, + failure = 0x11, + _, + }; +}; + +pub const OptimizeMode = std.builtin.OptimizeMode; + +pub const Suffix = enum { + bootloader, + 
cpu_driver, + image, + complete, + + pub fn fromConfiguration(suffix: Suffix, allocator: ZigAllocator, configuration: common.Configuration, prefix: ?[]const u8) ![]const u8 { + const cpu_driver_suffix = [_][]const u8{ + @tagName(configuration.optimize_mode), + "_", + @tagName(configuration.architecture), + "_", + @tagName(configuration.executable_kind), + }; + + const bootloader_suffix = [_][]const u8{ + @tagName(configuration.architecture), + "_", + @tagName(configuration.bootloader), + "_", + @tagName(configuration.boot_protocol), + }; + + const image_suffix = [_][]const u8{ + @tagName(configuration.optimize_mode), + "_", + } ++ bootloader_suffix ++ [_][]const u8{ + "_", + @tagName(configuration.executable_kind), + }; + + const complete_suffix = image_suffix ++ [_][]const u8{ + "_", + @tagName(configuration.execution_type), + "_", + @tagName(configuration.execution_environment), + }; + + return try std.mem.concat(allocator, u8, &switch (suffix) { + .cpu_driver => if (prefix) |pf| [1][]const u8{pf} ++ cpu_driver_suffix else cpu_driver_suffix, + .bootloader => if (prefix) |pf| [1][]const u8{pf} ++ bootloader_suffix else bootloader_suffix, + .image => if (prefix) |pf| [1][]const u8{pf} ++ image_suffix else image_suffix, + .complete => if (prefix) |pf| [1][]const u8{pf} ++ complete_suffix else complete_suffix, + }); + } +}; + +pub const default_cpu_name = "/cpu"; +pub const default_init_file = "/init"; + +pub const default_disk_size = 64 * 1024 * 1024; +pub const default_sector_size = 0x200; + +pub const cache_line_size = 64; + pub const arch = @import("lib/arch.zig"); /// This is done so the allocator can respect allocating from different address spaces pub const config = @import("lib/config.zig"); @@ -17,10 +329,9 @@ const extern_enum_array = @import("lib/extern_enum_array.zig"); pub const EnumArray = extern_enum_array.EnumArray; pub fn memcpy(noalias destination: []u8, noalias source: []const u8) void { - @setRuntimeSafety(false); // Using this as the Zig implementation is really slow (at least in x86 with soft_float enabled - // if (common.cpu.arch == .x86 or common.cpu.arch == .x86_64 and common.Target.x86.featureSetHas(common.cpu.features, .soft_float)) { - const bytes_left = switch (common.cpu.arch) { + // if (cpu.arch == .x86 or cpu.arch == .x86_64 and Target.x86.featureSetHas(cpu.features, .soft_float)) { + const bytes_left = switch (cpu.arch) { .x86 => asm volatile ( \\rep movsb : [ret] "={ecx}" (-> usize), @@ -38,46 +349,16 @@ pub fn memcpy(noalias destination: []u8, noalias source: []const u8) void { else => @compileError("Unreachable"), }; - common.assert(bytes_left == 0); - // } else { - // @memcpy(destination, source); - // } + assert(bytes_left == 0); } -// pub fn memset(comptime T: type, slice: []T, elem: T) void { -// @setRuntimeSafety(false); -// -// const bytes_left = switch (T) { -// u8 => switch (common.cpu.arch) { -// .x86 => asm volatile ( -// \\rep stosb -// : [ret] "={ecx}" (-> usize), -// : [slice] "{edi}" (slice.ptr), -// [len] "{ecx}" (slice.len), -// [element] "{al}" (elem), -// ), -// .x86_64 => asm volatile ( -// \\rep movsb -// : [ret] "={rcx}" (-> usize), -// : [slice] "{rdi}" (slice.ptr), -// [len] "{rcx}" (slice.len), -// [element] "{al}" (elem), -// ), -// else => @compileError("Unsupported OS"), -// }, -// else => @compileError("Type " ++ @typeName(T) ++ " not supported"), -// }; -// -// common.assert(bytes_left == 0); -// } - pub fn EnumStruct(comptime Enum: type, comptime Value: type) type { - const EnumFields = common.enumFields(Enum); + const 
EnumFields = enumFields(Enum); const MyEnumStruct = @Type(.{ .Struct = .{ .layout = .Extern, .fields = &blk: { - var arr = [1]common.Type.StructField{undefined} ** EnumFields.len; + var arr = [1]Type.StructField{undefined} ** EnumFields.len; inline for (EnumFields) |EnumValue| { arr[EnumValue.value] = .{ .name = EnumValue.name, @@ -102,8 +383,8 @@ pub fn EnumStruct(comptime Enum: type, comptime Value: type) type { pub const Array = MyEnumArray; }; - common.assert(@sizeOf(Union.Struct) == @sizeOf(Union.Array)); - common.assert(@sizeOf(Union.Array) == @sizeOf(Union)); + assert(@sizeOf(Union.Struct) == @sizeOf(Union.Array)); + assert(@sizeOf(Union.Array) == @sizeOf(Union)); return Union; } @@ -115,7 +396,7 @@ pub const DirectoryTokenizer = struct { total_count: usize, pub fn init(string: []const u8) DirectoryTokenizer { - common.assert(string.len > 0); + assert(string.len > 0); var count: usize = 0; if (string[0] == '/') { @@ -153,8 +434,8 @@ pub const DirectoryTokenizer = struct { return tokenizer.string[original_index..]; } else { - common.assert(original_index == tokenizer.string.len); - common.assert(tokenizer.given_count == tokenizer.total_count); + assert(original_index == tokenizer.string.len); + assert(tokenizer.given_count == tokenizer.total_count); return null; } } @@ -165,8 +446,8 @@ pub const DirectoryTokenizer = struct { } test "directory tokenizer" { - common.log.err("ajskdjsa", .{}); - if (common.os != .freestanding) { + log.err("ajskdjsa", .{}); + if (os != .freestanding) { const TestCase = struct { path: []const u8, expected_result: []const []const u8, @@ -183,13 +464,13 @@ pub const DirectoryTokenizer = struct { var result_count: usize = 0; while (dir_tokenizer.next()) |dir| { - try common.testing.expect(result_count < results.len); - try common.testing.expectEqualStrings(case.expected_result[result_count], dir); + try testing.expect(result_count < results.len); + try testing.expectEqualStrings(case.expected_result[result_count], dir); results[result_count] = dir; result_count += 1; } - try common.testing.expectEqual(case.expected_result.len, result_count); + try testing.expectEqual(case.expected_result.len, result_count); } } } @@ -212,7 +493,7 @@ pub inline fn maybePtrSub(comptime T: type, ptr: ?*T, element_offset: usize) ?*T } test { - common.log.err("test not taken into the test suite"); + log.err("test not taken into the test suite"); _ = DirectoryTokenizer; _ = Filesystem; _ = PartitionTable; @@ -237,7 +518,7 @@ pub const Allocator = extern struct { }; /// Necessary to do this hack - const Callbacks = switch (common.cpu.arch) { + const Callbacks = switch (cpu.arch) { .x86 => extern struct { allocate: *const Allocate.Fn, allocate_padding: u32 = 0, @@ -265,7 +546,7 @@ pub const Allocator = extern struct { return &result[0]; } - pub fn wrap(zig_allocator: common.ZigAllocator) Wrapped { + pub fn wrap(zig_allocator: ZigAllocator) Wrapped { return .{ .allocator = .{ .callbacks = .{ @@ -279,7 +560,7 @@ pub const Allocator = extern struct { }; } - pub fn zigUnwrap(allocator: *Allocator) common.ZigAllocator { + pub fn zigUnwrap(allocator: *Allocator) ZigAllocator { return .{ .ptr = allocator, .vtable = &zig_vtable, @@ -293,11 +574,13 @@ pub const Allocator = extern struct { }; pub fn zigAllocate(context: *anyopaque, size: usize, ptr_align: u8, return_address: usize) ?[*]u8 { - _ = context; - _ = size; - _ = ptr_align; _ = return_address; - return null; + const allocator: *Allocator = @ptrCast(@alignCast(context)); + // Not understanding why Zig API is like this: + const 
alignment = @as(u64, 1) << @as(u6, @intCast(ptr_align)); + const result = allocator.allocateBytes(size, alignment) catch return null; + assert(result.size >= size); + return @ptrFromInt(result.address); } pub fn zigResize(context: *anyopaque, buffer: []u8, buffer_alignment: u8, new_length: usize, return_address: usize) bool { @@ -320,14 +603,14 @@ pub const Allocator = extern struct { allocator: Allocator, zig: extern struct { ptr: *anyopaque, - vtable: *const common.ZigAllocator.VTable, + vtable: *const ZigAllocator.VTable, }, pub fn unwrap(wrapped_allocator: *Wrapped) *Allocator { return &wrapped_allocator.allocator; } - pub fn zigUnwrap(wrapped_allocator: *Wrapped) common.ZigAllocator { + pub fn zigUnwrap(wrapped_allocator: *Wrapped) ZigAllocator { return .{ .ptr = wrapped_allocator.zig.ptr, .vtable = wrapped_allocator.zig.vtable, @@ -337,7 +620,7 @@ pub const Allocator = extern struct { pub fn wrappedCallbackAllocate(allocator: *Allocator, size: u64, alignment: u64) Allocator.Allocate.Error!Allocator.Allocate.Result { const wrapped_allocator = @fieldParentPtr(Wrapped, "allocator", allocator); const zig_allocator = wrapped_allocator.zigUnwrap(); - if (alignment > common.maxInt(u8)) { + if (alignment > maxInt(u8)) { @panic("alignment supported by Zig is less than asked"); } const zig_result = zig_allocator.vtable.alloc(zig_allocator.ptr, size, @as(u8, @intCast(alignment)), @returnAddress()); @@ -461,7 +744,7 @@ pub fn ELF(comptime bits: comptime_int) type { return Parser.Error.invalid_magic; } - if (!common.equal(u8, &file_header.elf_id, FileHeader.elf_signature)) { + if (!equal(u8, &file_header.elf_id, FileHeader.elf_signature)) { return Parser.Error.invalid_signature; } @@ -484,7 +767,7 @@ pub fn ELF(comptime bits: comptime_int) type { pub const ProgramHeader = switch (is_64) { true => extern struct { - type: Type = .load, + type: @This().Type = .load, flags: Flags, //= @enumToInt(Flags.readable) | @enumToInt(Flags.executable), offset: u64, virtual_address: u64, @@ -518,7 +801,7 @@ pub fn ELF(comptime bits: comptime_int) type { reserved: u29, comptime { - common.assert(@sizeOf(Flags) == @sizeOf(u32)); + assert(@sizeOf(Flags) == @sizeOf(u32)); } }; }, @@ -585,8 +868,8 @@ pub fn ELF(comptime bits: comptime_int) type { pub inline fn safeArchitectureCast(value: anytype) usize { return switch (@sizeOf(@TypeOf(value)) > @sizeOf(usize)) { - true => if (value <= common.maxInt(usize)) @as(usize, @truncate(value)) else { - common.log.err("PANIC: virtual address is longer than usize: 0x{x}", .{value}); + true => if (value <= maxInt(usize)) @as(usize, @truncate(value)) else { + log.err("PANIC: virtual address is longer than usize: 0x{x}", .{value}); @panic("safeArchitectureCast"); }, false => value, @@ -598,11 +881,11 @@ pub const DereferenceError = error{ }; pub inline fn tryDereferenceAddress(value: anytype) DereferenceError!usize { - common.assert(@sizeOf(@TypeOf(value)) > @sizeOf(usize)); - return if (value <= common.maxInt(usize)) @as(usize, @truncate(value)) else return DereferenceError.address_bigger_than_usize; + assert(@sizeOf(@TypeOf(value)) > @sizeOf(usize)); + return if (value <= maxInt(usize)) @as(usize, @truncate(value)) else return DereferenceError.address_bigger_than_usize; } -pub fn enumAddNames(comptime enum_fields: []const common.Type.EnumField, comptime names: []const []const u8) []const common.Type.EnumField { +pub fn enumAddNames(comptime enum_fields: []const Type.EnumField, comptime names: []const []const u8) []const Type.EnumField { comptime var result = enum_fields; const 
previous_last_value = if (enum_fields.len > 0) enum_fields[enum_fields.len - 1].value else 0; @@ -617,13 +900,13 @@ pub fn enumAddNames(comptime enum_fields: []const common.Type.EnumField, comptim return result; } -pub fn ErrorSet(comptime error_names: []const []const u8, comptime predefined_fields: []const common.Type.EnumField) type { - comptime var error_fields: []const common.Type.Error = &.{}; - comptime var enum_items: []const common.Type.EnumField = predefined_fields; +pub fn ErrorSet(comptime error_names: []const []const u8, comptime predefined_fields: []const Type.EnumField) type { + comptime var error_fields: []const Type.Error = &.{}; + comptime var enum_items: []const Type.EnumField = predefined_fields; comptime var enum_value = enum_items[enum_items.len - 1].value + 1; inline for (error_names) |error_name| { - enum_items = enum_items ++ [1]common.Type.EnumField{ + enum_items = enum_items ++ [1]Type.EnumField{ .{ .name = error_name, .value = enum_value, @@ -634,23 +917,23 @@ pub fn ErrorSet(comptime error_names: []const []const u8, comptime predefined_fi } inline for (enum_items) |item| { - error_fields = error_fields ++ [1]common.Type.Error{ + error_fields = error_fields ++ [1]Type.Error{ .{ .name = item.name, }, }; } - const EnumType = @Type(common.Type{ + const EnumType = @Type(Type{ .Enum = .{ - .tag_type = u16, + .tag_type = u15, .fields = enum_items, .decls = &.{}, .is_exhaustive = true, }, }); - const ErrorType = @Type(common.Type{ + const ErrorType = @Type(Type{ .ErrorSet = error_fields, }); @@ -660,12 +943,9 @@ pub fn ErrorSet(comptime error_names: []const []const u8, comptime predefined_fi }; } -pub fn getDebugInformation(allocator: common.ZigAllocator, elf_file: []align(common.default_sector_size) const u8) !common.ModuleDebugInfo { - const elf = common.elf; - var module_debug_info: common.ModuleDebugInfo = undefined; - _ = module_debug_info; - const hdr = @as(*const elf.Ehdr, @ptrCast(&elf_file[0])); - if (!common.equal(u8, hdr.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic; +pub fn getDebugInformation(allocator: ZigAllocator, elf_file: []align(arch.valid_page_sizes[0]) const u8) !ModuleDebugInfo { + const hdr = @as(*align(1) const elf.Ehdr, @ptrCast(&elf_file[0])); + if (!equal(u8, hdr.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic; if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion; const endian = .Little; @@ -673,94 +953,110 @@ pub fn getDebugInformation(allocator: common.ZigAllocator, elf_file: []align(com const shoff = hdr.e_shoff; const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx); const str_shdr = @as( - *const elf.Shdr, - @ptrCast(@alignCast(&elf_file[common.cast(usize, str_section_off) orelse return error.Overflow])), + *align(1) const elf.Shdr, + @ptrCast(&elf_file[cast(usize, str_section_off) orelse return error.Overflow]), ); const header_strings = elf_file[str_shdr.sh_offset .. 
str_shdr.sh_offset + str_shdr.sh_size]; const shdrs = @as( - [*]const elf.Shdr, - @ptrCast(@alignCast(&elf_file[shoff])), + [*]align(1) const elf.Shdr, + @ptrCast(&elf_file[shoff]), )[0..hdr.e_shnum]; - var opt_debug_info: ?[]const u8 = null; - var opt_debug_abbrev: ?[]const u8 = null; - var opt_debug_str: ?[]const u8 = null; - var opt_debug_str_offsets: ?[]const u8 = null; - var opt_debug_line: ?[]const u8 = null; - var opt_debug_line_str: ?[]const u8 = null; - var opt_debug_ranges: ?[]const u8 = null; - var opt_debug_loclists: ?[]const u8 = null; - var opt_debug_rnglists: ?[]const u8 = null; - var opt_debug_addr: ?[]const u8 = null; - var opt_debug_names: ?[]const u8 = null; - var opt_debug_frame: ?[]const u8 = null; + var sections: dwarf.DwarfInfo.SectionArray = dwarf.DwarfInfo.null_section_array; + + // Combine section list. This takes ownership over any owned sections from the parent scope. + errdefer for (sections) |section| if (section) |s| if (s.owned) allocator.free(s.data); + + var separate_debug_filename: ?[]const u8 = null; + _ = separate_debug_filename; + var separate_debug_crc: ?u32 = null; + _ = separate_debug_crc; for (shdrs) |*shdr| { - if (shdr.sh_type == elf.SHT_NULL) continue; + if (shdr.sh_type == elf.SHT_NULL or shdr.sh_type == elf.SHT_NOBITS) continue; + const name = sliceTo(header_strings[shdr.sh_name..], 0); - const name = common.sliceTo(header_strings[shdr.sh_name..], 0); - if (common.equal(u8, name, ".debug_info")) { - opt_debug_info = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_abbrev")) { - opt_debug_abbrev = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_str")) { - opt_debug_str = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_str_offsets")) { - opt_debug_str_offsets = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_line")) { - opt_debug_line = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_line_str")) { - opt_debug_line_str = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_ranges")) { - opt_debug_ranges = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_loclists")) { - opt_debug_loclists = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_rnglists")) { - opt_debug_rnglists = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_addr")) { - opt_debug_addr = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_names")) { - opt_debug_names = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); - } else if (common.equal(u8, name, ".debug_frame")) { - opt_debug_frame = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); + if (equal(u8, name, ".gnu_debuglink")) { + @panic("WTF"); + // const gnu_debuglink = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size); + // const debug_filename = mem.sliceTo(@as([*:0]const u8, @ptrCast(gnu_debuglink.ptr)), 0); + // const crc_offset = mem.alignForward(usize, @intFromPtr(&debug_filename[debug_filename.len]) + 1, 4) - @intFromPtr(gnu_debuglink.ptr); + // const crc_bytes = gnu_debuglink[crc_offset .. 
crc_offset + 4]; + // separate_debug_crc = mem.readIntSliceNative(u32, crc_bytes); + // separate_debug_filename = debug_filename; + // continue; } + + var section_index: ?usize = null; + inline for (@typeInfo(dwarf.DwarfSection).Enum.fields, 0..) |section, i| { + if (equal(u8, "." ++ section.name, name)) section_index = i; + } + if (section_index == null) continue; + if (sections[section_index.?] != null) continue; + + const section_bytes = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size); + sections[section_index.?] = if ((shdr.sh_flags & elf.SHF_COMPRESSED) > 0) blk: { + var section_stream = fixedBufferStream(section_bytes); + var section_reader = section_stream.reader(); + const chdr = section_reader.readStruct(elf.Chdr) catch continue; + if (chdr.ch_type != .ZLIB) continue; + + if (true) @panic("ZLIB"); + break :blk undefined; + // var zlib_stream = std.compress.zlib.decompressStream(allocator, section_stream.reader()) catch continue; + // defer zlib_stream.deinit(); + // + // var decompressed_section = try allocator.alloc(u8, chdr.ch_size); + // errdefer allocator.free(decompressed_section); + // + // const read = zlib_stream.reader().readAll(decompressed_section) catch continue; + // assert(read == decompressed_section.len); + // + // break :blk .{ + // .data = decompressed_section, + // .virtual_address = shdr.sh_addr, + // .owned = true, + // }; + } else .{ + .data = section_bytes, + .virtual_address = shdr.sh_addr, + .owned = false, + }; } - var di = common.dwarf.DwarfInfo{ + const missing_debug_info = + sections[@intFromEnum(dwarf.DwarfSection.debug_info)] == null or + sections[@intFromEnum(dwarf.DwarfSection.debug_abbrev)] == null or + sections[@intFromEnum(dwarf.DwarfSection.debug_str)] == null or + sections[@intFromEnum(dwarf.DwarfSection.debug_line)] == null; + assert(!missing_debug_info); + + var di = dwarf.DwarfInfo{ .endian = endian, - .debug_info = opt_debug_info orelse return error.MissingDebugInfo, - .debug_abbrev = opt_debug_abbrev orelse return error.MissingDebugInfo, - .debug_str = opt_debug_str orelse return error.MissingDebugInfo, - .debug_str_offsets = opt_debug_str_offsets, - .debug_line = opt_debug_line orelse return error.MissingDebugInfo, - .debug_line_str = opt_debug_line_str, - .debug_ranges = opt_debug_ranges, - .debug_loclists = opt_debug_loclists, - .debug_rnglists = opt_debug_rnglists, - .debug_addr = opt_debug_addr, - .debug_names = opt_debug_names, - .debug_frame = opt_debug_frame, + .sections = sections, + .is_macho = false, }; - try common.dwarf.openDwarfDebugInfo(&di, allocator); + try dwarf.openDwarfDebugInfo(&di, allocator); + return di; } fn chopSlice(ptr: []const u8, offset: u64, size: u64) error{Overflow}![]const u8 { - const start = common.cast(usize, offset) orelse return error.Overflow; - const end = start + (common.cast(usize, size) orelse return error.Overflow); + const start = cast(usize, offset) orelse return error.Overflow; + const end = start + (cast(usize, size) orelse return error.Overflow); return ptr[start..end]; } pub fn RegionInterface(comptime Region: type) type { const type_info = @typeInfo(Region); - common.assert(type_info == .Struct); - common.assert(type_info.Struct.layout == .Extern); - common.assert(type_info.Struct.fields.len == 2); - const fields = type_info.Struct.fields; - common.assert(common.equal(u8, fields[0].name, "address")); - common.assert(common.equal(u8, fields[1].name, "size")); - const Addr = fields[0].type; + assert(type_info == .Struct); + assert(type_info.Struct.layout == .Extern); + 
assert(type_info.Struct.fields.len == 2); + const region_fields = type_info.Struct.fields; + assert(equal(u8, region_fields[0].name, "address")); + assert(equal(u8, region_fields[1].name, "size")); + const Addr = region_fields[0].type; const AddrT = getAddrT(Addr); return struct { @@ -773,6 +1069,14 @@ pub fn RegionInterface(comptime Region: type) type { .size = info.size, }; } + + pub inline fn invalid() Region { + return Region{ + .address = Addr.invalid(), + .size = 0, + }; + } + pub inline fn fromRaw(info: struct { raw_address: AddrT, size: AddrT, @@ -808,7 +1112,7 @@ pub fn RegionInterface(comptime Region: type) type { pub inline fn fromAnytype(any: anytype, info: struct {}) Region { _ = info; - common.assert(@typeInfo(@TypeOf(any)) == .Pointer); + assert(@typeInfo(@TypeOf(any)) == .Pointer); return Region{ .address = VirtualAddress.new(@intFromPtr(any)), .size = @sizeOf(@TypeOf(any.*)), @@ -833,7 +1137,7 @@ pub fn RegionInterface(comptime Region: type) type { } pub fn shrinked(region: Region, size: AddrT) Region { - common.assert(size <= region.size); + assert(size <= region.size); const result = Region{ .address = region.address, .size = size, @@ -842,17 +1146,34 @@ pub fn RegionInterface(comptime Region: type) type { return result; } - pub inline fn takeSlice(region: *Region, size: AddrT) Region { - common.assert(size <= region.size); - const result = Region{ + const TakeSliceError = error{ + not_enough_space, + }; + + pub inline fn slice(region: *const Region, size: AddrT) Region { + assert(size <= region.size); + const result = .{ .address = region.address, .size = size, }; - region.* = region.offset(size); return result; } + pub inline fn takeSlice(region: *Region, size: AddrT) !Region { + if (size <= region.size) { + const result = Region{ + .address = region.address, + .size = size, + }; + region.* = region.offset(size); + + return result; + } + + return TakeSliceError.not_enough_space; + } + pub inline fn split(region: Region, comptime count: comptime_int) [count]Region { const region_size = @divExact(region.size, count); var result: [count]Region = undefined; @@ -919,9 +1240,9 @@ pub const VirtualMemoryRegion = extern struct { fn getAddrT(comptime AddressEnum: type) type { const type_info = @typeInfo(AddressEnum); - common.assert(type_info == .Enum); + assert(type_info == .Enum); const AddrT = type_info.Enum.tag_type; - common.assert(switch (common.cpu.arch) { + assert(switch (cpu.arch) { .x86 => @sizeOf(AddrT) == 2 * @sizeOf(usize), else => @sizeOf(AddrT) == @sizeOf(usize), }); @@ -1023,12 +1344,12 @@ pub const VirtualAddress = enum(u64) { } pub inline fn toPhysicalAddress(virtual_address: VirtualAddress) PhysicalAddress { - common.assert(virtual_address.value() >= config.cpu_driver_higher_half_address); + assert(virtual_address.value() >= config.cpu_driver_higher_half_address); return @as(PhysicalAddress, @enumFromInt(virtual_address.value() - config.cpu_driver_higher_half_address)); } pub inline fn toGuaranteedPhysicalAddress(virtual_address: VirtualAddress) PhysicalAddress { - common.assert(virtual_address.value() < config.cpu_driver_higher_half_address); + assert(virtual_address.value() < config.cpu_driver_higher_half_address); return PhysicalAddress.new(virtual_address.value()); } }; diff --git a/src/lib/arch.zig b/src/lib/arch.zig index 7030f7b..5d346ea 100644 --- a/src/lib/arch.zig +++ b/src/lib/arch.zig @@ -15,6 +15,8 @@ pub const current = switch (@import("builtin").cpu.arch) { pub const x86 = @import("arch/x86.zig"); pub const x86_64 = @import("arch/x86_64.zig"); 
+pub const paging = x86_64.paging; + pub const default_page_size = current.default_page_size; pub const reasonable_page_size = current.reasonable_page_size; diff --git a/src/lib/arch/x86/common.zig b/src/lib/arch/x86/common.zig index e4b9e61..7ca1a7e 100644 --- a/src/lib/arch/x86/common.zig +++ b/src/lib/arch/x86/common.zig @@ -5,7 +5,7 @@ pub const CPUID = extern struct { ecx: u32, }; -pub inline fn cpuid(leaf: u32) CPUID { +pub inline fn cpuid(leaf: u32, subleaf: u32) CPUID { var eax: u32 = undefined; var ebx: u32 = undefined; var edx: u32 = undefined; @@ -18,6 +18,8 @@ pub inline fn cpuid(leaf: u32) CPUID { [edx] "={edx}" (edx), [ecx] "={ecx}" (ecx), : [leaf] "{eax}" (leaf), + [subleaf] "{ecx}" (subleaf), + : "memory" ); return CPUID{ diff --git a/src/lib/arch/x86_64.zig b/src/lib/arch/x86_64.zig index ebd1b05..6388a42 100644 --- a/src/lib/arch/x86_64.zig +++ b/src/lib/arch/x86_64.zig @@ -2,6 +2,39 @@ const lib = @import("lib"); const x86 = @import("x86/common.zig"); pub usingnamespace x86; +pub const paging = struct { + pub const page_table_entry_size = @sizeOf(u64); + pub const page_table_size = lib.arch.valid_page_sizes[0]; + pub const page_table_entry_count = @divExact(page_table_size, page_table_entry_size); + pub const page_table_alignment = page_table_size; + pub const page_table_mask = page_table_entry_count - 1; + pub const user_address_space_start = 0x200_000; + pub const user_address_space_end = 0x8000_0000_0000; + pub const root_page_table_level: Level = switch (Level) { + Level4 => Level.PML4, + Level5 => @compileError("TODO"), + else => @compileError("Unknown level"), + }; + + pub const Level = Level4; + + pub const Level4 = enum(u2) { + PML4 = 0, + PDP = 1, + PD = 2, + PT = 3, + + pub const count = lib.enumCount(@This()); + }; + + pub const Level5 = enum(u3) {}; + + comptime { + lib.assert(page_table_alignment == page_table_size); + lib.assert(page_table_size == lib.arch.valid_page_sizes[0]); + } +}; + pub const valid_page_sizes = [3]comptime_int{ 0x1000, 0x1000 * 0x200, 0x1000 * 0x200 * 0x200 }; pub const reverse_valid_page_sizes = blk: { var reverse = valid_page_sizes; diff --git a/src/lib/filesystem/fat32.zig b/src/lib/filesystem/fat32.zig index ab426a2..343697b 100644 --- a/src/lib/filesystem/fat32.zig +++ b/src/lib/filesystem/fat32.zig @@ -353,10 +353,7 @@ pub const Cache = extern struct { const aligned_file_size = lib.alignForward(usize, file_size, cache.disk.sector_size); const lba = cache.clusterToSector(first_cluster); - log.debug("Start disk callback", .{}); - const result = try cache.disk.callbacks.read(cache.disk, @divExact(aligned_file_size, cache.disk.sector_size), lba, file_buffer); - log.debug("End disk callback", .{}); return result.buffer[0..file_size]; } diff --git a/src/privileged.zig b/src/privileged.zig index b73372e..dfccc9d 100644 --- a/src/privileged.zig +++ b/src/privileged.zig @@ -10,6 +10,7 @@ const assert = lib.assert; const log = lib.log; const maxInt = lib.maxInt; const Allocator = lib.Allocator; +const VirtualAddress = lib.VirtualAddress; const bootloader = @import("bootloader"); @@ -86,6 +87,7 @@ pub const PageAllocator = struct { count: u16 = 1, level: arch.paging.Level, user: bool, + virtual_address: arch.paging.IndexedVirtualAddress, }; pub inline fn allocatePageTable(page_allocator: PageAllocator, options: AllocatePageTablesOptions) !lib.PhysicalMemoryRegion { @@ -94,6 +96,7 @@ pub const PageAllocator = struct { .level = options.level, .level_valid = true, .user = options.user, + .virtual_address = options.virtual_address, }); return 
result; } @@ -104,6 +107,7 @@ pub const PageAllocator = struct { level: arch.paging.Level = undefined, level_valid: bool = false, user: bool = false, + virtual_address: arch.paging.IndexedVirtualAddress, }; const ContextType = enum(u32) { diff --git a/src/privileged/arch/x86/64/paging.zig b/src/privileged/arch/x86/64/paging.zig index fe00fed..f7813d3 100644 --- a/src/privileged/arch/x86/64/paging.zig +++ b/src/privileged/arch/x86/64/paging.zig @@ -10,7 +10,6 @@ const zeroes = lib.zeroes; const Allocator = lib.Allocator; const privileged = @import("privileged"); -const Heap = privileged.Heap; const PageAllocator = privileged.PageAllocator; const valid_page_sizes = lib.arch.x86_64.valid_page_sizes; @@ -22,34 +21,20 @@ const PhysicalAddress = lib.PhysicalAddress; const VirtualAddress = lib.VirtualAddress; const PhysicalMemoryRegion = lib.PhysicalMemoryRegion; const PhysicalAddressSpace = lib.PhysicalAddressSpace; -const Mapping = privileged.Mapping; +pub const Mapping = privileged.Mapping; const bootloader = @import("bootloader"); -const page_table_level_count = 4; -pub const page_table_mask = page_table_entry_count - 1; +const paging = lib.arch.x86_64.paging; +pub usingnamespace paging; -pub fn entryCount(comptime level: Level, limit: u64) u10 { +pub fn entryCount(comptime level: paging.Level, limit: u64) u10 { const index = baseFromVirtualAddress(level, limit - 1); const result = @as(u10, index) + 1; // @compileLog(limit, index, result); return result; } -// Comptime test -comptime { - const va = 134217728; - const indices = computeIndices(va); - const pml4_index = baseFromVirtualAddress(.PML4, va); - const pdp_index = baseFromVirtualAddress(.PDP, va); - const pd_index = baseFromVirtualAddress(.PD, va); - const pt_index = baseFromVirtualAddress(.PT, va); - assert(pml4_index == indices[@intFromEnum(Level.PML4)]); - assert(pdp_index == indices[@intFromEnum(Level.PDP)]); - assert(pd_index == indices[@intFromEnum(Level.PD)]); - assert(pt_index == indices[@intFromEnum(Level.PT)]); -} - const max_level_possible = 5; pub const IndexedVirtualAddress = packed struct(u64) { page_offset: u12 = 0, @@ -67,9 +52,26 @@ pub const IndexedVirtualAddress = packed struct(u64) { return VirtualAddress.new(raw); } } + + pub fn toIndices(indexed: IndexedVirtualAddress) [Level.count]u9 { + var result: [Level.count]u9 = undefined; + inline for (@typeInfo(Level).Enum.fields) |enum_field| { + result[@intFromEnum(@field(Level, enum_field.name))] = @field(indexed, enum_field.name); + } + return result; + } }; -pub fn baseFromVirtualAddress(comptime level: Level, virtual_address: u64) u9 { +const Level = enum(u2) { + PML4 = 0, + PDP = 1, + PD = 2, + PT = 3, + + const count = @typeInfo(Level).Enum.fields.len; +}; + +pub fn baseFromVirtualAddress(comptime level: paging.Level, virtual_address: u64) u9 { const indexed = @as(IndexedVirtualAddress, @bitCast(virtual_address)); return @field(indexed, @tagName(level)); } @@ -82,7 +84,7 @@ pub const CPUPageTables = extern struct { const base = 0xffff_ffff_8000_0000; const top = base + pte_count * lib.arch.valid_page_sizes[0]; - const pte_count = page_table_entry_count - left_ptables; + const pte_count = paging.page_table_entry_count - left_ptables; pub const left_ptables = 4; pub const pml4_index = 0x1ff; pub const pdp_index = 0x1fe; @@ -101,7 +103,7 @@ pub const CPUPageTables = extern struct { } pub fn initialize(page_allocator: PageAllocator) !CPUPageTables { - const page_table_allocation = try page_allocator.allocate(page_allocator.context, allocated_size, 
lib.arch.valid_page_sizes[0], .{}); + const page_table_allocation = try page_allocator.allocate(page_allocator.context, allocated_size, lib.arch.valid_page_sizes[0], .{ .virtual_address = @bitCast(@as(u64, 0)) }); const page_tables = CPUPageTables{ .pml4_table = page_table_allocation.address, @@ -163,8 +165,8 @@ pub const CPUPageTables = extern struct { if (asked_virtual_address.offset(size).value() > top) return CPUPageTables.MapError.upper_limit_exceeded; const flags = general_flags.toArchitectureSpecific(); - const indices = computeIndices(asked_virtual_address.value()); - const index = indices[indices.len - 1]; + const indexed: IndexedVirtualAddress = @bitCast(asked_virtual_address.value()); + const index = indexed.PT; const iteration_count = @as(u32, @intCast(size >> lib.arch.page_shifter(lib.arch.valid_page_sizes[0]))); const p_table = cpu_page_tables.p_table.toIdentityMappedVirtualAddress().access(*PTable); const p_table_slice = p_table[index .. index + iteration_count]; @@ -181,10 +183,26 @@ pub const CPUPageTables = extern struct { pub const Specific = extern struct { cr3: cr3 align(8), + pub fn current() Specific { + return .{ + .cr3 = cr3.read(), + }; + } + pub inline fn makeCurrent(specific: Specific) void { specific.getUserCr3().write(); } + pub inline fn makeCurrentPrivileged(specific: Specific) void { + specific.cr3.write(); + } + + pub fn fromPhysicalRegion(physical_memory_region: PhysicalMemoryRegion) Specific { + return Specific{ + .cr3 = @bitCast(physical_memory_region.address.value()), + }; + } + pub fn fromPageTables(cpu_page_tables: CPUPageTables) Specific { return .{ .cr3 = cr3.fromAddress(cpu_page_tables.pml4_table), @@ -266,7 +284,7 @@ pub const Specific = extern struct { // TODO: batch better switch (asked_page_size) { // 1 GB - lib.arch.valid_page_sizes[0] * page_table_entry_count * page_table_entry_count => { + lib.arch.valid_page_sizes[0] * paging.page_table_entry_count * paging.page_table_entry_count => { while (virtual_address < top_virtual_address) : ({ physical_address += asked_page_size; virtual_address += asked_page_size; @@ -275,7 +293,7 @@ pub const Specific = extern struct { } }, // 2 MB - lib.arch.valid_page_sizes[0] * page_table_entry_count => { + lib.arch.valid_page_sizes[0] * paging.page_table_entry_count => { while (virtual_address < top_virtual_address) : ({ physical_address += asked_page_size; virtual_address += asked_page_size; @@ -297,67 +315,34 @@ pub const Specific = extern struct { } fn map1GBPage(specific: Specific, physical_address: u64, virtual_address: u64, flags: MemoryFlags, page_allocator: PageAllocator) !void { - const indices = computeIndices(virtual_address); + const indexed: IndexedVirtualAddress = @bitCast(virtual_address); const pml4_table = try getPML4Table(specific.cr3); - const pdp_table = try getPDPTable(pml4_table, indices, flags, page_allocator); - try mapPageTable1GB(pdp_table, indices, physical_address, flags); + const pdp_table = try getPDPTable(pml4_table, indexed, flags, page_allocator); + try mapPageTable1GB(pdp_table, indexed, physical_address, flags); } fn map2MBPage(specific: Specific, physical_address: u64, virtual_address: u64, flags: MemoryFlags, page_allocator: PageAllocator) !void { - const indices = computeIndices(virtual_address); + const indexed: IndexedVirtualAddress = @bitCast(virtual_address); const pml4_table = try getPML4Table(specific.cr3); - const pdp_table = try getPDPTable(pml4_table, indices, flags, page_allocator); - const pd_table = try getPDTable(pdp_table, indices, flags, page_allocator); + 
const pdp_table = try getPDPTable(pml4_table, indexed, flags, page_allocator); + const pd_table = try getPDTable(pdp_table, indexed, flags, page_allocator); - mapPageTable2MB(pd_table, indices, physical_address, flags) catch |err| { + mapPageTable2MB(pd_table, indexed, physical_address, flags) catch |err| { log.err("Virtual address: 0x{x}. Physical address: 0x{x}", .{ virtual_address, physical_address }); return err; }; } fn map4KPage(specific: Specific, physical_address: u64, virtual_address: u64, flags: MemoryFlags, page_allocator: PageAllocator) !void { - const indices = computeIndices(virtual_address); + const indexed: IndexedVirtualAddress = @bitCast(virtual_address); const pml4_table = try getPML4Table(specific.cr3); - const pdp_table = try getPDPTable(pml4_table, indices, flags, page_allocator); - const pd_table = try getPDTable(pdp_table, indices, flags, page_allocator); - const p_table = try getPTable(pd_table, indices, flags, page_allocator); - try mapPageTable4KB(p_table, indices, physical_address, flags); - } - - pub inline fn switchTo(specific: *Specific, execution_mode: lib.TraditionalExecutionMode) void { - const mask = ~@as(u64, 1 << 12); - const masked_cr3 = (@as(u64, @bitCast(specific.cr3)) & mask); - const privileged_or = (@as(u64, @intFromEnum(execution_mode)) << 12); - const new_cr3 = @as(cr3, @bitCast(masked_cr3 | privileged_or)); - specific.cr3 = new_cr3; - } - - pub inline fn copyHigherHalfCommon(cpu_specific: Specific, pml4_physical_address: PhysicalAddress) void { - const cpu_side_pml4_table = pml4_physical_address.toHigherHalfVirtualAddress().access(*PML4Table); - const privileged_cpu_pml4_table = try getPML4Table(cpu_specific.cr3); - for (cpu_side_pml4_table[0x100..], privileged_cpu_pml4_table[0x100..]) |*pml4_entry, cpu_pml4_entry| { - pml4_entry.* = cpu_pml4_entry; - } - } - - pub fn copyHigherHalfPrivileged(cpu_specific: Specific, pml4_physical_address: PhysicalAddress) void { - cpu_specific.copyHigherHalfCommon(pml4_physical_address); - } - - pub fn copyHigherHalfUser(cpu_specific: Specific, pml4_physical_address: PhysicalAddress, page_allocator: *PageAllocator) !void { - cpu_specific.copyHigherHalfCommon(pml4_physical_address); - - const pml4_table = pml4_physical_address.toHigherHalfVirtualAddress().access(*PML4Table); - const pml4_entry = pml4_table[0x1ff]; - const pml4_entry_address = PhysicalAddress.new(unpackAddress(pml4_entry)); - const pdp_table = pml4_entry_address.toHigherHalfVirtualAddress().access(*PDPTable); - const new_pdp_table_allocation = try page_allocator.allocate(0x1000, 0x1000); - const new_pdp_table = new_pdp_table_allocation.toHigherHalfVirtualAddress().access(PDPTE); - @memcpy(new_pdp_table, pdp_table); - new_pdp_table[0x1fd] = @as(PDPTE, @bitCast(@as(u64, 0))); + const pdp_table = try getPDPTable(pml4_table, indexed, flags, page_allocator); + const pd_table = try getPDTable(pdp_table, indexed, flags, page_allocator); + const p_table = try getPTable(pd_table, indexed, flags, page_allocator); + try mapPageTable4KB(p_table, indexed, physical_address, flags); } pub const TranslateError = error{ @@ -373,7 +358,7 @@ pub const Specific = extern struct { }; pub fn translateAddress(specific: Specific, virtual_address: VirtualAddress, flags: MemoryFlags) !PhysicalAddress { - const indices = computeIndices(virtual_address.value()); + const indexed: IndexedVirtualAddress = @bitCast(virtual_address.value()); const is_desired = virtual_address.value() == 0xffff_ffff_8001_f000; const pml4_table = try getPML4Table(specific.cr3); @@ -382,10 +367,10 
@@ pub const Specific = extern struct { // } //log.debug("pml4 table: 0x{x}", .{@ptrToInt(pml4_table)}); - const pml4_index = indices[@intFromEnum(Level.PML4)]; + const pml4_index = indexed.PML4; const pml4_entry = pml4_table[pml4_index]; if (!pml4_entry.present) { - log.err("Virtual address: 0x{x}.\nPML4 index: {}.\nValue: {}\n", .{ virtual_address.value(), pml4_index, pml4_entry }); + log.err("Virtual address: 0x{x}.\nPML4 pointer: 0x{x}\nPML4 index: {}.\nValue: {}\n", .{ virtual_address.value(), @intFromPtr(pml4_table), pml4_index, pml4_entry }); return TranslateError.pml4_entry_not_present; } @@ -398,12 +383,12 @@ pub const Specific = extern struct { return TranslateError.pml4_entry_address_null; } - const pdp_table = try getPDPTable(pml4_table, indices, undefined, null); + const pdp_table = try getPDPTable(pml4_table, indexed, undefined, null); if (is_desired) { _ = try specific.translateAddress(VirtualAddress.new(@intFromPtr(pdp_table)), .{}); } //log.debug("pdp table: 0x{x}", .{@ptrToInt(pdp_table)}); - const pdp_index = indices[@intFromEnum(Level.PDP)]; + const pdp_index = indexed.PDP; const pdp_entry = &pdp_table[pdp_index]; if (!pdp_entry.present) { log.err("PDP index {} not present in PDP table 0x{x}", .{ pdp_index, @intFromPtr(pdp_table) }); @@ -435,7 +420,7 @@ pub const Specific = extern struct { _ = try specific.translateAddress(VirtualAddress.new(@intFromPtr(pd_table)), .{}); } //log.debug("pd table: 0x{x}", .{@ptrToInt(pd_table)}); - const pd_index = indices[@intFromEnum(Level.PD)]; + const pd_index = indexed.PD; const pd_entry = &pd_table[pd_index]; if (!pd_entry.present) { log.err("PD index: {}", .{pd_index}); @@ -468,11 +453,10 @@ pub const Specific = extern struct { _ = try specific.translateAddress(VirtualAddress.new(@intFromPtr(p_table)), .{}); } // log.debug("p table: 0x{x}", .{@ptrToInt(p_table)}); - const pt_index = indices[@intFromEnum(Level.PT)]; + const pt_index = indexed.PT; const pt_entry = &p_table[pt_index]; if (!pt_entry.present) { log.err("Virtual address 0x{x} not mapped", .{virtual_address.value()}); - log.err("Indices: {any}", .{indices}); log.err("PTE: 0x{x}", .{@intFromPtr(pt_entry)}); log.err("PDE: 0x{x}", .{@intFromPtr(pd_entry)}); log.err("PDPE: 0x{x}", .{@intFromPtr(pdp_entry)}); @@ -492,14 +476,14 @@ pub const Specific = extern struct { } pub fn setMappingFlags(specific: Specific, virtual_address: u64, flags: Mapping.Flags) !void { - const indices = computeIndices(virtual_address); + const indexed: IndexedVirtualAddress = @bitCast(virtual_address); const vas_cr3 = specific.cr3; const pml4_physical_address = vas_cr3.getAddress(); const pml4_table = try accessPageTable(pml4_physical_address, *PML4Table); - const pml4_entry = pml4_table[indices[@intFromEnum(Level.PML4)]]; + const pml4_entry = pml4_table[indexed.PML4]; if (!pml4_entry.present) { return TranslateError.pml4_entry_not_present; } @@ -510,7 +494,7 @@ pub const Specific = extern struct { } const pdp_table = try accessPageTable(pml4_entry_address, *PDPTable); - const pdp_entry = pdp_table[indices[@intFromEnum(Level.PDP)]]; + const pdp_entry = pdp_table[indexed.PDP]; if (!pdp_entry.present) { return TranslateError.pdp_entry_not_present; } @@ -521,7 +505,7 @@ pub const Specific = extern struct { } const pd_table = try accessPageTable(pdp_entry_address, *PDTable); - const pd_entry = pd_table[indices[@intFromEnum(Level.PD)]]; + const pd_entry = pd_table[indexed.PD]; if (!pd_entry.present) { return TranslateError.pd_entry_not_present; } @@ -532,7 +516,7 @@ pub const Specific = extern struct { } 
const pt_table = try accessPageTable(pd_entry_address, *PTable); - const pt_entry = &pt_table[indices[@intFromEnum(Level.PT)]]; + const pt_entry = &pt_table[indexed.PT]; if (!pt_entry.present) { return TranslateError.pd_entry_not_present; } @@ -592,14 +576,19 @@ pub const Specific = extern struct { } inline fn getUserCr3(specific: Specific) cr3 { - assert(@as(u64, @bitCast(specific.cr3)) & page_table_size == 0); - return @as(cr3, @bitCast(@as(u64, @bitCast(specific.cr3)) | page_table_size)); + assert(specific.isPrivileged()); + return @as(cr3, @bitCast(@as(u64, @bitCast(specific.cr3)) | paging.page_table_size)); } pub inline fn getCpuPML4Table(specific: Specific) !*PML4Table { - assert(@as(u64, @bitCast(specific.cr3)) & page_table_size == 0); + assert(specific.isPrivileged()); return try specific.getPML4TableUnchecked(); } + + fn isPrivileged(specific: Specific) bool { + return @as(u64, @bitCast(specific.cr3)) & paging.page_table_size == 0; + } + pub inline fn getUserPML4Table(specific: Specific) !*PML4Table { return try getPML4Table(specific.getUserCr3()); } @@ -609,8 +598,6 @@ pub const Specific = extern struct { } }; -const Indices = [enumCount(Level)]u16; - const MapError = error{ already_present_4kb, already_present_2mb, @@ -650,8 +637,8 @@ fn getPML4Table(cr3r: cr3) !*PML4Table { return pml4_table; } -fn getPDPTable(pml4_table: *PML4Table, indices: Indices, flags: MemoryFlags, maybe_page_allocator: ?PageAllocator) !*PDPTable { - const index = indices[@intFromEnum(Level.PML4)]; +fn getPDPTable(pml4_table: *PML4Table, virtual_address: IndexedVirtualAddress, flags: MemoryFlags, maybe_page_allocator: ?PageAllocator) !*PDPTable { + const index = virtual_address.PML4; const entry_pointer = &pml4_table[index]; const table_physical_address = physical_address_blk: { @@ -664,6 +651,7 @@ fn getPDPTable(pml4_table: *PML4Table, indices: Indices, flags: MemoryFlags, may const entry_allocation = try page_allocator.allocatePageTable(.{ .level = .PDP, .user = flags.user, + .virtual_address = virtual_address, }); entry_pointer.* = PML4TE{ @@ -706,8 +694,8 @@ pub inline fn getPageEntry(comptime Entry: type, physical_address: u64, flags: M }; } -fn mapPageTable1GB(pdp_table: *PDPTable, indices: Indices, physical_address: u64, flags: MemoryFlags) MapError!void { - const entry_index = indices[@intFromEnum(Level.PDP)]; +fn mapPageTable1GB(pdp_table: *PDPTable, indexed: IndexedVirtualAddress, physical_address: u64, flags: MemoryFlags) MapError!void { + const entry_index = indexed.PDP; const entry_pointer = &pdp_table[entry_index]; if (entry_pointer.present) return MapError.already_present_1gb; @@ -717,8 +705,8 @@ fn mapPageTable1GB(pdp_table: *PDPTable, indices: Indices, physical_address: u64 entry_pointer.* = @as(PDPTE, @bitCast(getPageEntry(PDPTE_1GB, physical_address, flags))); } -fn mapPageTable2MB(pd_table: *PDTable, indices: Indices, physical_address: u64, flags: MemoryFlags) !void { - const entry_index = indices[@intFromEnum(Level.PD)]; +fn mapPageTable2MB(pd_table: *PDTable, indexed: IndexedVirtualAddress, physical_address: u64, flags: MemoryFlags) !void { + const entry_index = indexed.PD; const entry_pointer = &pd_table[entry_index]; const entry_value = entry_pointer.*; @@ -732,8 +720,8 @@ fn mapPageTable2MB(pd_table: *PDTable, indices: Indices, physical_address: u64, entry_pointer.* = @as(PDTE, @bitCast(getPageEntry(PDTE_2MB, physical_address, flags))); } -fn mapPageTable4KB(p_table: *PTable, indices: Indices, physical_address: u64, flags: MemoryFlags) !void { - const entry_index = 
indices[@intFromEnum(Level.PT)]; +fn mapPageTable4KB(p_table: *PTable, indexed: IndexedVirtualAddress, physical_address: u64, flags: MemoryFlags) !void { + const entry_index = indexed.PT; const entry_pointer = &p_table[entry_index]; if (entry_pointer.present) { @@ -750,8 +738,8 @@ const ToImplementError = error{ page_size, }; -fn getPDTable(pdp_table: *PDPTable, indices: Indices, flags: MemoryFlags, page_allocator: PageAllocator) !*PDTable { - const entry_index = indices[@intFromEnum(Level.PDP)]; +fn getPDTable(pdp_table: *PDPTable, indexed: IndexedVirtualAddress, flags: MemoryFlags, page_allocator: PageAllocator) !*PDTable { + const entry_index = indexed.PDP; const entry_pointer = &pdp_table[entry_index]; const table_physical_address = physical_address_blk: { @@ -765,6 +753,7 @@ fn getPDTable(pdp_table: *PDPTable, indices: Indices, flags: MemoryFlags, page_a const entry_allocation = try page_allocator.allocatePageTable(.{ .level = .PD, .user = flags.user, + .virtual_address = indexed, }); entry_pointer.* = PDPTE{ @@ -781,8 +770,8 @@ fn getPDTable(pdp_table: *PDPTable, indices: Indices, flags: MemoryFlags, page_a return try accessPageTable(table_physical_address, *PDTable); } -fn getPTable(pd_table: *PDTable, indices: Indices, flags: MemoryFlags, page_allocator: PageAllocator) !*PTable { - const entry_pointer = &pd_table[indices[@intFromEnum(Level.PD)]]; +fn getPTable(pd_table: *PDTable, indexed: IndexedVirtualAddress, flags: MemoryFlags, page_allocator: PageAllocator) !*PTable { + const entry_pointer = &pd_table[indexed.PD]; const table_physical_address = physical_address_blk: { const entry_value = entry_pointer.*; if (entry_value.present) { @@ -791,7 +780,11 @@ fn getPTable(pd_table: *PDTable, indices: Indices, flags: MemoryFlags, page_allo return ToImplementError.page_size; } else break :physical_address_blk PhysicalAddress.new(unpackAddress(entry_value)); } else { - const entry_allocation = try page_allocator.allocatePageTable(.{ .level = .PT, .user = flags.user }); + const entry_allocation = try page_allocator.allocatePageTable(.{ + .level = .PT, + .user = flags.user, + .virtual_address = indexed, + }); entry_pointer.* = PDTE{ .present = true, @@ -811,21 +804,6 @@ const half_entry_count = (@sizeOf(PML4Table) / @sizeOf(PML4TE)) / 2; const needed_physical_memory_for_bootstrapping_cpu_driver_address_space = @sizeOf(PML4Table) + @sizeOf(PDPTable) * 256; -pub fn computeIndices(virtual_address: u64) Indices { - var indices: Indices = undefined; - var va = virtual_address; - va = va >> 12; - indices[3] = @as(u9, @truncate(va)); - va = va >> 9; - indices[2] = @as(u9, @truncate(va)); - va = va >> 9; - indices[1] = @as(u9, @truncate(va)); - va = va >> 9; - indices[0] = @as(u9, @truncate(va)); - - return indices; -} - pub inline fn newFlags(general_flags: Mapping.Flags) MemoryFlags { return MemoryFlags{ .write = general_flags.write, @@ -856,28 +834,15 @@ pub const MemoryFlags = packed struct(u64) { const address_mask: u64 = 0x0000_00ff_ffff_f000; -pub const Level = Level4; - -pub const Level4 = enum(u2) { - PML4 = 0, - PDP = 1, - PD = 2, - PT = 3, - - pub const count = lib.enumCount(@This()); -}; - -pub const Level5 = enum(u3) {}; - pub fn EntryTypeMapSize(comptime page_size: comptime_int) usize { - return switch (Level) { - Level4 => switch (page_size) { + return switch (paging.Level) { + paging.Level4 => switch (page_size) { lib.arch.valid_page_sizes[0] => 4, lib.arch.valid_page_sizes[1] => 3, lib.arch.valid_page_sizes[2] => 2, else => @compileError("Unknown page size"), }, - Level5 => 
@compileError("TODO"), + paging.Level5 => @compileError("TODO"), else => @compileError("unreachable"), }; } @@ -886,28 +851,28 @@ pub fn EntryTypeMap(comptime page_size: comptime_int) [EntryTypeMapSize(page_siz const map_size = EntryTypeMapSize(page_size); const Result = [map_size]type; var result: Result = undefined; - switch (Level) { - Level4, Level5 => { - if (@hasField(Level, "pml5")) { + switch (paging.Level) { + paging.Level4, paging.Level5 => { + if (@hasField(paging.Level, "pml5")) { @compileError("TODO: type_map[@enumToInt(Level.PML5)] ="); } - result[@intFromEnum(Level.PML4)] = PML4TE; + result[@intFromEnum(paging.Level.PML4)] = PML4TE; if (page_size == lib.arch.valid_page_sizes[2]) { - assert(map_size == 2 + @intFromBool(Level == Level5)); - result[@intFromEnum(Level.PDP)] = PDPTE_1GB; + assert(map_size == 2 + @intFromBool(paging.Level == paging.Level5)); + result[@intFromEnum(paging.Level.PDP)] = PDPTE_1GB; } else { - result[@intFromEnum(Level.PDP)] = PDPTE; + result[@intFromEnum(paging.Level.PDP)] = PDPTE; if (page_size == lib.arch.valid_page_sizes[1]) { - assert(map_size == @as(usize, 3) + @intFromBool(Level == Level5)); - result[@intFromEnum(Level.PD)] = PDTE_2MB; + assert(map_size == @as(usize, 3) + @intFromBool(paging.Level == paging.Level5)); + result[@intFromEnum(paging.Level.PD)] = PDTE_2MB; } else { assert(page_size == lib.arch.valid_page_sizes[0]); - result[@intFromEnum(Level.PD)] = PDTE; - result[@intFromEnum(Level.PT)] = PTE; + result[@intFromEnum(paging.Level.PD)] = PDTE; + result[@intFromEnum(paging.Level.PT)] = PTE; } } }, @@ -1073,16 +1038,7 @@ pub const PTE = packed struct(u64) { } }; -pub const PML4Table = [page_table_entry_count]PML4TE; -pub const PDPTable = [page_table_entry_count]PDPTE; -pub const PDTable = [page_table_entry_count]PDTE; -pub const PTable = [page_table_entry_count]PTE; -pub const page_table_entry_size = @sizeOf(u64); -pub const page_table_size = lib.arch.valid_page_sizes[0]; -pub const page_table_entry_count = @divExact(page_table_size, page_table_entry_size); -pub const page_table_alignment = page_table_size; - -comptime { - assert(page_table_alignment == page_table_size); - assert(page_table_size == lib.arch.valid_page_sizes[0]); -} +pub const PML4Table = [paging.page_table_entry_count]PML4TE; +pub const PDPTable = [paging.page_table_entry_count]PDPTE; +pub const PDTable = [paging.page_table_entry_count]PDTE; +pub const PTable = [paging.page_table_entry_count]PTE; diff --git a/src/user.zig b/src/user.zig index 8bae431..e44aca0 100644 --- a/src/user.zig +++ b/src/user.zig @@ -4,24 +4,20 @@ const assert = lib.assert; const ExecutionMode = lib.Syscall.ExecutionMode; const birth = @import("birth"); -const capabilities = birth.capabilities; -pub const Syscall = birth.capabilities.Syscall; +pub const Command = birth.interface.Command; +pub const Interface = birth.interface.Descriptor; +pub const Scheduler = birth.Scheduler; pub const arch = @import("user/arch.zig"); +pub const capabilities = @import("user/capabilities.zig"); const core_state = @import("user/core_state.zig"); pub const CoreState = core_state.CoreState; -pub const PinnedState = core_state.PinnedState; pub const libc = @import("user/libc.zig"); pub const thread = @import("user/thread.zig"); +pub const Thread = thread.Thread; pub const process = @import("user/process.zig"); -const vas = @import("user/virtual_address_space.zig"); +pub const Virtual = @import("user/virtual.zig"); const VirtualAddress = lib.VirtualAddress; -pub const VirtualAddressSpace = vas.VirtualAddressSpace; -pub 
const MMUAwareVirtualAddressSpace = vas.MMUAwareVirtualAddressSpace; - -pub const PhysicalMap = @import("user/physical_map.zig").PhysicalMap; -pub const PhysicalMemoryRegion = @import("user/physical_memory_region.zig").PhysicalMemoryRegion; -pub const SlotAllocator = @import("user/slot_allocator.zig").SlotAllocator; comptime { @export(arch._start, .{ .linkage = .Strong, .name = "_start" }); @@ -29,8 +25,8 @@ comptime { pub const writer = lib.Writer(void, Writer.Error, Writer.write){ .context = {} }; const Writer = extern struct { - const syscall = Syscall(.io, .log); - const Error = Writer.syscall.ErrorSet.Error; + const syscall = Interface(.io, .log); + const Error = Writer.syscall.Error; fn write(_: void, bytes: []const u8) Error!usize { const result = try Writer.syscall.blocking(bytes); @@ -52,22 +48,16 @@ pub fn zigPanic(message: []const u8, _: ?*lib.StackTrace, _: ?usize) noreturn { } pub fn panic(comptime format: []const u8, arguments: anytype) noreturn { - lib.log.scoped(.PANIC).err(format, arguments); + var buffer: [0x100]u8 = undefined; + const message: []const u8 = lib.bufPrint(&buffer, format, arguments) catch "Failed to get panic message!"; while (true) { - Syscall(.process, .exit).blocking(false) catch |err| log.err("Exit failed: {}", .{err}); + Interface(.process, .panic).blocking(.{ + .message = message, + .exit_code = 1, + }) catch |err| log.err("Exit failed: {}", .{err}); } } -pub const Scheduler = extern struct { - time_slice: u32, - core_id: u32, - core_state: CoreState, -}; - -pub inline fn currentScheduler() *Scheduler { - return arch.currentScheduler(); -} - fn schedulerInitDisabled(scheduler: *arch.Scheduler) void { // Architecture-specific initialization scheduler.generic.time_slice = 1; @@ -75,18 +65,37 @@ fn schedulerInitDisabled(scheduler: *arch.Scheduler) void { } pub var is_init = false; -pub var command_buffer: birth.CommandBuffer = undefined; +pub var command_buffer: Command.Buffer = undefined; +const entry_count = 50; -pub export fn start(scheduler: *arch.Scheduler, arg_init: bool) callconv(.C) noreturn { +const CommandBufferCreateError = error{ + invalid_entry_count, +}; + +fn createCommandBuffer(options: Command.Buffer.CreateOptions) !Command.Buffer { + // TODO: allow kernel to chop slices of memories + try capabilities.setupCommandFrame(Command.Submission, options.submission_entry_count); + try capabilities.setupCommandFrame(Command.Completion, options.completion_entry_count); + @panic("TODO: createCommandBuffer"); +} + +pub export fn start(scheduler: *Scheduler, arg_init: bool) callconv(.C) noreturn { assert(arg_init); is_init = arg_init; if (is_init) { - assert(scheduler.common.generic.setup_stack_lock.load(.Monotonic)); + assert(scheduler.common.setup_stack_lock.load(.Monotonic)); } - assert(scheduler.common.generic.disabled); - scheduler.initDisabled(); - // command_buffer = Syscall(.cpu, .get_command_buffer).blocking(&command_buffer) catch @panic("Unable to get command buffer"); - Syscall(.cpu, .shutdown).blocking({}) catch unreachable; + + initialize() catch |err| panic("Failed to initialize: {}", .{err}); + @import("root").main() catch |err| panic("Failed to execute main: {}", .{err}); + + while (true) { + @panic("TODO: after main"); + } +} + +fn initialize() !void { + _ = try Virtual.AddressSpace.create(); } // export fn birthInitializeDisabled(scheduler: *arch.Scheduler, arg_init: bool) callconv(.C) noreturn { @@ -98,45 +107,7 @@ pub export fn start(scheduler: *arch.Scheduler, arg_init: bool) callconv(.C) nor // } // Barrelfish: vregion -pub 
const VirtualMemoryRegion = extern struct { - virtual_address_space: *VirtualAddressSpace, - physical_region: *PhysicalMemoryRegion, - offset: usize, - size: usize, - address: VirtualAddress, - flags: Flags, - next: ?*VirtualMemoryRegion = null, - - pub const Flags = packed struct(u8) { - read: bool = false, - write: bool = false, - execute: bool = false, - cache_disabled: bool = false, - preferred_page_size: u2 = 0, - write_combining: bool = false, - reserved: u1 = 0, - }; -}; - -pub const MoreCore = extern struct { - const InitializationError = error{ - invalid_page_size, - }; - - pub fn init(page_size: usize) InitializationError!void { - blk: inline for (lib.arch.valid_page_sizes) |valid_page_size| { - if (valid_page_size == page_size) break :blk; - } else { - return InitializationError.invalid_page_size; - } - - const morecore_state = process.getMoreCoreState(); - morecore_state.mmu_state = try MMUAwareVirtualAddressSpace.initAligned(SlotAllocator.getDefault(), lib.arch.valid_page_sizes[1], lib.arch.valid_page_sizes[0], .{ .read = true, .write = true }); - - @panic("TODO: MoreCore.init"); - } - - pub const State = extern struct { - mmu_state: MMUAwareVirtualAddressSpace, - }; -}; +pub inline fn currentScheduler() *birth.Scheduler { + const result = arch.maybeCurrentScheduler().?; + return result; +} diff --git a/src/user/arch/x86_64.zig b/src/user/arch/x86_64.zig index 855c4ec..c8df470 100644 --- a/src/user/arch/x86_64.zig +++ b/src/user/arch/x86_64.zig @@ -16,28 +16,6 @@ const SlotAllocator = user.SlotAllocator; const Thread = user.Thread; const VirtualAddressSpace = user.VirtualAddressSpace; -pub const Scheduler = extern struct { - common: birth.arch.UserScheduler, - generic: user.Scheduler, - - pub fn initDisabled(scheduler: *Scheduler) void { - _ = scheduler; - // TODO: - // *set entry points? - // *set tls registers? 
- } - - pub noinline fn restore(scheduler: *Scheduler, register_arena: *const RegisterArena) noreturn { - assert(scheduler.common.generic.disabled); - assert(scheduler.common.generic.has_work); - - assert(register_arena.registers.rip > lib.arch.valid_page_sizes[0]); - assert(register_arena.registers.rflags.IF and register_arena.registers.rflags.reserved0); - - register_arena.contextSwitch(); - } -}; - // CRT0 pub fn _start() callconv(.Naked) noreturn { asm volatile ( @@ -46,8 +24,6 @@ pub fn _start() callconv(.Naked) noreturn { : : [startFunction] "r" (user.start), ); - - unreachable; } pub inline fn setInitialState(register_arena: *RegisterArena, entry: VirtualAddress, stack_virtual_address: VirtualAddress, arguments: birth.syscall.Arguments) void { @@ -80,37 +56,3 @@ pub inline fn maybeCurrentScheduler() ?*user.Scheduler { : "memory" ); } - -pub inline fn currentScheduler() *user.Scheduler { - const result = maybeCurrentScheduler().?; - return result; -} - -/// This is an interface to user.PhysicalMap, providing the architecture-specific functionality -pub const PhysicalMapInterface = struct { - pub fn determineAddress(physical_map: *PhysicalMap, physical_memory_region: PhysicalMemoryRegion, alignment: usize) !VirtualAddress { - _ = physical_memory_region; - _ = alignment; - assert(physical_map.virtual_address_space.regions != null); - log.debug("PMap: 0x{x}", .{@intFromPtr(physical_map.virtual_address_space.regions)}); - log.debug("PMap: {?}", .{physical_map.virtual_address_space.regions}); - @panic("TODO: PhysicalMapInterface.determineAddress"); - } - - pub fn initializeCurrent(physical_map: *PhysicalMap) !void { - _ = physical_map; - log.warn("TODO: PhysicalMapInterface.initializeCurrent", .{}); - } - - pub fn init(virtual_address_space: *VirtualAddressSpace, page_level: u3, slot_allocator: *SlotAllocator) !PhysicalMap { - var result = PhysicalMap{ - .virtual_address_space = virtual_address_space, - .slot_allocator = slot_allocator, - }; - _ = page_level; - - try result.initPageTableManagement(); - - @panic("TODO: PhysicalMap.init"); - } -}; diff --git a/src/user/arch/x86_64/linker_script.ld b/src/user/arch/x86_64/linker_script.ld index e820441..92b31a5 100644 --- a/src/user/arch/x86_64/linker_script.ld +++ b/src/user/arch/x86_64/linker_script.ld @@ -6,7 +6,7 @@ PHDRS { } SECTIONS { - . = 0x600000; + . = 0x200000; . = ALIGN(4K); .text . 
: { *(.text*) diff --git a/src/user/capabilities.zig b/src/user/capabilities.zig index 8d407b5..695dc82 100644 --- a/src/user/capabilities.zig +++ b/src/user/capabilities.zig @@ -1,20 +1,85 @@ const lib = @import("lib"); +const log = lib.log; const assert = lib.assert; const birth = @import("birth"); +const user = @import("user"); +const Interface = user.Interface; + +const Command = birth.interface.Command; // TODO: ref -pub fn frameCreate(ref: usize, bytes: usize) !usize { - return mappableCapabilityCreate(ref, .cpu_memory, bytes); +pub fn frameCreate(bytes: usize) !birth.capabilities.RAM { + return mappableCapabilityCreate(.cpu_memory, bytes); } -fn mappableCapabilityCreate(ref: usize, mappable_capability: birth.capabilities.Type.Mappable, bytes: usize) !usize { - _ = mappable_capability; - _ = ref; +const CommandBufferFrameType = enum { + command_buffer_completion, + command_buffer_submission, +}; + +pub fn setupCommandFrame(comptime QueueType: type, entry_count: usize) !void { + assert(entry_count > 0); + comptime assert(@alignOf(QueueType) <= @sizeOf(QueueType.Header)); + const total_size = lib.alignForward(usize, @sizeOf(QueueType.Header) + entry_count * @sizeOf(QueueType), lib.arch.valid_page_sizes[0]); + const capability = switch (QueueType) { + Command.Submission => .command_buffer_submission, + Command.Completion => .command_buffer_completion, + else => @compileError("Unexpected type"), + }; + + const allocation = try Interface(.ram, .allocate).blocking(total_size); + const dst_cap_frame = try retype(@bitCast(allocation), capability); + const flags = .{ + .write = QueueType == Command.Submission, + .execute = false, + }; + _ = try Interface(capability, .map).blocking(.{ + .frame = dst_cap_frame, + .flags = flags, + }); + + @panic("TODO: setup frame"); +} + +fn mappableCapabilityCreate(capability: birth.capabilities.Type.Mappable, bytes: usize) !birth.capabilities.RAM { assert(bytes > 0); + + return RamDescendant.create(capability, bytes); } -fn ramDescendantCreate( - ref: usize, -) !usize { - _ = ref; +const Ram = extern struct { + pub fn allocate(size: usize) !usize { + _ = size; + log.err("TODO: allocate", .{}); + return error.not_implemented; + } +}; + +const RamDescendant = extern struct { + capability: usize, + size: usize, + + pub fn create(capability: birth.capabilities.Type.Mappable, size: usize) !birth.capabilities.RAM { + const allocation = try Interface(.ram, .allocate).blocking(size); + const generic_capability = switch (capability) { + inline else => |mappable_cap| @field(birth.interface.Capability, @tagName(mappable_cap)), + }; + const result = try retype(@bitCast(allocation), generic_capability); + + // TODO: check if the previous capability needs to be deleted (because maybe it should be deleted at the retype operation + // try destroy(@bitCast(allocation)); + return @bitCast(result); + } +}; + +// TODO: make this more complex and generic to handle all cases +pub fn retype(source: birth.interface.Reference, capability: birth.interface.Capability) !birth.interface.Reference { + const new_reference = try Interface(.ram, .retype).blocking(.{ .source = @bitCast(source), .destination = capability }); + return new_reference; +} + +pub fn destroy(capability: birth.capabilities.Reference) !void { + _ = capability; + log.err("TODO: destroy", .{}); + return error.not_implemented; } diff --git a/src/user/core_state.zig b/src/user/core_state.zig index 705e7df..78b70f6 100644 --- a/src/user/core_state.zig +++ b/src/user/core_state.zig @@ -1,27 +1,6 @@ const user = 
@import("user"); -const MoreCore = user.MoreCore; const PhysicalMap = user.PhysicalMap; const PhysicalMemoryRegion = user.PhysicalMemoryRegion; const SlotAllocator = user.SlotAllocator; const VirtualAddressSpace = user.VirtualAddressSpace; const VirtualMemoryRegion = user.VirtualMemoryRegion; - -pub const PagingState = extern struct { - virtual_address_space: VirtualAddressSpace, - physical_map: PhysicalMap, -}; - -pub const PinnedState = extern struct { - physical_memory_region: PhysicalMemoryRegion.Pinned, - virtual_memory_region: VirtualMemoryRegion, - offset: usize, - // TODO: lists -}; - -pub const CoreState = extern struct { - paging: PagingState, - slot_allocator: SlotAllocator.State, - virtual_address_space: VirtualAddressSpace.State, - pinned: PinnedState, - more_core: MoreCore.State, -}; diff --git a/src/user/libc.zig b/src/user/libc.zig index cf6889f..c8e107d 100644 --- a/src/user/libc.zig +++ b/src/user/libc.zig @@ -1,7 +1,6 @@ const user = @import("user"); pub export fn malloc(size: usize) ?*anyopaque { - const morecore_state = user.process.getMoreCoreState(); - const result = morecore_state.mmu_state.map(size) catch return null; - return result.ptr; + _ = size; + @panic("TODO: malloc"); } diff --git a/src/user/mmu_aware_virtual_address_space.zig b/src/user/mmu_aware_virtual_address_space.zig deleted file mode 100644 index 6ea5e7e..0000000 --- a/src/user/mmu_aware_virtual_address_space.zig +++ /dev/null @@ -1,62 +0,0 @@ -const lib = @import("lib"); -const assert = lib.assert; -const log = lib.log.scoped(.MMUAwareVirtualAddressSpace); - -const user = @import("user"); -const PhysicalMemoryRegion = user.PhysicalMemoryRegion; -const SlotAllocator = user.SlotAllocator; -const VirtualMemoryRegion = user.VirtualMemoryRegion; - -pub const MMUAwareVirtualAddressSpace = extern struct { - size: usize, - alignment: usize, - consumed: usize = 0, - /// This is a index into the architecture-specific page sizes - page_size: u8, - slot_allocator: *SlotAllocator, - physical_memory_region: PhysicalMemoryRegion.Anonymous, - virtual_memory_region: VirtualMemoryRegion, - // struct vregion vregion; ///< Needs just one vregion - // struct memobj_anon memobj; ///< Needs just one memobj - // lvaddr_t offset; ///< Offset of free space in anon - // lvaddr_t mapoffset; ///< Offset into the anon that has been mapped in - - pub fn init(size: usize) !MMUAwareVirtualAddressSpace { - const slot_allocator = SlotAllocator.getDefault(); - const alignment = lib.arch.valid_page_sizes[0]; - return initAligned(slot_allocator, size, alignment, .{ .write = true }); - } - - pub fn initAligned(slot_allocator: *SlotAllocator, size: usize, alignment: usize, flags: VirtualMemoryRegion.Flags) !MMUAwareVirtualAddressSpace { - assert(flags.preferred_page_size < lib.arch.valid_page_sizes.len); - var result = MMUAwareVirtualAddressSpace{ - .size = size, - .alignment = alignment, - .page_size = flags.preferred_page_size, - .slot_allocator = slot_allocator, - .physical_memory_region = try PhysicalMemoryRegion.Anonymous.new(size), - .virtual_memory_region = undefined, - }; - // TODO: fix this API - result.virtual_memory_region = try user.process.getVirtualAddressSpace().mapAligned(result.physical_memory_region.getGeneric().*, 0, size, alignment, flags); - - // TODO: create memobj - // TODO: map memobj into vregion - - @panic("TODO: MMUAwareVirtualAddressSpace.initAligned"); - } - - const Error = error{ - alignment, - }; - - pub fn map(virtual_address_space: *MMUAwareVirtualAddressSpace, size: usize) ![]u8 { - if 
(!lib.isAligned(size, lib.arch.valid_page_sizes[0])) { - return error.alignment; - } - _ = virtual_address_space; - log.warn("[map] TODO: slot allocation", .{}); - //virtual_address_space.slot_allocator.allocate(); - @panic("TODO: MMUAwareVirtualAddressSpace.map"); - } -}; diff --git a/src/user/physical_map.zig b/src/user/physical_map.zig deleted file mode 100644 index 516e50d..0000000 --- a/src/user/physical_map.zig +++ /dev/null @@ -1,25 +0,0 @@ -const lib = @import("lib"); -const log = lib.log.scoped(.PhysicalMap); - -const user = @import("user"); -const SlotAllocator = user.SlotAllocator; -const VirtualAddressSpace = user.VirtualAddressSpace; - -pub const PhysicalMap = extern struct { - virtual_address_space: *VirtualAddressSpace, - slot_allocator: *SlotAllocator, - - pub usingnamespace user.arch.PhysicalMapInterface; - - pub fn initPageTableManagement(physical_map: *PhysicalMap) !void { - const current_physical_map = user.process.getPhysicalMap(); - log.debug("CURR: 0x{x}. PHYS: 0x{x}", .{ @intFromPtr(current_physical_map), @intFromPtr(physical_map) }); - if (current_physical_map == physical_map) { - @panic("TODO: if"); - } else { - log.warn("TODO: slab_init", .{}); - _ = user.libc.malloc(lib.arch.valid_page_sizes[0]); - @panic("TODO: else"); - } - } -}; diff --git a/src/user/physical_memory_region.zig b/src/user/physical_memory_region.zig deleted file mode 100644 index 65129aa..0000000 --- a/src/user/physical_memory_region.zig +++ /dev/null @@ -1,81 +0,0 @@ -const lib = @import("lib"); -const assert = lib.assert; -const log = lib.log.scoped(.PhysicalMemoryRegion); - -// Barrelfish: memobj -pub const PhysicalMemoryRegion = extern struct { - size: usize, - type: Type, - - pub const Type = enum(u8) { - anonymous = 0, - one_frame = 1, - pinned = 3, - //one_frame_lazy, - //one_frame_one_map, - // vfs, - // fixed, - // numa, - // append, - - fn map(t: Type) type { - return switch (t) { - .anonymous => Anonymous, - .one_frame => OneFrame, - .pinned => Pinned, - }; - } - }; - - pub const Anonymous = extern struct { - region: PhysicalMemoryRegion, - - pub usingnamespace Interface(@This()); - - pub fn new(size: usize) !Anonymous { - const result = Anonymous{ - .region = .{ - .size = size, - .type = .anonymous, - }, - }; - - log.warn("[Anonymous.new] TODO: initialize memory", .{}); - - return result; - } - }; - - pub const OneFrame = extern struct { - pub usingnamespace Interface(@This()); - }; - - pub const Pinned = extern struct { - region: PhysicalMemoryRegion, - pub usingnamespace Interface(@This()); - - pub fn new(size: usize) !Pinned { - const result = Pinned{ - .region = .{ - .size = size, - .type = .pinned, - }, - }; - - log.warn("[Pinned.new] TODO: initialize memory", .{}); - - return result; - } - }; - - fn Interface(comptime PhysicalMemoryRegionType: type) type { - assert(@hasField(PhysicalMemoryRegionType, "region")); - assert(@TypeOf(@field(@as(PhysicalMemoryRegionType, undefined), "region")) == PhysicalMemoryRegion); - - return extern struct { - pub inline fn getGeneric(r: *PhysicalMemoryRegionType) *PhysicalMemoryRegion { - return &r.region; - } - }; - } -}; diff --git a/src/user/programs/device_manager/main.zig b/src/user/programs/device_manager/main.zig index f855590..f3702f4 100644 --- a/src/user/programs/device_manager/main.zig +++ b/src/user/programs/device_manager/main.zig @@ -9,10 +9,12 @@ pub const std_options = user.std_options; export var core_id: u32 = 0; pub fn main() !noreturn { - core_id = try Syscall(.cpu, .get_core_id).blocking({}); - 
user.currentScheduler().core_id = core_id; - log.debug("Hello world! User space initialization from core #{}", .{core_id}); - const allocation = try Syscall(.cpu_memory, .allocate).blocking(0x1000); - log.debug("Look allocation successful at 0x{x}", .{allocation.value()}); - try Syscall(.cpu, .shutdown).blocking({}); + @panic("TODO: main"); + + // core_id = try Syscall(.cpu, .get_core_id).blocking({}); + // user.currentScheduler().core_id = core_id; + // log.debug("Hello world! User space initialization from core #{}", .{core_id}); + // const allocation = try Syscall(.cpu_memory, .allocate).blocking(0x1000); + // log.debug("Look allocation successful at 0x{x}", .{allocation.value()}); + // try Syscall(.cpu, .shutdown).blocking({}); } diff --git a/src/user/programs/init/main.zig b/src/user/programs/init/main.zig index 19e6781..0add678 100644 --- a/src/user/programs/init/main.zig +++ b/src/user/programs/init/main.zig @@ -9,7 +9,7 @@ pub const std_options = user.std_options; export var core_id: u32 = 0; -pub fn main() !noreturn { +pub fn main() !void { // core_id = try syscall(.cpu, .get_core_id).blocking({}); // user.currentScheduler().core_id = core_id; // log.debug("Hello world! User space initialization from core #{}", .{core_id}); @@ -21,5 +21,5 @@ pub fn main() !noreturn { // const aligned_bundle_size = lib.alignForward(usize, bundle_size, lib.arch.valid_page_sizes[0]); // const bundle_allocation = try syscall(.cpu_memory, .allocate).blocking(aligned_bundle_size); // log.debug("Look allocation successful at 0x{x}", .{bundle_allocation.value()}); - try syscall(.cpu, .shutdown).blocking({}); + // try syscall(.cpu, .shutdown).blocking({}); } diff --git a/src/user/slot_allocator.zig b/src/user/slot_allocator.zig deleted file mode 100644 index dcd102f..0000000 --- a/src/user/slot_allocator.zig +++ /dev/null @@ -1,28 +0,0 @@ -const log = @import("lib").log; -const user = @import("user"); - -pub const SlotAllocator = extern struct { - foo: u32 = 0, - - /// This function is inlined because it's only called once - pub inline fn init() !void { - log.warn("TODO: implement the whole SlotAllocator.init", .{}); - const state = user.process.getSlotAllocatorState(); - const default_allocator = state.default_allocator; - _ = default_allocator; - } - - pub fn getDefault() *SlotAllocator { - const process_slot_allocator_state = user.process.getSlotAllocatorState(); - return &process_slot_allocator_state.default_allocator.allocator; - } - - pub const State = extern struct { - default_allocator: MultiSlotAllocator, - }; -}; - -pub const MultiSlotAllocator = extern struct { - allocator: SlotAllocator, - // TODO: -}; diff --git a/src/user/thread.zig b/src/user/thread.zig index 9ac9ac2..94d03f3 100644 --- a/src/user/thread.zig +++ b/src/user/thread.zig @@ -9,48 +9,21 @@ const SlotAllocator = user.SlotAllocator; const VirtualAddress = lib.VirtualAddress; const VirtualAddressSpace = user.VirtualAddressSpace; +const Thread = birth.Thread; + const max_thread_count = 256; -pub const Thread = extern struct { - self: *Thread, - previous: ?*Thread, - next: ?*Thread, - stack: [*]u8, - stack_top: [*]align(lib.arch.stack_alignment) u8, - register_arena: birth.arch.RegisterArena align(lib.arch.stack_alignment), - core_id: u32, - - pub fn init(thread: *Thread, scheduler: *user.arch.Scheduler) void { - thread.self = thread; - thread.previous = null; - thread.next = null; - thread.core_id = scheduler.generic.core_id; - } -}; - -pub const Mutex = extern struct { - locked: bool = false, - - pub inline fn internalLock(mutex: 
*volatile Mutex) void { - mutex.locked = true; - } -}; - -var static_stack: [0x10000]u8 align(lib.arch.stack_alignment) = undefined; -var static_thread: Thread = undefined; -var static_thread_lock = Mutex{}; - -pub fn initDisabled(scheduler: *user.arch.Scheduler) noreturn { - const thread = &static_thread; - static_thread_lock.internalLock(); - thread.stack = &static_stack; - thread.stack_top = static_stack[static_stack.len..]; +pub fn initBootstrap(scheduler: *user.arch.Scheduler) noreturn { + const thread = &scheduler.generic.bootstrap_thread; + thread.stack = &scheduler.common.generic.setup_stack; + thread.stack_top = @ptrFromInt(@intFromPtr(&scheduler.common.generic.setup_stack) + scheduler.common.generic.setup_stack.len); thread.init(scheduler); // TODO: use RAX as parameter? user.arch.setInitialState(&thread.register_arena, VirtualAddress.new(bootstrapThread), VirtualAddress.new(thread.stack_top), .{0} ** 6); - + scheduler.generic.enqueueThread(thread); + scheduler.generic.current_thread = thread; scheduler.common.generic.has_work = true; scheduler.restore(&thread.register_arena); diff --git a/src/user/virtual.zig b/src/user/virtual.zig new file mode 100644 index 0000000..40767c5 --- /dev/null +++ b/src/user/virtual.zig @@ -0,0 +1,118 @@ +const Virtual = @This(); + +const birth = @import("birth"); +const lib = @import("lib"); +const user = @import("user"); + +const assert = lib.assert; +const log = lib.log; +const VirtualAddress = lib.VirtualAddress; + +const paging = lib.arch.paging; + +pub const AddressSpace = extern struct { + // page_table: PageTable, + region: Virtual.AddressSpace.Region, + minimum: VirtualAddress = VirtualAddress.new(paging.user_address_space_start), + maximum: VirtualAddress = VirtualAddress.new(paging.user_address_space_end), + + const Region = extern struct { + list: Virtual.Region.List = .{}, + block_count: usize = 0, + }; + + pub fn create() !*AddressSpace { + const scheduler = user.currentScheduler(); + const virtual_address_space = try scheduler.common.heapAllocateFast(AddressSpace); + virtual_address_space.* = .{ + .page_table = undefined, + .region = .{}, + }; + + virtual_address_space.collectPageTables(0, 0, 0, &virtual_address_space.page_table.root.u.page_table.children); + + @panic("TODO: create"); + } + + fn collectPageTables(virtual_address_space: *AddressSpace, block: u7, index: u7, level: usize, page_table_buffer: *[512]birth.interface.PageTable) !void { + _ = virtual_address_space; + try user.Interface(.page_table, .get).blocking(.{ + .descriptor = .{ + .block = block, + .index = index, + .entry_type = .page_table, + }, + .buffer = page_table_buffer, + }); + + for (page_table_buffer, 0..) 
|page_table_entry, i| { + _ = i; + if (page_table_entry.present) { + switch (page_table_entry.entry_type) { + .page_table => { + const scheduler = user.currentScheduler(); + const buffer = try scheduler.common.heapAllocateFast([512]birth.interface.PageTable); + collectPageTables(page_table_entry.block, page_table_entry.index, level + 1, buffer) catch unreachable; + }, + .leaf => { + log.err("Leaf: {}", .{page_table_entry}); + }, + } + } + } + } +}; + +pub const Region = extern struct { + foo: u32 = 0, + + pub const List = extern struct { + regions: [region_count]Region = .{.{}} ** region_count, + next: ?*List = null, + + const region_count = 20; + }; +}; + +// fn newPageTableNode(page_table: Virtual.PageTable.Node.PageTable, level: paging.Level) PageTable.Node { +// return .{ +// .flags = .{ +// .type = .page_table, +// .level = level, +// }, +// .u = .{ +// .page_table = page_table, +// }, +// }; +// } + +// pub const PageTable = extern struct { +// root: Node, +// foo: u32 = 0, +// +// pub const Node = extern struct { +// flags: Flags, +// u: extern union { +// leaf: Leaf, +// page_table: Node.PageTable, +// }, +// +// pub const Flags = packed struct(u32) { +// type: birth.interface.PageTable.EntryType, +// level: paging.Level, +// reserved: u29 = 0, +// }; +// +// pub const Leaf = extern struct { +// foo: u32 = 0, +// }; +// +// pub const PageTable = extern struct { +// foo: u32 = 0, +// children: Buffer = .{.{ .entry_type = .page_table }} ** node_count, +// }; +// }; +// +// const node_count = paging.page_table_entry_count; +// pub const Buffer = [node_count]birth.interface.PageTable; +// }; diff --git a/src/user/virtual_address_space.zig b/src/user/virtual_address_space.zig deleted file mode 100644 index daea64a..0000000 --- a/src/user/virtual_address_space.zig +++ /dev/null @@ -1,60 +0,0 @@ -const lib = @import("lib"); -const log = lib.log; - -const user = @import("user"); -const PhysicalMap = user.PhysicalMap; -const PhysicalMemoryRegion = user.PhysicalMemoryRegion; -const VirtualMemoryRegion = user.VirtualMemoryRegion; - -pub const MMUAwareVirtualAddressSpace = @import("mmu_aware_virtual_address_space.zig").MMUAwareVirtualAddressSpace; - -pub const VirtualAddressSpace = extern struct { - physical_map: *PhysicalMap, - // TODO: layout - regions: ?*VirtualMemoryRegion = null, - - /// The function is inlined because it's only called once - pub inline fn initializeCurrent() !void { - log.debug("VirtualAddressSpace.initializeCurrent", .{}); - const virtual_address_space = user.process.getVirtualAddressSpace(); - const physical_map = user.process.getPhysicalMap(); - virtual_address_space.physical_map = physical_map; - - const root_page_level = 0; - physical_map.* = try PhysicalMap.init(virtual_address_space, root_page_level, user.process.getSlotAllocator()); - // This should be an inline call as this the only time this function is called - try physical_map.initializeCurrent(); - - try virtual_address_space.pinnedInit(); - - log.warn("TODO: VirtualAddressSpace.initializeCurrent is incomplete!", .{}); - } - - pub inline fn pinnedInit(virtual_address_space: *VirtualAddressSpace) !void { - const pinned_state = user.process.getPinnedState(); - const pinned_size = 128 * lib.mb; - pinned_state.physical_memory_region = try PhysicalMemoryRegion.Pinned.new(pinned_size); - - pinned_state.virtual_memory_region = try virtual_address_space.map(pinned_state.physical_memory_region.getGeneric().*, 0, pinned_size, .{ .write = true }); - log.warn("TODO: VirtualAddressSpace.pinnedInit", .{}); - } - - pub 
inline fn map(virtual_address_space: *VirtualAddressSpace, physical_memory_region: PhysicalMemoryRegion, offset: usize, size: usize, flags: VirtualMemoryRegion.Flags) !VirtualMemoryRegion { - const alignment = lib.arch.valid_page_sizes[0]; - return virtual_address_space.mapAligned(physical_memory_region, offset, size, alignment, flags); - } - - pub fn mapAligned(virtual_address_space: *VirtualAddressSpace, physical_memory_region: PhysicalMemoryRegion, offset: usize, size: usize, alignment: usize, flags: VirtualMemoryRegion.Flags) !VirtualMemoryRegion { - const virtual_address = try virtual_address_space.physical_map.determineAddress(physical_memory_region, alignment); - _ = virtual_address; - _ = offset; - _ = size; - _ = flags; - @panic("TODO: VirtualAddressSpace.mapAligned"); - } - - pub const State = extern struct { - virtual_address_space: VirtualAddressSpace, - physical_map: PhysicalMap, - }; -};
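
Usage sketch (illustrative only, not applied by this diff): the hunks above move user space from the old birth.capabilities.Syscall entry points to the birth.interface-based user.Interface calls. A minimal sketch of the allocate-and-retype flow from src/user/capabilities.zig could look as follows; the exact enum tags and payload layouts live in src/birth/interface.zig, which this section does not show, so the .ram/.allocate/.cpu_memory tags and the Reference bit-cast are assumptions taken from the hunks, not a verified API.

const birth = @import("birth");
const user = @import("user");
const Interface = user.Interface;

// Hypothetical helper mirroring RamDescendant.create() above: allocate a RAM
// capability from the CPU driver and retype it into a mappable cpu_memory one.
fn exampleFrameCreate(bytes: usize) !birth.interface.Reference {
    // Request a fresh RAM capability of the given size (blocking call).
    const allocation = try Interface(.ram, .allocate).blocking(bytes);
    // Retype the RAM capability into the destination capability type.
    return user.capabilities.retype(@bitCast(allocation), .cpu_memory);
}

Under the same assumptions, frameCreate(bytes) in the patch is this flow specialized to .cpu_memory, with the remaining bookkeeping (whether the source capability must be destroyed after the retype) still marked TODO in the hunk above.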