Compare commits
No commits in common. "syscall-rework-2" and "main" have entirely different histories.
.github/workflows/ci.yml
@@ -40,10 +40,8 @@ jobs:
        run: zig env
      - name: Build test executables
        run: zig build all_tests -Dci --verbose
      - name: Set up QEMU
        uses: davidgm94/setup-qemu@main
      - name: Run all tests
        run: zig build test_all -Dci --verbose
      - name: Run host tests
        run: zig build test_host
      # build_and_test:
      #   runs-on: [self-hosted, Linux, X64]
      #   steps:
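Note: the step names this workflow invokes (all_tests, test_all, test_host) have to exist as top-level steps in build.zig. A minimal sketch of that registration, assuming a vanilla std.Build setup rather than this repository's BuildSteps struct:

const std = @import("std");

// Hypothetical registration of the step names the workflow calls; the real
// build.zig in this diff wires them up through its own BuildSteps struct.
pub fn build(b: *std.Build) void {
    const build_all_tests = b.step("all_tests", "Build all test executables");
    const run_all_tests = b.step("test_all", "Run all tests");
    const run_host_tests = b.step("test_host", "Run host-native tests");
    _ = build_all_tests;
    _ = run_all_tests;
    _ = run_host_tests;
}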
build.zig
@@ -1,45 +1,31 @@
const std = @import("std");
const ArrayList = std.ArrayList;
const assert = std.debug.assert;
const concat = std.mem.concat;
const cwd = std.fs.cwd;
const Cpu = Target.Cpu;
const CrossTarget = std.zig.CrossTarget;
const EnumArray = std.EnumArray;
const enumValues = std.enums.values;
const fields = std.meta.fields;
const json = std.json;
const maxInt = std.math.maxInt;
const OptimizeMode = std.builtin.OptimizeMode;
const Target = std.Target;
const common = @import("src/common.zig");
const os = common.os;

// Build types
const Build = std.Build;
const CompileStep = Build.CompileStep;
const LazyPath = Build.LazyPath;
const Module = Build.Module;
const ModuleDependency = Build.ModuleDependency;
const OptionsStep = Build.OptionsStep;
const RunStep = Build.RunStep;
const Step = Build.Step;
const CompileStep = std.Build.CompileStep;
const FileSource = std.Build.FileSource;
const Module = std.Build.Module;
const ModuleDependency = std.Build.ModuleDependency;
const OptionsStep = std.Build.OptionsStep;
const RunStep = std.Build.RunStep;
const Step = std.Build.Step;

const builtin = @import("builtin");
const os = builtin.os.tag;
const cpu = builtin.cpu;

const common = @import("src/common.zig");
const ArgumentParser = common.ArgumentParser;
const assert = std.debug.assert;
const Bootloader = common.Bootloader;
const canVirtualizeWithQEMU = common.canVirtualizeWithQEMU;
const Configuration = common.Configuration;
const Cpu = common.Cpu;
const CrossTarget = common.CrossTarget;
const DiskType = common.DiskType;
const ExecutionType = common.ExecutionType;
const ExecutionEnvironment = common.ExecutionEnvironment;
const FilesystemType = common.FilesystemType;
const ImageConfig = common.ImageConfig;
const OptimizeMode = common.OptimizeMode;
const QEMUOptions = common.QEMUOptions;
const BirthProgram = common.BirthProgram;
const Suffix = common.Suffix;
const TraditionalExecutionMode = common.TraditionalExecutionMode;
const Target = common.Target;

const Error = error{
    not_implemented,
@@ -58,87 +44,9 @@ var modules = Modules{};
var b: *Build = undefined;
var build_steps: *BuildSteps = undefined;
var default_configuration: Configuration = undefined;
var user_modules: []const UserModule = undefined;
var user_modules: []const common.Module = undefined;
var options = Options{};

const supported_architectures = [_]Cpu.Arch{
    .x86_64,
    //.aarch64,
    //.riscv64,
};

fn architectureIndex(comptime arch: Cpu.Arch) comptime_int {
    inline for (supported_architectures, 0..) |architecture, index| {
        if (arch == architecture) return index;
    }

    @compileError("Architecture not found");
}

const ArchitectureBootloader = struct {
    id: Bootloader,
    protocols: []const Bootloader.Protocol,
};

const architecture_bootloader_map = blk: {
    var array: [supported_architectures.len][]const ArchitectureBootloader = undefined;

    array[architectureIndex(.x86_64)] = &.{
        .{
            .id = .birth,
            .protocols = &.{ .bios, .uefi },
        },
        .{
            .id = .limine,
            .protocols = &.{ .bios, .uefi },
        },
    };

    // array[architectureIndex(.aarch64)] = &.{
    //     .{
    //         .id = .birth,
    //         .protocols = &.{.uefi},
    //     },
    //     .{
    //         .id = .limine,
    //         .protocols = &.{.uefi},
    //     },
    // };

    // array[architectureIndex(.riscv64)] = &.{
    //     .{
    //         .id = .birth,
    //         .protocols = &.{.uefi},
    //     },
    // };

    break :blk array;
};

pub const UserModule = struct {
    package: UserPackage,
    name: []const u8,
};
pub const UserPackage = struct {
    kind: Kind,
    dependencies: []const Dependency,

    pub const Kind = enum {
        zig_exe,
    };

    pub const Dependency = struct {
        foo: u64 = 0,
    };
};

pub const BirthProgram = enum {
    bootloader,
    cpu,
    user,
    host,
};

pub fn build(b_arg: *Build) !void {
    b = b_arg;
    ci = b.option(bool, "ci", "CI mode") orelse false;
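Note: architecture_bootloader_map above is computed once at compile time with a labeled block. A stripped-down, self-contained sketch of the same pattern (illustrative names, not this repository's types):

const std = @import("std");

const supported = [_]std.Target.Cpu.Arch{.x86_64};

fn indexOf(comptime arch: std.Target.Cpu.Arch) comptime_int {
    inline for (supported, 0..) |candidate, index| {
        if (arch == candidate) return index;
    }
    @compileError("Architecture not found");
}

// The table is computed entirely at comptime; `blk:` labels the block so
// `break :blk` can yield the finished array as the initializer.
const names_per_arch = blk: {
    var array: [supported.len][]const u8 = undefined;
    array[indexOf(.x86_64)] = "bios+uefi";
    break :blk array;
};

test "comptime lookup table" {
    try std.testing.expectEqualStrings("bios+uefi", names_per_arch[indexOf(.x86_64)]);
}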
@@ -148,9 +56,9 @@ pub fn build(b_arg: *Build) !void {
    const default_cfg_override = b.option([]const u8, "default", "Default configuration JSON file") orelse "config/default.json";
    modules = blk: {
        var mods = Modules{};
        inline for (comptime enumValues(ModuleID)) |module_id| {
        inline for (comptime common.enumValues(ModuleID)) |module_id| {
            mods.modules.set(module_id, b.createModule(.{
                .source_file = LazyPath.relative(switch (module_id) {
                .source_file = FileSource.relative(switch (module_id) {
                    .limine_installer => "src/bootloader/limine/installer.zig",
                    else => switch (module_id) {
                        .bios, .uefi, .limine => "src/bootloader",
@@ -185,12 +93,12 @@ pub fn build(b_arg: *Build) !void {
    };

    default_configuration = blk: {
        const default_json_file = try cwd().readFileAlloc(b.allocator, default_cfg_override, maxInt(usize));
        const parsed_cfg = try json.parseFromSlice(Configuration, b.allocator, default_json_file, .{});
        const default_json_file = try std.fs.cwd().readFileAlloc(b.allocator, default_cfg_override, common.maxInt(usize));
        const parsed_cfg = try std.json.parseFromSlice(Configuration, b.allocator, default_json_file, .{});
        const cfg = parsed_cfg.value;

        const optimize_mode = b.option(
            OptimizeMode,
            std.builtin.Mode,
            "optimize",
            "Prioritize performance, safety, or binary size (-O flag)",
        ) orelse cfg.optimize_mode;
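Note: the default configuration is read from a JSON file while the build script runs and decoded with std.json.parseFromSlice. A minimal standalone sketch of the same flow (field names shortened for illustration):

const std = @import("std");

const Config = struct {
    architecture: []const u8,
    optimize_mode: []const u8,
};

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    // Read the whole file, then decode; parseFromSlice returns a wrapper
    // that owns the parsed value's allocations.
    const bytes = try std.fs.cwd().readFileAlloc(allocator, "config/default.json", std.math.maxInt(usize));
    defer allocator.free(bytes);

    const parsed = try std.json.parseFromSlice(Config, allocator, bytes, .{ .ignore_unknown_fields = true });
    defer parsed.deinit();

    std.debug.print("arch={s} optimize={s}\n", .{ parsed.value.architecture, parsed.value.optimize_mode });
}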
@@ -254,13 +162,13 @@ pub fn build(b_arg: *Build) !void {
    run_native: bool = true,

    const C = struct {
        include_paths: []const LazyPath,
        include_paths: []const []const u8,
        source_files: []const SourceFile,
        link_libc: bool,
        link_libcpp: bool,

        const SourceFile = struct {
            path: LazyPath,
            path: []const u8,
            flags: []const []const u8,
        };
    };
@@ -275,10 +183,10 @@ pub fn build(b_arg: *Build) !void {
        .root_project_path = disk_image_root_path,
        .modules = disk_image_builder_modules,
        .c = .{
            .include_paths = &.{LazyPath.relative("src/bootloader/limine/installables")},
            .include_paths = &.{"src/bootloader/limine/installables"},
            .source_files = &.{
                .{
                    .path = LazyPath.relative("src/bootloader/limine/installables/limine-deploy.c"),
                    .path = "src/bootloader/limine/installables/limine-deploy.c",
                    .flags = &.{},
                },
            },
@@ -292,7 +200,7 @@ pub fn build(b_arg: *Build) !void {

    const native_test_optimize_mode = .ReleaseFast;
    for (native_tests) |native_test| {
        const test_name = try concat(b.allocator, u8, &.{ native_test.name, "_", @tagName(native_test_optimize_mode) });
        const test_name = try std.mem.concat(b.allocator, u8, &.{ native_test.name, "_", @tagName(native_test_optimize_mode) });
        const test_exe = try addCompileStep(.{
            .name = test_name,
            .root_project_path = native_test.root_project_path,
@@ -307,7 +215,7 @@ pub fn build(b_arg: *Build) !void {
        }

        for (c.source_files) |source_file| {
            test_exe.addCSourceFile(.{ .file = source_file.path, .flags = source_file.flags });
            test_exe.addCSourceFile(source_file.path, source_file.flags);
        }

        if (c.link_libc) {
@@ -339,20 +247,20 @@ pub fn build(b_arg: *Build) !void {
    const ovmf_path = ovmf_downloader_run_step.addOutputFileArg("OVMF.fd");

    {
        var user_module_list = ArrayList(UserModule).init(b.allocator);
        var user_program_dir = try cwd().openIterableDir(user_program_dir_path, .{ .access_sub_paths = true });
        var user_module_list = std.ArrayList(common.Module).init(b.allocator);
        var user_program_dir = try std.fs.cwd().openIterableDir(user_program_dir_path, .{ .access_sub_paths = true });
        defer user_program_dir.close();

        var user_program_iterator = user_program_dir.iterate();

        while (try user_program_iterator.next()) |entry| {
            const dir_name = entry.name;
            const file_path = try concat(b.allocator, u8, &.{ dir_name, "/module.json" });
            const file = try user_program_dir.dir.readFileAlloc(b.allocator, file_path, maxInt(usize));
            const parsed_user_package = try json.parseFromSlice(UserPackage, b.allocator, file, .{});
            const user_package = parsed_user_package.value;
            const file_path = try std.mem.concat(b.allocator, u8, &.{ dir_name, "/module.json" });
            const file = try user_program_dir.dir.readFileAlloc(b.allocator, file_path, common.maxInt(usize));
            const parsed_user_program = try std.json.parseFromSlice(common.UserProgram, b.allocator, file, .{});
            const user_program = parsed_user_program.value;
            try user_module_list.append(.{
                .package = user_package,
                .program = user_program,
                .name = b.dupe(dir_name), // we have to dupe here otherwise Windows CI fails
            });
        }
@@ -362,8 +270,8 @@ pub fn build(b_arg: *Build) !void {

    const executable_kinds = [2]CompileStep.Kind{ .exe, .@"test" };

    for (enumValues(OptimizeMode)) |optimize_mode| {
        for (supported_architectures, 0..) |architecture, architecture_index| {
    for (common.enumValues(OptimizeMode)) |optimize_mode| {
        for (common.supported_architectures, 0..) |architecture, architecture_index| {
            const user_target = try getTarget(architecture, .user);

            for (executable_kinds) |executable_kind| {
@@ -393,7 +301,7 @@ pub fn build(b_arg: *Build) !void {
                    else => return Error.architecture_not_supported,
                };

                const cpu_driver_linker_script_path = LazyPath.relative(try concat(b.allocator, u8, &.{ cpu_driver_path, "/arch/", switch (architecture) {
                const cpu_driver_linker_script_path = FileSource.relative(try std.mem.concat(b.allocator, u8, &.{ cpu_driver_path, "/arch/", switch (architecture) {
                    .x86_64 => "x86/64",
                    .x86 => "x86/32",
                    else => @tagName(architecture),
@@ -401,14 +309,14 @@ pub fn build(b_arg: *Build) !void {

                cpu_driver.setLinkerScriptPath(cpu_driver_linker_script_path);

                var user_module_list = try ArrayList(*CompileStep).initCapacity(b.allocator, user_modules.len);
                const user_architecture_source_path = try concat(b.allocator, u8, &.{ "src/user/arch/", @tagName(architecture), "/" });
                const user_linker_script_path = LazyPath.relative(try concat(b.allocator, u8, &.{ user_architecture_source_path, "linker_script.ld" }));
                var user_module_list = try std.ArrayList(*CompileStep).initCapacity(b.allocator, user_modules.len);
                const user_architecture_source_path = try std.mem.concat(b.allocator, u8, &.{ "src/user/arch/", @tagName(architecture), "/" });
                const user_linker_script_path = FileSource.relative(try std.mem.concat(b.allocator, u8, &.{ user_architecture_source_path, "linker_script.ld" }));
                for (user_modules) |module| {
                    const user_module = try addCompileStep(.{
                        .kind = executable_kind,
                        .name = module.name,
                        .root_project_path = try concat(b.allocator, u8, &.{ user_program_dir_path, "/", module.name }),
                        .root_project_path = try std.mem.concat(b.allocator, u8, &.{ user_program_dir_path, "/", module.name }),
                        .target = user_target,
                        .optimize_mode = optimize_mode,
                        .modules = &.{ .lib, .user, .birth },
@@ -420,7 +328,7 @@ pub fn build(b_arg: *Build) !void {
                    user_module_list.appendAssumeCapacity(user_module);
                }

                const bootloaders = architecture_bootloader_map[architecture_index];
                const bootloaders = common.architecture_bootloader_map[architecture_index];
                for (bootloaders) |bootloader_struct| {
                    const bootloader = bootloader_struct.id;
                    for (bootloader_struct.protocols) |boot_protocol| {
@@ -445,9 +353,9 @@ pub fn build(b_arg: *Build) !void {

                            executable.strip = true;

                            executable.addAssemblyFile(LazyPath.relative("src/bootloader/arch/x86/64/smp_trampoline.S"));
                            executable.addAssemblyFile(LazyPath.relative(bootloader_path ++ "/unreal_mode.S"));
                            executable.setLinkerScriptPath(LazyPath.relative(bootloader_path ++ "/linker_script.ld"));
                            executable.addAssemblyFile("src/bootloader/arch/x86/64/smp_trampoline.S");
                            executable.addAssemblyFile(bootloader_path ++ "/unreal_mode.S");
                            executable.setLinkerScriptPath(FileSource.relative(bootloader_path ++ "/linker_script.ld"));
                            executable.code_model = .small;

                            break :blk executable;
@@ -472,7 +380,7 @@ pub fn build(b_arg: *Build) !void {
                            executable.strip = true;

                            switch (architecture) {
                                .x86_64 => executable.addAssemblyFile(LazyPath.relative("src/bootloader/arch/x86/64/smp_trampoline.S")),
                                .x86_64 => executable.addAssemblyFile("src/bootloader/arch/x86/64/smp_trampoline.S"),
                                else => {},
                            }

@@ -497,7 +405,7 @@ pub fn build(b_arg: *Build) !void {

                            executable.code_model = cpu_driver.code_model;

                            executable.setLinkerScriptPath(LazyPath.relative(try concat(b.allocator, u8, &.{ limine_loader_path ++ "arch/", @tagName(architecture), "/linker_script.ld" })));
                            executable.setLinkerScriptPath(FileSource.relative(try common.concat(b.allocator, u8, &.{ limine_loader_path ++ "arch/", @tagName(architecture), "/linker_script.ld" })));

                            break :blk executable;
                        },
@@ -523,7 +431,7 @@ pub fn build(b_arg: *Build) !void {
                    };

                    const execution_types: []const ExecutionType =
                        switch (canVirtualizeWithQEMU(architecture, ci)) {
                        switch (common.canVirtualizeWithQEMU(architecture, ci)) {
                        true => &.{ .emulated, .accelerated },
                        false => &.{.emulated},
                    };
@@ -540,13 +448,13 @@ pub fn build(b_arg: *Build) !void {
                        .executable_kind = executable_kind,
                    };

                    var disk_argument_parser = ArgumentParser.DiskImageBuilder{};
                    var disk_argument_parser = common.ArgumentParser.DiskImageBuilder{};
                    const disk_image_builder_run = b.addRunArtifact(disk_image_builder);
                    const disk_image_path = disk_image_builder_run.addOutputFileArg("disk.hdd");

                    while (disk_argument_parser.next()) |argument_type| switch (argument_type) {
                        .configuration => inline for (fields(Configuration)) |field| disk_image_builder_run.addArg(@tagName(@field(configuration, field.name))),
                        .image_configuration_path => disk_image_builder_run.addArg(ImageConfig.default_path),
                        .configuration => inline for (common.fields(Configuration)) |field| disk_image_builder_run.addArg(@tagName(@field(configuration, field.name))),
                        .image_configuration_path => disk_image_builder_run.addArg(common.ImageConfig.default_path),
                        .disk_image_path => {
                            // Must be first
                            assert(@intFromEnum(argument_type) == 0);
@@ -632,7 +540,7 @@ pub fn build(b_arg: *Build) !void {
}

const Options = struct {
    arr: EnumArray(BirthProgram, *OptionsStep) = EnumArray(BirthProgram, *OptionsStep).initUndefined(),
    arr: std.EnumArray(BirthProgram, *OptionsStep) = std.EnumArray(BirthProgram, *OptionsStep).initUndefined(),

    pub fn createOption(options_struct: *Options, birth_program: BirthProgram) void {
        const new_options = b.addOptions();
@@ -683,31 +591,31 @@ fn addFileSize(artifact: *CompileStep, comptime name: []const u8) void {

fn newRunnerRunArtifact(arguments: struct {
    configuration: Configuration,
    disk_image_path: LazyPath,
    disk_image_path: FileSource,
    loader: *CompileStep,
    runner: *CompileStep,
    cpu_driver: *CompileStep,
    user_init: *CompileStep,
    qemu_options: QEMUOptions,
    ovmf_path: LazyPath,
    ovmf_path: FileSource,
    is_default: bool,
}) !*RunStep {
    const runner = b.addRunArtifact(arguments.runner);

    var argument_parser = ArgumentParser.Runner{};
    var argument_parser = common.ArgumentParser.Runner{};

    while (argument_parser.next()) |argument_type| switch (argument_type) {
        .configuration => inline for (fields(Configuration)) |field| runner.addArg(@tagName(@field(arguments.configuration, field.name))),
        .image_configuration_path => runner.addArg(ImageConfig.default_path),
        .configuration => inline for (common.fields(Configuration)) |field| runner.addArg(@tagName(@field(arguments.configuration, field.name))),
        .image_configuration_path => runner.addArg(common.ImageConfig.default_path),
        .cpu_driver => runner.addArtifactArg(arguments.cpu_driver),
        .loader_path => runner.addArtifactArg(arguments.loader),
        .init => runner.addArtifactArg(arguments.user_init),
        .disk_image_path => runner.addFileArg(arguments.disk_image_path),
        .qemu_options => inline for (fields(QEMUOptions)) |field| runner.addArg(if (@field(arguments.qemu_options, field.name)) "true" else "false"),
        .disk_image_path => runner.addFileSourceArg(arguments.disk_image_path),
        .qemu_options => inline for (common.fields(QEMUOptions)) |field| runner.addArg(if (@field(arguments.qemu_options, field.name)) "true" else "false"),
        .ci => runner.addArg(if (ci) "true" else "false"),
        .debug_user => runner.addArg(if (debug_user) "true" else "false"),
        .debug_loader => runner.addArg(if (debug_loader) "true" else "false"),
        .ovmf_path => runner.addFileArg(arguments.ovmf_path),
        .ovmf_path => runner.addFileSourceArg(arguments.ovmf_path),
        .is_default => runner.addArg(if (arguments.is_default) "true" else "false"),
    };

@@ -723,17 +631,15 @@ const ExecutableDescriptor = struct {
    modules: []const ModuleID,
};

const main_package_path = LazyPath.relative(source_root_dir);
fn addCompileStep(executable_descriptor: ExecutableDescriptor) !*CompileStep {
    const main_file = try concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/main.zig" });
    const main_file = try std.mem.concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/main.zig" });
    const compile_step = switch (executable_descriptor.kind) {
        .exe => blk: {
            const executable = b.addExecutable(.{
                .name = executable_descriptor.name,
                .root_source_file = LazyPath.relative(main_file),
                .root_source_file = FileSource.relative(main_file),
                .target = executable_descriptor.target,
                .optimize = executable_descriptor.optimize_mode,
                .main_pkg_path = main_package_path,
            });

            build_steps.build_all.dependOn(&executable.step);
@@ -741,14 +647,13 @@ fn addCompileStep(executable_descriptor: ExecutableDescriptor) !*CompileStep {
            break :blk executable;
        },
        .@"test" => blk: {
            const test_file = LazyPath.relative(try concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/test.zig" }));
            const test_file = FileSource.relative(try std.mem.concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/test.zig" }));
            const test_exe = b.addTest(.{
                .name = executable_descriptor.name,
                .root_source_file = test_file,
                .target = executable_descriptor.target,
                .optimize = executable_descriptor.optimize_mode,
                .test_runner = if (executable_descriptor.target.os_tag) |_| main_file else null,
                .main_pkg_path = main_package_path,
            });

            build_steps.build_all_tests.dependOn(&test_exe.step);
@@ -764,6 +669,8 @@ fn addCompileStep(executable_descriptor: ExecutableDescriptor) !*CompileStep {
        compile_step.entry_symbol_name = "_start";
    }

    compile_step.setMainPkgPath(source_root_dir);

    for (executable_descriptor.modules) |module| {
        modules.addModule(compile_step, module);
    }
@@ -793,8 +700,8 @@ const ModuleID = enum {
};

pub const Modules = struct {
    modules: EnumArray(ModuleID, *Module) = EnumArray(ModuleID, *Module).initUndefined(),
    dependencies: EnumArray(ModuleID, []const ModuleDependency) = EnumArray(ModuleID, []const ModuleDependency).initUndefined(),
    modules: std.EnumArray(ModuleID, *Module) = std.EnumArray(ModuleID, *Module).initUndefined(),
    dependencies: std.EnumArray(ModuleID, []const ModuleDependency) = std.EnumArray(ModuleID, []const ModuleDependency).initUndefined(),

    fn addModule(mods: Modules, compile_step: *CompileStep, module_id: ModuleID) void {
        compile_step.addModule(@tagName(module_id), mods.modules.get(module_id));
@@ -811,7 +718,7 @@ pub const Modules = struct {
    }
};

fn getTarget(asked_arch: Cpu.Arch, execution_mode: TraditionalExecutionMode) Error!CrossTarget {
fn getTarget(asked_arch: Cpu.Arch, execution_mode: common.TraditionalExecutionMode) Error!CrossTarget {
    var enabled_features = Cpu.Feature.Set.empty;
    var disabled_features = Cpu.Feature.Set.empty;

@@ -836,14 +743,14 @@ fn getTarget(asked_arch: Cpu.Arch, execution_mode: TraditionalExecutionMode) Err

    return CrossTarget{
        .cpu_arch = asked_arch,
        .cpu_model = switch (cpu.arch) {
        .cpu_model = switch (common.cpu.arch) {
            .x86 => .determined_by_cpu_arch,
            .x86_64 => if (execution_mode == .privileged) .determined_by_cpu_arch else
            // zig fmt: off
            .determined_by_cpu_arch,
            // .determined_by_cpu_arch,
            // TODO: this causes some problems: https://github.com/ziglang/zig/issues/15524
            //.{ .explicit = &Target.x86.cpu.x86_64_v3 },
            //.{ .explicit = &common.Target.x86.cpu.x86_64_v3 },
            else => .determined_by_cpu_arch,
        },
        .os_tag = .freestanding,
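Note: getTarget builds a freestanding CrossTarget with explicit feature sets (the full hunk is elided above). A self-contained sketch of the same API, with an illustrative feature choice rather than this project's exact set:

const std = @import("std");
const Target = std.Target;

// Illustrative only: toggle features the way a kernel target might;
// the project's getTarget chooses its own enabled/disabled sets.
pub fn freestandingX86_64() std.zig.CrossTarget {
    var enabled_features = Target.Cpu.Feature.Set.empty;
    var disabled_features = Target.Cpu.Feature.Set.empty;
    enabled_features.addFeature(@intFromEnum(Target.x86.Feature.soft_float));
    disabled_features.addFeature(@intFromEnum(Target.x86.Feature.sse));
    return .{
        .cpu_arch = .x86_64,
        .os_tag = .freestanding,
        .abi = .none,
        .cpu_features_add = enabled_features,
        .cpu_features_sub = disabled_features,
    };
}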
@@ -1,6 +1,6 @@
{
    "architecture": "x86_64",
    "bootloader": "birth",
    "bootloader": "limine",
    "boot_protocol": "uefi",
    "execution_environment": "qemu",
    "optimize_mode": "Debug",
src/birth.zig
@@ -1,111 +1,23 @@
const lib = @import("lib");
const Allocator = lib.Allocator;
const assert = lib.assert;

pub const arch = @import("birth/arch.zig");
pub const interface = @import("birth/interface.zig");
pub const capabilities = @import("birth/capabilities.zig");
pub const syscall = @import("birth/syscall.zig");

/// This struct is the shared part that the user and the cpu see
pub const Scheduler = extern struct {
    common: Common,
    current_thread: *Thread,
    thread_queue: ?*Thread = null,
    time_slice: u32,
pub const UserScheduler = extern struct {
    self: *UserScheduler,
    disabled: bool,
    has_work: bool,
    core_id: u32,
    core_state: CoreState,
    bootstrap_thread: Thread,
    fast_allocator: Allocator,
    setup_stack: [lib.arch.valid_page_sizes[0]]u8 align(lib.arch.stack_alignment),
    setup_stack_lock: lib.Atomic(bool),

    pub fn initializeAllocator(scheduler: *Scheduler) void {
        scheduler.fast_allocator = Allocator{
            .callbacks = .{
                .allocate = callbackAllocate,
            },
        };
    }

    fn callbackAllocate(allocator: *Allocator, size: u64, alignment: u64) Allocator.Allocate.Error!Allocator.Allocate.Result {
        const scheduler = @fieldParentPtr(Scheduler, "fast_allocator", allocator);
        assert(scheduler.common.heap.address.isAligned(alignment));
        const result = scheduler.common.heap.takeSlice(size) catch return error.OutOfMemory;
        return @bitCast(result);
    }

    pub const Common = extern struct {
        self: *Common,
        disabled: bool,
        has_work: bool,
        core_id: u32,
        heap: lib.VirtualMemoryRegion,
        setup_stack: [lib.arch.valid_page_sizes[0] * 4]u8 align(lib.arch.stack_alignment),
        setup_stack_lock: lib.Atomic(bool),
        disabled_save_area: arch.RegisterArena,

        // pub fn heapAllocateFast(common: *Common, comptime T: type) !*T {
        //     const size = @sizeOf(T);
        //     const alignment = @alignOf(T);
        //     lib.log.debug("Heap: {}. Size: {}. Alignment: {}", .{ common.heap, size, alignment });
        //     const result = try common.heap.takeSlice(size);
        //     const ptr = &result.access(T)[0];
        //     assert(lib.isAligned(@intFromPtr(ptr), alignment));
        //
        //     return ptr;
        // }
    };

    pub fn enqueueThread(scheduler: *Scheduler, thread_to_queue: *Thread) void {
        // TODO: check queue
        // TODO: defer check queue
        if (scheduler.thread_queue) |thread_queue| {
            _ = thread_queue;
            @panic("TODO: enqueueThread");
        } else {
            scheduler.thread_queue = thread_to_queue;
            thread_to_queue.previous = thread_to_queue;
            thread_to_queue.next = thread_to_queue;
        }
    }

    pub noinline fn restore(scheduler: *Scheduler, register_arena: *const arch.RegisterArena) noreturn {
        assert(scheduler.common.generic.disabled);
        assert(scheduler.common.generic.has_work);

        assert(register_arena.registers.rip > lib.arch.valid_page_sizes[0]);
        assert(register_arena.registers.rflags.IF and register_arena.registers.rflags.reserved0);

        register_arena.contextSwitch();
    pub inline fn architectureSpecific(user_scheduler: *UserScheduler) *arch.UserScheduler {
        return @fieldParentPtr(arch.UserScheduler, "generic", user_scheduler);
    }
};

pub const Thread = extern struct {
    self: *Thread,
    previous: ?*Thread = null,
    next: ?*Thread = null,
    stack: [*]u8,
    stack_top: [*]align(lib.arch.stack_alignment) u8,
    register_arena: arch.RegisterArena align(arch.RegisterArena.alignment),
    core_id: u32,

    pub fn init(thread: *Thread, scheduler: *Scheduler) void {
        thread.* = Thread{
            .self = thread,
            .core_id = scheduler.generic.core_id,
            .stack = thread.stack,
            .stack_top = thread.stack_top,
            .register_arena = thread.register_arena,
        };
    }
};

pub const CoreState = extern struct {
    virtual_address_space: *VirtualAddressSpace,
};
pub const VirtualAddressSpace = extern struct {
    // TODO: physical map
    // TODO: layout
    regions: ?*VirtualMemoryRegion = null,
};

pub const VirtualMemoryRegion = extern struct {
    next: ?*VirtualMemoryRegion = null,
pub const CommandBuffer = struct {
    foo: u32,
};
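Note: in the removed enqueueThread, the first thread enqueued points previous and next at itself, seeding a circular doubly linked list. A hedged sketch of the general-case insertion that the @panic("TODO") branch left unimplemented (illustrative, not this project's code):

const Node = struct {
    previous: ?*Node = null,
    next: ?*Node = null,
};

// Insert `new` behind `head` in an intrusive circular doubly linked list.
fn enqueue(head: *Node, new: *Node) void {
    const tail = head.previous.?;
    new.next = head;
    new.previous = tail;
    tail.next = new;
    head.previous = new;
}

test "ring stays consistent" {
    var a = Node{};
    // First element: link it to itself, as enqueueThread does.
    a.previous = &a;
    a.next = &a;
    var b = Node{};
    enqueue(&a, &b);
    try @import("std").testing.expect(a.next == &b and b.next == &a);
}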
@@ -2,14 +2,18 @@ const lib = @import("lib");
const assert = lib.assert;
const birth = @import("birth");

pub const UserScheduler = extern struct {
    generic: birth.UserScheduler,
    disabled_save_area: RegisterArena,
};

pub const RegisterArena = extern struct {
    fpu: FPU align(lib.arch.stack_alignment),
    registers: birth.arch.Registers,

    pub const alignment = lib.arch.stack_alignment;

    pub fn contextSwitch(register_arena: *align(lib.arch.stack_alignment) const RegisterArena) noreturn {
        assert(lib.isAligned(@intFromPtr(register_arena), lib.arch.stack_alignment));
        //lib.log.debug("ASDASD: {}", .{register_arena});
        register_arena.fpu.load();
        register_arena.registers.restore();
    }
@@ -125,9 +129,9 @@ pub const FPU = extern struct {
pub const user_code_selector = 0x43;
pub const user_data_selector = 0x3b;

pub inline fn syscall(options: birth.interface.Raw.Options, arguments: birth.interface.Raw.Arguments) birth.interface.Raw.Result {
    var first: birth.interface.Raw.Result.Birth.First = undefined;
    var second: birth.interface.Raw.Result.Birth.Second = undefined;
pub inline fn syscall(options: birth.syscall.Options, arguments: birth.syscall.Arguments) birth.syscall.Result {
    var first: birth.syscall.Result.Birth.First = undefined;
    var second: birth.syscall.Result.Birth.Second = undefined;
    asm volatile (
        \\syscall
        : [rax] "={rax}" (first),
@@ -149,28 +153,3 @@ pub inline fn syscall(options: birth.int
        },
    };
}

pub const PageTable = extern struct {
    /// The frame that holds the memory of this page table
    frame: birth.interface.RAM,
    flags: packed struct(u8) {
        level: Level4, // TODO: move to other
        granularity: Granularity,
        reserved: u4 = 0,
    },

    pub const Granularity = enum(u2) {
        @"4_kb",
        @"2_mb",
        @"1_gb",
    };

    pub const Level4 = enum(u2) {
        PML4 = 0,
        PDP = 1,
        PD = 2,
        PT = 3,

        pub const count = lib.enumCount(@This());
    };
};
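Note: the diff truncates the inline assembly after the first output constraint. A hedged sketch of the full pattern: two result registers out, the packed options word and argument registers in, with rcx and r11 clobbered because the syscall instruction overwrites them architecturally. Register assignments here are illustrative; the project defines its own ABI in birth/arch.

inline fn rawSyscall(options: u64, arg0: u64, arg1: u64) struct { first: u64, second: u64 } {
    var first: u64 = undefined;
    var second: u64 = undefined;
    asm volatile (
        \\syscall
        : [rax] "={rax}" (first),
          [rdx] "={rdx}" (second),
        : [options] "{rax}" (options),
          [arg0] "{rdi}" (arg0),
          [arg1] "{rsi}" (arg1),
        : "rcx", "r11", "memory"
    );
    return .{ .first = first, .second = second };
}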
src/birth/capabilities.zig (new file)
@@ -0,0 +1,381 @@
const lib = @import("lib");
const assert = lib.assert;
const PhysicalAddress = lib.PhysicalAddress;

const birth = @import("birth");
const syscall = birth.syscall;

const Capabilities = @This();

pub const Type = enum(u8) {
    io, // primitive
    cpu, // primitive
    ram, // primitive
    cpu_memory, // non-primitive Barrelfish: frame
    boot,
    process, // Temporarily available
    page_table, // Barrelfish: vnode
    // TODO: device_memory, // primitive
    // scheduler,
    // irq_table,

    // _,

    pub const Type = u8;

    pub const Mappable = enum {
        cpu_memory,
        page_table,

        pub inline fn toCapability(mappable: Mappable) Capabilities.Type {
            return switch (mappable) {
                inline else => |mappable_cap| @field(Capabilities.Type, @tagName(mappable_cap)),
            };
        }
    };
};

pub const Subtype = u16;
pub const AllTypes = Type;

pub fn CommandBuilder(comptime list: []const []const u8) type {
    const capability_base_command_list = .{
        "copy",
        "mint",
        "retype",
        "delete",
        "revoke",
        "create",
    } ++ list;
    const enum_fields = lib.enumAddNames(&.{}, capability_base_command_list);

    // TODO: make this non-exhaustive enums
    // PROBLEM: https://github.com/ziglang/zig/issues/12250
    // Currently waiting on this since this will enable some comptime magic
    const result = @Type(.{
        .Enum = .{
            .tag_type = Subtype,
            .fields = enum_fields,
            .decls = &.{},
            .is_exhaustive = true,
        },
    });
    return result;
}

/// Takes some names and integers. Then values are added to the Command enum for a specific capability.
/// The number is an offset of the fields with respect to the base command enum fields.
pub fn Command(comptime capability: Type) type {
    const extra_command_list = switch (capability) {
        .io => .{
            "log",
        },
        .cpu => .{
            "get_core_id",
            "shutdown",
            "get_command_buffer",
        },
        .ram => [_][]const u8{},
        .cpu_memory => .{
            "allocate",
        },
        .boot => .{
            "get_bundle_size",
            "get_bundle_file_list_size",
        },
        .process => .{
            "exit",
        },
        .page_table => [_][]const u8{},
    };

    return CommandBuilder(&extra_command_list);
}
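Note: every capability therefore gets the six base commands plus its own extras, so Command(.io) yields an enum with copy through create followed by log. Usage is plain enum access; a sketch assuming the file's own declarations and sequential tag values:

// Hypothetical usage of the generated enums from this file:
const IoCommand = Command(.io);
comptime {
    // Base commands come first, extras are appended after them.
    assert(@intFromEnum(IoCommand.copy) == 0);
    assert(@intFromEnum(IoCommand.log) == 6);
}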

const success = 0;
const first_valid_error = success + 1;

pub fn ErrorSet(comptime error_names: []const []const u8) type {
    return lib.ErrorSet(error_names, &.{
        .{
            .name = "forbidden",
            .value = first_valid_error + 0,
        },
        .{
            .name = "corrupted_input",
            .value = first_valid_error + 1,
        },
        .{
            .name = "invalid_input",
            .value = first_valid_error + 2,
        },
    });
}

const raw_argument_count = @typeInfo(syscall.Arguments).Array.len;

pub fn Syscall(comptime capability_type: Type, comptime command_type: Command(capability_type)) type {
    const Types = switch (capability_type) {
        .io => switch (command_type) {
            .copy, .mint, .retype, .delete, .revoke, .create => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{});
                pub const Result = void;
                pub const Arguments = void;
            },
            .log => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{});
                pub const Result = usize;
                pub const Arguments = []const u8;

                inline fn toResult(raw_result: syscall.Result.Birth) Result {
                    return raw_result.second;
                }

                inline fn resultToRaw(result: Result) syscall.Result {
                    return syscall.Result{
                        .birth = .{
                            .first = .{},
                            .second = result,
                        },
                    };
                }

                inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments {
                    const result = [2]usize{ @intFromPtr(arguments.ptr), arguments.len };
                    return result ++ .{0} ** (raw_argument_count - result.len);
                }

                inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments {
                    const message_ptr = @as(?[*]const u8, @ptrFromInt(raw_arguments[0])) orelse return error.invalid_input;
                    const message_len = raw_arguments[1];
                    if (message_len == 0) return error.invalid_input;
                    const message = message_ptr[0..message_len];
                    return message;
                }
            },
        },
        .cpu => switch (command_type) {
            .copy, .mint, .retype, .delete, .revoke, .create => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{});
                pub const Result = void;
                pub const Arguments = void;
            },
            .get_core_id => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{});
                pub const Result = u32;
                pub const Arguments = void;

                inline fn toResult(raw_result: syscall.Result.birth) Result {
                    return @as(Result, @intCast(raw_result.second));
                }

                inline fn resultToRaw(result: Result) syscall.Result {
                    return syscall.Result{
                        .birth = .{
                            .first = .{},
                            .second = result,
                        },
                    };
                }
            },
            .shutdown => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{});
                pub const Result = noreturn;
                pub const Arguments = void;

                pub const toResult = @compileError("noreturn unexpectedly returned");
            },
            .get_command_buffer => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{});
                pub const Result = noreturn;
                pub const Arguments = *birth.CommandBuffer;

                pub const toResult = @compileError("noreturn unexpectedly returned");

                inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments {
                    const ptr = @as(?*birth.CommandBuffer, @ptrFromInt(raw_arguments[0])) orelse return error.invalid_input;
                    return ptr;
                }

                inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments {
                    const result = [1]usize{@intFromPtr(arguments)};
                    return result ++ .{0} ** (raw_argument_count - result.len);
                }
            },
        },
        .ram => struct {
            pub const ErrorSet = Capabilities.ErrorSet(&.{});
            pub const Result = void;
            pub const Arguments = void;
        },
        .cpu_memory => struct {
            pub const ErrorSet = Capabilities.ErrorSet(&.{
                "OutOfMemory",
            });
            pub const Result = PhysicalAddress;
            pub const Arguments = usize;

            inline fn toResult(raw_result: syscall.Result.birth) Result {
                return PhysicalAddress.new(raw_result.second);
            }

            inline fn resultToRaw(result: Result) syscall.Result {
                return syscall.Result{
                    .birth = .{
                        .first = .{},
                        .second = result.value(),
                    },
                };
            }

            inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments {
                const size = raw_arguments[0];
                return size;
            }

            inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments {
                const result = [1]usize{arguments};
                return result ++ .{0} ** (raw_argument_count - result.len);
            }
        },
        .boot => switch (command_type) {
            .get_bundle_file_list_size, .get_bundle_size => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{
                    "buffer_too_small",
                });
                pub const Result = usize;
                pub const Arguments = void;

                inline fn resultToRaw(result: Result) syscall.Result {
                    return syscall.Result{
                        .birth = .{
                            .first = .{},
                            .second = result,
                        },
                    };
                }

                inline fn toResult(raw_result: syscall.Result.birth) Result {
                    return raw_result.second;
                }
            },
            else => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{
                    "buffer_too_small",
                });
                pub const Result = void;
                pub const Arguments = void;
            },
        },
        .process => switch (command_type) {
            .exit => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{});
                pub const Result = noreturn;
                pub const Arguments = bool;

                inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments {
                    const result = raw_arguments[0] != 0;
                    return result;
                }
                inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments {
                    const result = [1]usize{@intFromBool(arguments)};
                    return result ++ .{0} ** (raw_argument_count - result.len);
                }
            },
            else => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{});
                pub const Result = void;
                pub const Arguments = void;
            },
        },
        .page_table => switch (command_type) {
            else => struct {
                pub const ErrorSet = Capabilities.ErrorSet(&.{});
                pub const Result = void;
                pub const Arguments = void;
            },
        },
        // else => @compileError("TODO: " ++ @tagName(capability)),
    };

    return struct {
        pub const ErrorSet = Types.ErrorSet;
        pub const Result = Types.Result;
        pub const Arguments = Types.Arguments;
        pub const toResult = Types.toResult;
        pub const toArguments = if (Arguments != void)
            Types.toArguments
        else
            struct {
                fn lambda(raw_arguments: syscall.Arguments) error{}!void {
                    _ = raw_arguments;
                    return {};
                }
            }.lambda;
        pub const capability = capability_type;
        pub const command = command_type;

        pub inline fn resultToRaw(result: Result) syscall.Result {
            return if (@hasDecl(Types, "resultToRaw")) blk: {
                comptime assert(Result != void and Result != noreturn);
                break :blk Types.resultToRaw(result);
            } else blk: {
                if (Result != void) {
                    @compileError("expected void type, got " ++ @typeName(Result) ++ ". You forgot to implement a resultToRaw function" ++ " for (" ++ @tagName(capability) ++ ", " ++ @tagName(command) ++ ").");
                }

                break :blk syscall.Result{
                    .birth = .{
                        .first = .{},
                        .second = 0,
                    },
                };
            };
        }

        pub inline fn errorToRaw(err: @This().ErrorSet.Error) syscall.Result {
            const error_enum = switch (err) {
                inline else => |comptime_error| @field(@This().ErrorSet.Enum, @errorName(comptime_error)),
            };
            return syscall.Result{
                .birth = .{
                    .first = .{
                        .@"error" = @intFromEnum(error_enum),
                    },
                    .second = 0,
                },
            };
        }

        /// This is not meant to be called in the CPU driver
        pub fn blocking(arguments: Arguments) @This().ErrorSet.Error!Result {
            const raw_arguments = if (Arguments != void) Types.argumentsToRaw(arguments) else [1]usize{0} ** raw_argument_count;
            // TODO: make this more reliable and robust?
            const options = birth.syscall.Options{
                .birth = .{
                    .type = capability,
                    .command = @intFromEnum(command),
                },
            };

            const raw_result = birth.arch.syscall(options, raw_arguments);

            const raw_error_value = raw_result.birth.first.@"error";
            comptime {
                assert(!@hasField(@This().ErrorSet.Enum, "ok"));
                assert(!@hasField(@This().ErrorSet.Enum, "success"));
                assert(lib.enumFields(@This().ErrorSet.Enum)[0].value == first_valid_error);
            }

            return switch (raw_error_value) {
                success => switch (Result) {
                    noreturn => unreachable,
                    else => toResult(raw_result.birth),
                },
                else => switch (@as(@This().ErrorSet.Enum, @enumFromInt(raw_error_value))) {
                    inline else => |comptime_error_enum| @field(@This().ErrorSet.Error, @tagName(comptime_error_enum)),
                },
            };
        }
    };
}
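Note: putting the pieces together, a user-space caller picks a capability and command pair and calls blocking; for (.io, .log) the arguments are just the message bytes. A sketch of a call site, assuming user-space wiring that is not shown in this diff:

// Hypothetical call site (user space): issue the io.log syscall and
// propagate its typed error set.
fn logToCpuDriver(message: []const u8) !usize {
    const Log = Syscall(.io, .log);
    // blocking packs the slice into the raw argument registers,
    // executes the syscall, and unpacks Result or ErrorSet.Error.
    return try Log.blocking(message);
}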
File diff suppressed because it is too large
src/birth/syscall.zig (new file)
@@ -0,0 +1,117 @@
const lib = @import("lib");
const assert = lib.assert;
const log = lib.log.scoped(.Syscall);

const birth = @import("birth");
const capabilities = birth.capabilities;

pub const argument_count = 6;
pub const Arguments = [argument_count]usize;

pub const Convention = enum(u1) {
    linux = 0,
    birth = 1,
};

pub const Options = extern union {
    general: General,
    birth: Birth,
    linux: Linux,

    pub const General = packed struct(u64) {
        number: Number,
        convention: Convention,

        pub const Number = lib.IntType(.unsigned, union_space_bits);

        comptime {
            assertSize(@This());
        }

        pub inline fn getNumberInteger(general: General, comptime convention: Convention) NumberIntegerType(convention) {
            const options_integer = @as(u64, @bitCast(general));
            return @as(NumberIntegerType(convention), @truncate(options_integer));
        }

        pub fn NumberIntegerType(comptime convention: Convention) type {
            return switch (convention) {
                .birth => birth.IDInteger,
                .linux => u64,
            };
        }
    };

    pub const Birth = packed struct(u64) {
        type: capabilities.Type,
        command: capabilities.Subtype,
        reserved: lib.IntType(.unsigned, @bitSizeOf(u64) - @bitSizeOf(capabilities.Type) - @bitSizeOf(capabilities.Subtype) - @bitSizeOf(Convention)) = 0,
        convention: Convention = .birth,

        comptime {
            Options.assertSize(@This());
        }

        const IDInteger = u16;
        pub const ID = enum(IDInteger) {
            qemu_exit = 0,
            print = 1,
        };
    };

    pub const Linux = enum(u64) {
        _,
        comptime {
            Options.assertSize(@This());
        }
    };

    pub const union_space_bits = @bitSizeOf(u64) - @bitSizeOf(Convention);

    fn assertSize(comptime T: type) void {
        assert(@sizeOf(T) == @sizeOf(u64));
        assert(@bitSizeOf(T) == @bitSizeOf(u64));
    }

    comptime {
        assertSize(@This());
    }
};

pub const Result = extern union {
    general: General,
    birth: Birth,
    linux: Linux,

    pub const General = extern struct {
        first: packed struct(u64) {
            argument: u63,
            convention: Convention,
        },
        second: u64,
    };

    pub const Birth = extern struct {
        first: First,
        second: Second,

        pub const First = packed struct(u64) {
            padding1: u32 = 0,
            @"error": u16 = 0,
            padding2: u8 = 0,
            padding3: u7 = 0,
            convention: Convention = .birth,
        };

        pub const Second = u64;
    };

    pub const Linux = extern struct {
        result: u64,
        reserved: u64 = 0,
    };

    fn assertSize(comptime T: type) void {
        assert(@sizeOf(T) == @sizeOf(u64));
        assert(@bitSizeOf(T) == @bitSizeOf(u64));
    }
};
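Note: both Options and Result are unions of exactly 64-bit views, with the top bit (convention) disambiguating the Linux and Birth encodings. A standalone sketch of how such a packed struct round-trips through a raw register value (layout mirrors the idea of Options.Birth, not its exact field types):

const std = @import("std");

const Convention = enum(u1) { linux = 0, birth = 1 };

// Low bits select the capability and command; the MSB carries the
// convention tag. 8 + 16 + 39 + 1 = 64 bits.
const BirthOptions = packed struct(u64) {
    type: u8,
    command: u16,
    reserved: u39 = 0,
    convention: Convention = .birth,
};

test "options pack into one register" {
    const options = BirthOptions{ .type = 2, .command = 1 };
    const raw: u64 = @bitCast(options);
    // The convention tag lands in the most significant bit.
    try std.testing.expect(raw >> 63 == 1);
    try std.testing.expectEqual(options, @as(BirthOptions, @bitCast(raw)));
}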
@@ -315,13 +315,6 @@ pub const Information = extern struct {
            return Error.unexpected_memory_map_entry_count;
        }
        bootloader_information.configuration.memory_map_diff = @as(u8, @intCast(memory_map_entry_count - new_memory_map_entry_count));

        const entries = bootloader_information.getMemoryMapEntries();
        const entry = entries[total_allocation.index];
        assert(entry.region.address.value() == total_allocation.region.address.value());
        assert(entry.region.size == total_allocation.region.size);

        page_counters[total_allocation.index] = bootloader_information.getAlignedTotalSize() >> lib.arch.page_shifter(lib.arch.valid_page_sizes[0]);
    }

    // Check if the host entry still corresponds to the same index
@@ -373,9 +366,7 @@ pub const Information = extern struct {
        }

        const aligned_size = lib.alignForward(u64, ph.size_in_memory, lib.arch.valid_page_sizes[0]);
        const physical_allocation = try bootloader_information.allocatePages(aligned_size, lib.arch.valid_page_sizes[0], .{
            .virtual_address = @bitCast(@as(u64, 0)),
        });
        const physical_allocation = try bootloader_information.allocatePages(aligned_size, lib.arch.valid_page_sizes[0], .{});
        const physical_address = physical_allocation.address;
        const virtual_address = VirtualAddress.new(ph.virtual_address);
        const flags = Mapping.Flags{ .write = ph.flags.writable, .execute = ph.flags.executable };
@@ -478,7 +469,7 @@ pub const Information = extern struct {
    }

    pub inline fn getSlice(information: *const Information, comptime offset_name: Slice.Name) []Slice.TypeMap[@intFromEnum(offset_name)] {
        const slice_offset = &information.slices.array.values[@intFromEnum(offset_name)];
        const slice_offset = information.slices.array.values[@intFromEnum(offset_name)];
        return slice_offset.dereference(offset_name, information);
    }

@@ -693,10 +684,6 @@ pub const MemoryMapEntry = extern struct {
    bad_memory = 2,
};

pub fn getUsedRegion(mmap_entry: MemoryMapEntry, page_counter: u32) PhysicalMemoryRegion {
    return mmap_entry.region.slice(page_counter << lib.arch.page_shifter(lib.arch.valid_page_sizes[0]));
}

pub fn getFreeRegion(mmap_entry: MemoryMapEntry, page_counter: u32) PhysicalMemoryRegion {
    return mmap_entry.region.offset(page_counter << lib.arch.page_shifter(lib.arch.valid_page_sizes[0]));
}
@@ -68,8 +68,10 @@ pub const Disk = extern struct {
            .segment = 0,
            .lba = lba,
        };
        lib.log.debug("DAP: {}", .{dap});

        const dap_address = @intFromPtr(&dap);
        lib.log.debug("DAP address: 0x{x}", .{dap_address});
        const dap_offset = offset(dap_address);
        const dap_segment = segment(dap_address);
        var registers = Registers{
@@ -79,7 +81,9 @@ pub const Disk = extern struct {
            .ds = dap_segment,
        };

        lib.log.debug("Start int", .{});
        interrupt(0x13, &registers, &registers);
        lib.log.debug("End int", .{});

        if (registers.eflags.flags.carry_flag) return error.read_error;

@@ -88,6 +92,7 @@ pub const Disk = extern struct {
        const src_slice = buffer[0..bytes_to_copy];

        if (maybe_provided_buffer) |provided_buffer| {
            lib.log.debug("A", .{});
            const dst_slice = provided_buffer[@as(usize, @intCast(provided_buffer_offset))..][0..bytes_to_copy];

            // TODO: report Zig that this codegen is so bad that we have to use rep movsb instead to make it go fast
@@ -97,11 +102,19 @@ pub const Disk = extern struct {
            const use_rep_movsb = true;
            if (use_rep_movsb) {
                lib.memcpy(dst_slice, src_slice);
                const bytes_left = asm volatile (
                    \\rep movsb
                    : [ret] "={ecx}" (-> usize),
                    : [dest] "{edi}" (dst_slice.ptr),
                      [src] "{esi}" (src_slice.ptr),
                      [len] "{ecx}" (src_slice.len),
                );
                assert(bytes_left == 0);
            } else {
                @memcpy(dst_slice, src_slice);
            }
        } else {
            //lib.log.debug("B", .{});
            lib.log.debug("B", .{});
        }
    }

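Note: rep movsb copies ECX bytes from DS:ESI to ES:EDI, decrementing ECX to zero, which is what the bytes_left == 0 assert checks. One caveat worth noting: since the asm writes through a pointer, a "memory" clobber keeps the compiler from caching stale values across it. A sketch mirroring the hunk above with that clobber added (illustrative; EDI/ESI are also advanced by the instruction, which this form does not declare):

const assert = @import("std").debug.assert;

// Sketch of the loader's rep movsb copy with an explicit "memory" clobber.
inline fn repMovsb(dst: [*]u8, src: [*]const u8, len: usize) void {
    const bytes_left = asm volatile (
        \\rep movsb
        : [ret] "={ecx}" (-> usize),
        : [dest] "{edi}" (dst),
          [src] "{esi}" (src),
          [len] "{ecx}" (len),
        : "memory"
    );
    assert(bytes_left == 0); // rep movsb runs ECX down to zero
}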
@@ -5,7 +5,8 @@ const log = lib.log;
const privileged = @import("privileged");
const ACPI = privileged.ACPI;
const MemoryManager = privileged.MemoryManager;
pub const writer = privileged.writer;
const PhysicalHeap = privileged.PhyicalHeap;
const writer = privileged.writer;

const stopCPU = privileged.arch.stopCPU;
const GDT = privileged.arch.x86_64.GDT;
@@ -53,7 +54,10 @@ pub const std_options = struct {
        _ = format;
        _ = scope;
        _ = level;
        _ = level;
        // _ = level;
        // writer.writeByte('[') catch stopCPU();
        // writer.writeAll(@tagName(scope)) catch stopCPU();
        // writer.writeAll("] ") catch stopCPU();
        // lib.format(writer, format, args) catch stopCPU();
        // writer.writeByte('\n') catch stopCPU();
    }
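Note: the commented-out body shows the intended logger: prefix with the scope, format the message, terminate with a newline, and fall back to stopCPU on any write failure. Re-enabled, it would read roughly like this (sketch based on the commented lines; writer, lib.format, and stopCPU are this repository's own symbols):

pub fn log(comptime level: lib.log.Level, comptime scope: @TypeOf(.EnumLiteral), comptime format: []const u8, args: anytype) void {
    _ = level;
    writer.writeByte('[') catch stopCPU();
    writer.writeAll(@tagName(scope)) catch stopCPU();
    writer.writeAll("] ") catch stopCPU();
    lib.format(writer, format, args) catch stopCPU();
    writer.writeByte('\n') catch stopCPU();
}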
@@ -79,13 +83,17 @@ const Filesystem = extern struct {
    }

    pub fn readFile(filesystem: *Filesystem, file_path: []const u8, file_buffer: []u8) ![]const u8 {
        log.debug("File {s} read started", .{file_path});
        assert(filesystem.fat_allocator.allocated <= filesystem.fat_allocator.buffer.len);
        const file = try filesystem.fat_cache.readFileToBuffer(file_path, file_buffer);
        log.debug("File read succeeded", .{});
        return file;
    }

    pub fn sneakFile(filesystem: *Filesystem, file_path: []const u8, size: usize) ![]const u8 {
        log.debug("File {s} read started", .{file_path});
        const file = try filesystem.fat_cache.readFileToCache(file_path, size);
        log.debug("File read succeeded", .{});
        return file;
    }

@@ -14,13 +14,16 @@ const uefi = @import("uefi");
const BootloaderInformation = uefi.BootloaderInformation;
const BootServices = uefi.BootServices;
const ConfigurationTable = uefi.ConfigurationTable;
const FileProtocol = uefi.FileProtocol;
const Handle = uefi.Handle;
const LoadedImageProtocol = uefi.LoadedImageProtocol;
const LoadKernelFunction = uefi.LoadKernelFunction;
const MemoryCategory = uefi.MemoryCategory;
const MemoryDescriptor = uefi.MemoryDescriptor;
const ProgramSegment = uefi.ProgramSegment;
const Protocol = uefi.Protocol;
const page_table_estimated_size = uefi.page_table_estimated_size;
const SimpleFilesystemProtocol = uefi.SimpleFilesystemProtocol;
const SystemTable = uefi.SystemTable;

const privileged = @import("privileged");
@@ -65,7 +68,7 @@ pub fn panic(message: []const u8, _: ?*lib.StackTrace, _: ?usize) noreturn {
}

const Filesystem = extern struct {
    root: *uefi.protocol.File,
    root: *FileProtocol,
    buffer: [0x200 * 10]u8 = undefined,

    pub fn deinitialize(filesystem: *Filesystem) !void {
@@ -88,7 +91,7 @@ const Filesystem = extern struct {
    }

    const FileDescriptor = struct {
        handle: *uefi.protocol.File,
        handle: *FileProtocol,
        path_size: u32,
    };

@@ -108,14 +111,14 @@ const Filesystem = extern struct {
            return Error.boot_services_exited;
        }

        var file: *uefi.protocol.File = undefined;
        var file: *FileProtocol = undefined;
        var path_buffer: [256:0]u16 = undefined;
        const length = try lib.unicode.utf8ToUtf16Le(&path_buffer, file_path);
        path_buffer[length] = 0;
        const path = path_buffer[0..length :0];
        const uefi_path = if (path[0] == '/') path[1..] else path;

        try uefi.Try(filesystem.root.open(&file, uefi_path, uefi.protocol.File.efi_file_mode_read, 0));
        try uefi.Try(filesystem.root.open(&file, uefi_path, FileProtocol.efi_file_mode_read, 0));

        const result = FileDescriptor{
            .handle = file,
@@ -233,16 +236,16 @@ const Initialization = struct {
        },
        .filesystem = .{
            .root = blk: {
                const loaded_image = try Protocol.open(uefi.protocol.LoadedImage, boot_services, handle);
                const filesystem_protocol = try Protocol.open(uefi.protocol.SimpleFileSystem, boot_services, loaded_image.device_handle orelse @panic("No device handle"));
                const loaded_image = try Protocol.open(LoadedImageProtocol, boot_services, handle);
                const filesystem_protocol = try Protocol.open(SimpleFilesystemProtocol, boot_services, loaded_image.device_handle orelse @panic("No device handle"));

                var root: *uefi.protocol.File = undefined;
                var root: *FileProtocol = undefined;
                try uefi.Try(filesystem_protocol.openVolume(&root));
                break :blk root;
            },
        },
        .framebuffer = blk: {
            const gop = try Protocol.locate(uefi.protocol.GraphicsOutput, boot_services);
            const gop = try Protocol.locate(uefi.GraphicsOutputProtocol, boot_services);

            const pixel_format_info: struct {
                red_color_mask: bootloader.Framebuffer.ColorMask,
@@ -250,19 +253,20 @@ const Initialization = struct {
                green_color_mask: bootloader.Framebuffer.ColorMask,
                bpp: u8,
            } = switch (gop.mode.info.pixel_format) {
                .RedGreenBlueReserved8BitPerColor => .{
                .PixelRedGreenBlueReserved8BitPerColor => .{
                    .red_color_mask = .{ .size = 8, .shift = 0 },
                    .green_color_mask = .{ .size = 8, .shift = 8 },
                    .blue_color_mask = .{ .size = 8, .shift = 16 },
                    .bpp = 32,
                },
                .BlueGreenRedReserved8BitPerColor => .{
                .PixelBlueGreenRedReserved8BitPerColor => .{
                    .red_color_mask = .{ .size = 8, .shift = 16 },
                    .green_color_mask = .{ .size = 8, .shift = 8 },
                    .blue_color_mask = .{ .size = 8, .shift = 0 },
                    .bpp = 32,
                },
                .BitMask, .BltOnly => @panic("Unsupported pixel format"),
                .PixelBitMask, .PixelBltOnly => @panic("Unsupported pixel format"),
                .PixelFormatMax => @panic("Corrupted pixel format"),
            };

            break :blk bootloader.Framebuffer{
@@ -8,14 +8,13 @@ const uefi = lib.uefi;
pub const BootServices = uefi.tables.BootServices;
pub const ConfigurationTable = uefi.tables.ConfigurationTable;
pub const Error = Status.EfiError;
pub const FileInfo = uefi.FileInfo;
// pub const FileProtocol = protocol.FileProtocol;
// pub const GraphicsOutputProtocol = protocol.GraphicsOutputProtocol;
// pub const LoadedImageProtocol = protocol.LoadedImageProtocol;
// pub const SimpleFilesystemProtocol = protocol.SimpleFileSystemProtocol;
pub const FileInfo = uefi.protocols.FileInfo;
pub const FileProtocol = uefi.protocols.FileProtocol;
pub const GraphicsOutputProtocol = uefi.protocols.GraphicsOutputProtocol;
pub const LoadedImageProtocol = uefi.protocols.LoadedImageProtocol;
pub const Handle = uefi.Handle;
pub const MemoryDescriptor = uefi.tables.MemoryDescriptor;
pub const protocol = uefi.protocol;
pub const SimpleFilesystemProtocol = uefi.protocols.SimpleFileSystemProtocol;
pub const Status = uefi.Status;
pub const SystemTable = uefi.tables.SystemTable;
pub const Try = Status.err;
@@ -26,6 +25,9 @@ pub const page_size = 0x1000;
pub const page_shifter = lib.arch.page_shifter(page_size);

const privileged = @import("privileged");
const PhysicalAddress = privileged.PhysicalAddress;
const VirtualAddress = privileged.VirtualAddress;
const VirtualMemoryRegion = privileged.VirtualMemoryRegion;
const stopCPU = privileged.arch.stopCPU;

pub fn panic(comptime format: []const u8, arguments: anytype) noreturn {
449
src/common.zig
@ -1,25 +1,254 @@
// This file is meant to be shared between all parts of the project, including build.zig
const std = @import("std");
const maxInt = std.math.maxInt;
const containsAtLeast = std.mem.containsAtLeast;
const Target = std.Target;
const Cpu = Target.Cpu;
const OptimizeMode = std.builtin.OptimizeMode;
const compiler_builtin = @import("builtin");
pub const cpu = compiler_builtin.cpu;
pub const os = compiler_builtin.os.tag;
pub const build_mode = compiler_builtin.mode;
pub const is_test = compiler_builtin.is_test;

const Allocator = std.mem.Allocator;
pub const kb = 1024;
pub const mb = kb * 1024;
pub const gb = mb * 1024;
pub const tb = gb * 1024;

const builtin = @import("builtin");
const cpu = builtin.cpu;
const os = builtin.os.tag;
pub const SizeUnit = enum(u64) {
byte = 1,
kilobyte = 1024,
megabyte = 1024 * 1024,
gigabyte = 1024 * 1024 * 1024,
terabyte = 1024 * 1024 * 1024 * 1024,
};

pub const Configuration = struct {
architecture: Cpu.Arch,
bootloader: Bootloader,
boot_protocol: Bootloader.Protocol,
execution_environment: ExecutionEnvironment,
optimize_mode: OptimizeMode,
execution_type: ExecutionType,
executable_kind: std.Build.CompileStep.Kind,
pub const std = @import("std");
pub const Target = std.Target;
pub const Cpu = Target.Cpu;
pub const CrossTarget = std.zig.CrossTarget;

pub const log = std.log;

pub const Atomic = std.atomic.Atomic;

pub const Reader = std.io.Reader;
pub const Writer = std.io.Writer;

pub const FixedBufferStream = std.io.FixedBufferStream;
pub const fixedBufferStream = std.io.fixedBufferStream;

pub fn assert(ok: bool) void {
if (!ok) {
if (@inComptime()) {
@compileError("Assert failed!");
} else {
@panic("Assert failed!");
}
}
}

pub const deflate = std.compress.deflate;

const debug = std.debug;
pub const print = debug.print;
pub const StackIterator = debug.StackIterator;
pub const dwarf = std.dwarf;
pub const ModuleDebugInfo = std.debug.ModuleDebugInfo;

pub const elf = std.elf;

const fmt = std.fmt;
pub const format = std.fmt.format;
pub const FormatOptions = fmt.FormatOptions;
pub const bufPrint = fmt.bufPrint;
pub const allocPrint = fmt.allocPrint;
pub const comptimePrint = fmt.comptimePrint;
pub const parseUnsigned = fmt.parseUnsigned;

const heap = std.heap;
pub const FixedBufferAllocator = heap.FixedBufferAllocator;

pub const json = std.json;

const mem = std.mem;
pub const ZigAllocator = mem.Allocator;
pub const equal = mem.eql;
pub const length = mem.len;
pub const startsWith = mem.startsWith;
pub const endsWith = mem.endsWith;
pub const indexOf = mem.indexOf;
// Ideal for small inputs
pub const indexOfPosLinear = mem.indexOfPosLinear;
pub const lastIndexOf = mem.lastIndexOf;
pub const asBytes = mem.asBytes;
pub const readIntBig = mem.readIntBig;
pub const readIntSliceBig = mem.readIntSliceBig;
pub const concat = mem.concat;
pub const sliceAsBytes = mem.sliceAsBytes;
pub const bytesAsSlice = mem.bytesAsSlice;
pub const alignForward = mem.alignForward;
pub const alignBackward = mem.alignBackward;
pub const isAligned = mem.isAligned;
pub const isAlignedGeneric = mem.isAlignedGeneric;
pub const reverse = mem.reverse;
pub const tokenize = mem.tokenize;
pub const containsAtLeast = mem.containsAtLeast;
pub const sliceTo = mem.sliceTo;
pub const swap = mem.swap;

pub const random = std.rand;

pub const testing = std.testing;

pub const sort = std.sort;

pub fn fieldSize(comptime T: type, field_name: []const u8) comptime_int {
var foo: T = undefined;
return @sizeOf(@TypeOf(@field(foo, field_name)));
}

const DiffError = error{
diff,
};

pub fn diff(file1: []const u8, file2: []const u8) !void {
assert(file1.len == file2.len);
var different_bytes: u64 = 0;
for (file1, 0..) |byte1, index| {
const byte2 = file2[index];
const is_different_byte = byte1 != byte2;
different_bytes += @intFromBool(is_different_byte);
if (is_different_byte) {
log.debug("Byte [0x{x}] is different: 0x{x} != 0x{x}", .{ index, byte1, byte2 });
}
}

if (different_bytes != 0) {
log.debug("Total different bytes: 0x{x}", .{different_bytes});
return DiffError.diff;
}
}
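The `diff` helper above is a plain byte-for-byte comparison used by the build and test tooling; it logs each mismatch and fails with `error.diff`. A small usage sketch under the file's own `testing` import:

test "diff reports differing buffers" {
    const a = [_]u8{ 1, 2, 3, 4 };
    var b = a;
    b[2] = 0xff;
    // One differing byte is enough to make the whole comparison fail.
    try testing.expectError(DiffError.diff, diff(&a, &b));
    try diff(&a, &a); // identical inputs return cleanly
}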

pub fn zeroes(comptime T: type) T {
var result: T = undefined;
const slice = asBytes(&result);
@memset(slice, 0);
return result;
}

const ascii = std.ascii;
pub const upperString = ascii.upperString;
pub const isUpper = ascii.isUpper;
pub const isAlphabetic = ascii.isAlphabetic;

const std_builtin = std.builtin;
pub const AtomicRmwOp = std_builtin.AtomicRmwOp;
pub const AtomicOrder = std_builtin.AtomicOrder;
pub const Type = std_builtin.Type;
pub const StackTrace = std_builtin.StackTrace;
pub const SourceLocation = std_builtin.SourceLocation;

pub fn FieldType(comptime T: type, comptime name: []const u8) type {
return @TypeOf(@field(@as(T, undefined), name));
}

// META PROGRAMMING
pub const AutoEnumArray = std.enums.EnumArray;
pub const fields = std.meta.fields;
pub const IntType = std.meta.Int;
pub const enumFromInt = std.meta.enumFromInt;
pub const stringToEnum = std.meta.stringToEnum;
pub const Tag = std.meta.Tag;

const math = std.math;
pub const maxInt = math.maxInt;
pub const min = math.min;
pub const divCeil = math.divCeil;
pub const clamp = math.clamp;
pub const isPowerOfTwo = math.isPowerOfTwo;
pub const mul = math.mul;
pub const cast = math.cast;

pub const unicode = std.unicode;

pub const uefi = std.os.uefi;

pub const DiskType = enum(u32) {
virtio = 0,
nvme = 1,
ahci = 2,
ide = 3,
memory = 4,
bios = 5,

pub const count = enumCount(@This());
};

pub const FilesystemType = enum(u32) {
birth = 0,
ext2 = 1,
fat32 = 2,

pub const count = enumCount(@This());
};

pub fn enumFields(comptime E: type) []const Type.EnumField {
return @typeInfo(E).Enum.fields;
}

pub const enumValues = std.enums.values;

pub fn enumCount(comptime E: type) usize {
return enumFields(E).len;
}

pub const PartitionTableType = enum {
mbr,
gpt,
};

pub const supported_architectures = [_]Cpu.Arch{
.x86_64,
//.aarch64,
//.riscv64,
};

pub fn architectureIndex(comptime arch: Cpu.Arch) comptime_int {
inline for (supported_architectures, 0..) |architecture, index| {
if (arch == architecture) return index;
}

@compileError("Architecture not found");
}

pub const architecture_bootloader_map = blk: {
var array: [supported_architectures.len][]const ArchitectureBootloader = undefined;

array[architectureIndex(.x86_64)] = &.{
.{
.id = .birth,
.protocols = &.{ .bios, .uefi },
},
.{
.id = .limine,
.protocols = &.{ .bios, .uefi },
},
};

// array[architectureIndex(.aarch64)] = &.{
// .{
// .id = .birth,
// .protocols = &.{.uefi},
// },
// .{
// .id = .limine,
// .protocols = &.{.uefi},
// },
// };

// array[architectureIndex(.riscv64)] = &.{
// .{
// .id = .birth,
// .protocols = &.{.uefi},
// },
// };

break :blk array;
};

pub const Bootloader = enum(u32) {
@ -32,18 +261,147 @@ pub const Bootloader = enum(u32) {
};
};

pub const ArchitectureBootloader = struct {
id: Bootloader,
protocols: []const Bootloader.Protocol,
};

pub const TraditionalExecutionMode = enum(u1) {
privileged = 0,
user = 1,
};

pub const ExecutionEnvironment = enum {
qemu,
};

pub const ImageConfig = struct {
sector_count: u64,
sector_size: u16,
partition_table: PartitionTableType,
partition: PartitionConfig,

pub const default_path = "config/image_config.json";

pub fn get(allocator: ZigAllocator, path: []const u8) !ImageConfig {
const image_config_file = try std.fs.cwd().readFileAlloc(allocator, path, maxInt(usize));
const parsed_image_configuration = try std.json.parseFromSlice(ImageConfig, allocator, image_config_file, .{});
return parsed_image_configuration.value;
}
};

pub const PartitionConfig = struct {
name: []const u8,
filesystem: FilesystemType,
first_lba: u64,
};

pub const QEMU = extern struct {
pub const isa_debug_exit = ISADebugExit{};

pub const ISADebugExit = extern struct {
io_base: u8 = 0xf4,
io_size: u8 = @sizeOf(u32),
};

pub const ExitCode = enum(u32) {
success = 0x10,
failure = 0x11,
_,
};
};
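The `QEMU` declarations above model QEMU's isa-debug-exit device: a 32-bit write to port 0xf4 terminates the VM, and QEMU's own process exit status becomes (value << 1) | 1, so `success` and `failure` map to distinct host exit codes. A hedged sketch of the privileged-side write; the `out32` port-I/O helper is assumed here, not shown in this diff:

fn exitQemu(code: QEMU.ExitCode) noreturn {
    // Assumed helper; in the project this path is driven by privileged.shutdown.
    out32(QEMU.isa_debug_exit.io_base, @intFromEnum(code));
    while (true) {} // the device write should never return
}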

pub const OptimizeMode = std.builtin.OptimizeMode;

pub const Configuration = struct {
architecture: Cpu.Arch,
bootloader: Bootloader,
boot_protocol: Bootloader.Protocol,
execution_environment: ExecutionEnvironment,
optimize_mode: OptimizeMode,
execution_type: ExecutionType,
executable_kind: std.Build.CompileStep.Kind,
};

pub const QEMUOptions = packed struct {
is_test: bool,
is_debug: bool,
};

pub const ExecutionType = enum {
emulated,
accelerated,
};

pub const TraditionalExecutionMode = enum(u1) {
privileged = 0,
user = 1,
pub const Suffix = enum {
bootloader,
cpu_driver,
image,
complete,

pub fn fromConfiguration(suffix: Suffix, allocator: ZigAllocator, configuration: Configuration, prefix: ?[]const u8) ![]const u8 {
const cpu_driver_suffix = [_][]const u8{
@tagName(configuration.optimize_mode),
"_",
@tagName(configuration.architecture),
"_",
@tagName(configuration.executable_kind),
};

const bootloader_suffix = [_][]const u8{
@tagName(configuration.architecture),
"_",
@tagName(configuration.bootloader),
"_",
@tagName(configuration.boot_protocol),
};

const image_suffix = [_][]const u8{
@tagName(configuration.optimize_mode),
"_",
} ++ bootloader_suffix ++ [_][]const u8{
"_",
@tagName(configuration.executable_kind),
};

const complete_suffix = image_suffix ++ [_][]const u8{
"_",
@tagName(configuration.execution_type),
"_",
@tagName(configuration.execution_environment),
};

return try std.mem.concat(allocator, u8, &switch (suffix) {
.cpu_driver => if (prefix) |pf| [1][]const u8{pf} ++ cpu_driver_suffix else cpu_driver_suffix,
.bootloader => if (prefix) |pf| [1][]const u8{pf} ++ bootloader_suffix else bootloader_suffix,
.image => if (prefix) |pf| [1][]const u8{pf} ++ image_suffix else image_suffix,
.complete => if (prefix) |pf| [1][]const u8{pf} ++ complete_suffix else complete_suffix,
});
}
};
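A worked example of `fromConfiguration` above, with a hypothetical configuration whose optimize_mode is .Debug, architecture is .x86_64 and executable_kind is .exe (allocator and configuration are assumed to come from the caller):

// .cpu_driver with no prefix concatenates to:      "Debug_x86_64_exe"
// .cpu_driver with prefix "cpu_driver_" yields:    "cpu_driver_Debug_x86_64_exe"
const name = try Suffix.fromConfiguration(.cpu_driver, allocator, configuration, "cpu_driver_");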

pub const Module = struct {
program: UserProgram,
name: []const u8,
};
pub const UserProgram = struct {
kind: Kind,
dependencies: []const Dependency,

pub const Kind = enum {
zig_exe,
};

pub const Dependency = struct {
foo: u64 = 0,
};
};

pub const BirthProgram = enum {
bootloader,
cpu,
user,
host,
};

pub fn canVirtualizeWithQEMU(architecture: Cpu.Arch, ci: bool) bool {
@ -60,6 +418,9 @@ pub fn canVirtualizeWithQEMU(architecture: Cpu.Arch, ci: bool) bool {
};
}

pub const default_cpu_name = "/cpu";
pub const default_init_file = "/init";

pub const ArgumentParser = struct {
pub const null_specifier = "-";

@ -140,45 +501,5 @@ pub const ArgumentParser = struct {
};
};

fn enumCount(comptime E: type) comptime_int {
return @typeInfo(E).Enum.fields.len;
}

pub const QEMUOptions = packed struct {
is_test: bool,
is_debug: bool,
};

pub const PartitionTableType = enum {
mbr,
gpt,
};

pub const ImageConfig = struct {
sector_count: u64,
sector_size: u16,
partition_table: PartitionTableType,
partition: PartitionConfig,

pub const default_path = "config/image_config.json";

pub fn get(allocator: Allocator, path: []const u8) !ImageConfig {
const image_config_file = try std.fs.cwd().readFileAlloc(allocator, path, maxInt(usize));
const parsed_image_configuration = try std.json.parseFromSlice(ImageConfig, allocator, image_config_file, .{});
return parsed_image_configuration.value;
}
};

pub const PartitionConfig = struct {
name: []const u8,
filesystem: FilesystemType,
first_lba: u64,
};

pub const FilesystemType = enum(u32) {
birth = 0,
ext2 = 1,
fat32 = 2,

pub const count = enumCount(@This());
};
pub const default_disk_size = 64 * 1024 * 1024;
pub const default_sector_size = 0x200;
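Both copies of `ImageConfig.get` above do the same thing: read the whole JSON file into memory and parse it with `std.json.parseFromSlice`. A usage sketch (the allocator is assumed to come from the caller):

const image_config = try ImageConfig.get(allocator, ImageConfig.default_path);
// e.g. image_config.partition.filesystem == .fat32 selects the FAT32 image path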
772
src/cpu.zig
@ -13,44 +13,64 @@ const PhysicalAddress = lib.PhysicalAddress;
const PhysicalAddressSpace = lib.PhysicalAddressSpace;
const PhysicalMemoryRegion = lib.PhysicalMemoryRegion;
const stopCPU = privileged.arch.stopCPU;
const VirtualAddress = lib.VirtualAddress;
const VirtualMemoryRegion = lib.VirtualMemoryRegion;
const paging = privileged.arch.paging;
const VirtualAddress = privileged.VirtualAddress;
const VirtualMemoryRegion = privileged.VirtualMemoryRegion;

const birth = @import("birth");

pub const test_runner = @import("cpu/test_runner.zig");
pub const arch = @import("cpu/arch.zig");
pub const interface = @import("cpu/interface.zig");
pub const init = @import("cpu/init.zig");

const PageTableRegions = arch.init.PageTableRegions;
pub const capabilities = @import("cpu/capabilities.zig");

pub export var stack: [0x8000]u8 align(0x1000) = undefined;
pub export var page_allocator = PageAllocator{
.head = null,
.list_allocator = .{
.u = .{
.primitive = .{
.backing_4k_page = undefined,
.allocated = 0,
},
},
.primitive = true,
},
};

pub var bundle: []const u8 = &.{};
pub var bundle_files: []const u8 = &.{};

pub export var page_allocator = PageAllocator{};
pub export var user_scheduler: *UserScheduler = undefined;
pub export var heap = HeapImplementation(false){};
pub var debug_info: lib.ModuleDebugInfo = undefined;
pub export var driver: *align(lib.arch.valid_page_sizes[0]) Driver = undefined;
pub export var page_tables: CPUPageTables = undefined;
pub var file: []align(lib.default_sector_size) const u8 = undefined;
pub export var core_id: u32 = 0;
pub export var bsp = false;
var panic_lock = lib.Spinlock.released;

/// This data structure holds the information needed to run a core
pub const Driver = extern struct {
init_root_capability: capabilities.RootDescriptor,
valid: bool,
padding: [padding_byte_count]u8 = .{0} ** padding_byte_count,
const padding_byte_count = lib.arch.valid_page_sizes[0] - @sizeOf(bool) - @sizeOf(capabilities.RootDescriptor);

pub inline fn getRootCapability(drv: *Driver) *capabilities.Root {
return drv.init_root_capability.value;
}

comptime {
// @compileLog(@sizeOf(Driver));
assert(lib.isAligned(@sizeOf(Driver), lib.arch.valid_page_sizes[0]));
}
};

/// This data structure holds the information needed to run a program in a core (cpu side)
pub const UserScheduler = extern struct {
s: S,
capability_root_node: capabilities.Root,
common: *birth.UserScheduler,
padding: [padding_byte_count]u8 = .{0} ** padding_byte_count,

const S = extern struct {
capability_root_node: interface.Root,
common: *birth.Scheduler.Common,
};

const total_size = @sizeOf(S);
const total_size = @sizeOf(capabilities.Root) + @sizeOf(*birth.UserScheduler);
const aligned_size = lib.alignForward(usize, total_size, lib.arch.valid_page_sizes[0]);
const padding_byte_count = aligned_size - total_size;

@ -61,7 +81,7 @@ pub const UserScheduler = extern struct {
}
};

const print_stack_trace = true;
const print_stack_trace = false;
var panic_count: usize = 0;

inline fn panicPrologue(comptime format: []const u8, arguments: anytype) !void {
@ -72,9 +92,9 @@ inline fn panicPrologue(comptime format: []const u8, arguments: anytype) !void {
try writer.writeAll(lib.Color.get(.bold));
try writer.writeAll(lib.Color.get(.red));
try writer.writeAll("[CPU DRIVER] [PANIC] ");
try writer.writeAll(lib.Color.get(.reset));
try writer.print(format, arguments);
try writer.writeByte('\n');
try writer.writeAll(lib.Color.get(.reset));
}

inline fn panicEpilogue() noreturn {
@ -83,68 +103,64 @@ inline fn panicEpilogue() noreturn {
shutdown(.failure);
}

inline fn printStackTrace(maybe_stack_trace: ?*lib.StackTrace) !void {
if (maybe_stack_trace) |stack_trace| {
try writer.writeAll("Stack trace:\n");
var frame_index: usize = 0;
var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len);
// inline fn printStackTrace(maybe_stack_trace: ?*lib.StackTrace) !void {
// if (maybe_stack_trace) |stack_trace| {
// var debug_info = try getDebugInformation();
// try writer.writeAll("Stack trace:\n");
// var frame_index: usize = 0;
// var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len);
//
// while (frames_left != 0) : ({
// frames_left -= 1;
// frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len;
// }) {
// const return_address = stack_trace.instruction_addresses[frame_index];
// try writer.print("[{}] ", .{frame_index});
// try printSourceAtAddress(&debug_info, return_address);
// }
// } else {
// try writer.writeAll("Stack trace not available\n");
// }
// }

while (frames_left != 0) : ({
frames_left -= 1;
frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len;
}) {
const return_address = stack_trace.instruction_addresses[frame_index];
try writer.print("[{}] ", .{frame_index});
try printSourceAtAddress(return_address);
try writer.writeByte('\n');
}
} else {
try writer.writeAll("Stack trace not available\n");
}
}
// inline fn printStackTraceFromStackIterator(return_address: usize, frame_address: usize) !void {
// var debug_info = try getDebugInformation();
// var stack_iterator = lib.StackIterator.init(return_address, frame_address);
// var frame_index: usize = 0;
// try writer.writeAll("Stack trace:\n");
//
// try printSourceAtAddress(&debug_info, return_address);
// while (stack_iterator.next()) |address| : (frame_index += 1) {
// try writer.print("[{}] ", .{frame_index});
// try printSourceAtAddress(&debug_info, address);
// }
// }

inline fn printStackTraceFromStackIterator(return_address: usize, frame_address: usize) !void {
var stack_iterator = lib.StackIterator.init(return_address, frame_address);
var frame_index: usize = 0;
try writer.writeAll("Stack trace:\n");

while (stack_iterator.next()) |address| : (frame_index += 1) {
if (address == 0) break;
try writer.print("[{}] ", .{frame_index});
try printSourceAtAddress(address);
try writer.writeByte('\n');
}
}

fn printSourceAtAddress(address: usize) !void {
const compile_unit = debug_info.findCompileUnit(address) catch {
try writer.print("0x{x}: ???", .{address});
return;
};
const symbol_name = debug_info.getSymbolName(address) orelse "???";
const compile_unit_name = compile_unit.die.getAttrString(&debug_info, lib.dwarf.AT.name, debug_info.section(.debug_str), compile_unit.*) catch "???";
const line_info = debug_info.getLineNumberInfo(heap.allocator.zigUnwrap(), compile_unit.*, address) catch null;
const symbol = .{
.symbol_name = symbol_name,
.compile_unit_name = compile_unit_name,
.line_info = line_info,
};

const file_name = if (symbol.line_info) |li| li.file_name else "???";
const line = if (symbol.line_info) |li| li.line else 0;
const column = if (symbol.line_info) |li| li.column else 0;
try writer.print("0x{x}: {s}!{s} {s}:{}:{}", .{ address, symbol.symbol_name, symbol.compile_unit_name, file_name, line, column });
}
// fn printSourceAtAddress(debug_info: *lib.ModuleDebugInfo, address: usize) !void {
// if (debug_info.findCompileUnit(address)) |compile_unit| {
// const symbol = .{
// .symbol_name = debug_info.getSymbolName(address) orelse "???",
// .compile_unit_name = compile_unit.die.getAttrString(debug_info, lib.dwarf.AT.name, debug_info.debug_str, compile_unit.*) catch "???",
// .line_info = debug_info.getLineNumberInfo(heap_allocator.toZig(), compile_unit.*, address) catch null,
// };
// try writer.print("0x{x}: {s}!{s} {s}:{}:{}\n", .{ address, symbol.symbol_name, symbol.compile_unit_name, symbol.line_info.?.file_name, symbol.line_info.?.line, symbol.line_info.?.column });
// } else |err| {
// return err;
// }
// }

pub fn panicWithStackTrace(stack_trace: ?*lib.StackTrace, comptime format: []const u8, arguments: anytype) noreturn {
_ = stack_trace;
panicPrologue(format, arguments) catch {};
if (print_stack_trace) printStackTrace(stack_trace) catch {};
// if (print_stack_trace) printStackTrace(stack_trace) catch {};
panicEpilogue();
}

pub fn panicFromInstructionPointerAndFramePointer(return_address: usize, frame_address: usize, comptime format: []const u8, arguments: anytype) noreturn {
_ = frame_address;
_ = return_address;
panicPrologue(format, arguments) catch {};
if (print_stack_trace) printStackTraceFromStackIterator(return_address, frame_address) catch {};
//if (print_stack_trace) printStackTraceFromStackIterator(return_address, frame_address) catch {};
panicEpilogue();
}

@ -152,410 +168,268 @@ pub fn panic(comptime format: []const u8, arguments: anytype) noreturn {
@call(.always_inline, panicFromInstructionPointerAndFramePointer, .{ @returnAddress(), @frameAddress(), format, arguments });
}

pub var command_count: usize = 0;
pub var syscall_count: usize = 0;

pub inline fn shutdown(exit_code: lib.QEMU.ExitCode) noreturn {
log.debug("Printing stats...", .{});
log.debug("System call count: {}", .{interface.system_call_count});
log.debug("Syscall count: {}", .{syscall_count});

privileged.shutdown(exit_code);
}

pub const RegionList = extern struct {
regions: [list_region_count]PhysicalMemoryRegion = .{PhysicalMemoryRegion.invalid()} ** list_region_count,
metadata: Metadata = .{},

pub const Metadata = extern struct {
reserved: usize = 0,
bitset: Bitset = .{},
previous: ?*RegionList = null,
next: ?*RegionList = null,

const Bitset = lib.data_structures.BitsetU64(list_region_count);

comptime {
assert(@sizeOf(Metadata) == expected_size);
assert(@bitSizeOf(usize) - list_region_count < 8);
}

const expected_size = 4 * @sizeOf(usize);
};

const Error = error{
OutOfMemory,
no_space,
misalignment_page_size,
};

pub fn allocateAligned(list: *RegionList, size: usize, alignment: usize) Error!PhysicalMemoryRegion {
assert(alignment % lib.arch.valid_page_sizes[0] == 0);

for (&list.regions, 0..) |*region, _index| {
const index: u6 = @intCast(_index);
assert(lib.isAligned(region.size, lib.arch.valid_page_sizes[0]));
assert(lib.isAligned(region.address.value(), lib.arch.valid_page_sizes[0]));

if (list.metadata.bitset.isSet(index)) {
if (lib.isAligned(region.address.value(), alignment)) {
if (region.size >= size) {
const result = region.takeSlice(size) catch unreachable;
if (region.size == 0) {
list.remove(@intCast(index));
}

return result;
}
}
}
}

return Error.OutOfMemory;
}

pub fn remove(list: *RegionList, index: u6) void {
list.metadata.bitset.clear(index);
}

pub const UnalignedAllocationResult = extern struct {
wasted: PhysicalMemoryRegion,
allocated: PhysicalMemoryRegion,
};

/// Slow path
pub fn allocateAlignedSplitting(list: *RegionList, size: usize, alignment: usize) !UnalignedAllocationResult {
for (&list.regions, 0..) |*region, _index| {
const index: u6 = @intCast(_index);
const aligned_region_address = lib.alignForward(usize, region.address.value(), alignment);
const wasted_space = aligned_region_address - region.address.value();

if (list.metadata.bitset.isSet(index)) {
const target_size = wasted_space + size;
if (region.size >= target_size) {
const wasted_region = try region.takeSlice(wasted_space);
const allocated_region = try region.takeSlice(size);

if (region.size == 0) {
list.remove(index);
}

return UnalignedAllocationResult{
.wasted = wasted_region,
.allocated = allocated_region,
};
}
}
}

log.err("allocateAlignedSplitting", .{});
return error.OutOfMemory;
}

pub fn allocate(list: *RegionList, size: usize) Error!PhysicalMemoryRegion {
return list.allocateAligned(size, lib.arch.valid_page_sizes[0]);
}

pub fn append(list: *RegionList, region: PhysicalMemoryRegion) Error!birth.interface.Memory {
var block_count: usize = 0;
while (true) : (block_count += 1) {
if (!list.metadata.bitset.isFull()) {
const region_index = list.metadata.bitset.allocate() catch continue;
const block_index = block_count;

list.regions[region_index] = region;

return .{
.block = @intCast(block_index),
.region = region_index,
};
} else {
return Error.no_space;
}
}
}

const cache_line_count = 16;
const list_region_count = @divExact((cache_line_count * lib.cache_line_size) - Metadata.expected_size, @sizeOf(PhysicalMemoryRegion));

comptime {
assert(@sizeOf(RegionList) % lib.cache_line_size == 0);
}
};
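The `allocateAlignedSplitting` slow path above trades the bytes between a region's start and the next aligned address for alignment; the arithmetic is a plain align-forward. A worked example with illustrative numbers:

// region.address = 0x1000, alignment = 0x4000, size = 0x1000
// aligned_region_address = alignForward(0x1000, 0x4000) = 0x4000
// wasted_space           = 0x4000 - 0x1000             = 0x3000
// result: .wasted covers [0x1000, 0x4000), .allocated covers [0x4000, 0x5000)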

const UseCase = extern struct {
reason: Reason,
const Reason = enum(u8) {
heap,
privileged,
wasted,
user_protected,
user,
bootloader,
};
};

// TODO: make this more cache friendly
const UsedRegionList = extern struct {
region: PhysicalMemoryRegion,
use_case: UseCase,
next: ?*UsedRegionList = null,
};

pub const PageAllocator = extern struct {
free_regions: ?*RegionList = null,
used_regions: ?*UsedRegionList = null,
used_region_buffer: ?*UsedRegionList = null,
free_byte_count: u64 = 0,
used_byte_count: u64 = 0,
head: ?*Entry,
list_allocator: ListAllocator,
total_allocated_size: u32 = 0,

pub fn allocate(allocator: *PageAllocator, size: usize, use_case: UseCase) lib.Allocator.Allocate.Error!PhysicalMemoryRegion {
const allocation = try allocator.allocateRaw(size);
try allocator.appendUsedRegion(allocation, use_case);
return allocation;
}

fn allocateRaw(allocator: *PageAllocator, size: usize) !PhysicalMemoryRegion {
var iterator = allocator.free_regions;
while (iterator) |region_list| : (iterator = region_list.metadata.next) {
const allocation = region_list.allocate(size) catch continue;
allocator.free_byte_count -= size;
allocator.used_byte_count += size;

return allocation;
}

log.err("allocateRaw: out of memory. Used: 0x{x}. Free: 0x{x}", .{ allocator.used_byte_count, allocator.free_byte_count });
return error.OutOfMemory;
}

/// The only purpose this serves is to do the trick when switching cr3
pub fn allocateAligned(allocator: *PageAllocator, size: usize, alignment: usize, use_case: UseCase) lib.Allocator.Allocate.Error!PhysicalMemoryRegion {
var iterator = allocator.free_regions;
while (iterator) |region_list| : (iterator = region_list.metadata.next) {
const unaligned_allocation = region_list.allocateAlignedSplitting(size, alignment) catch continue;
// TODO: do something with the wasted space
const total_allocation_size = unaligned_allocation.wasted.size + unaligned_allocation.allocated.size;
log.err("ALLOCATED: 0x{x}. WASTED: 0x{x}. TOTAL: 0x{x}", .{ unaligned_allocation.allocated.size, unaligned_allocation.wasted.size, total_allocation_size });

try allocator.appendUsedRegion(unaligned_allocation.allocated, use_case);
try allocator.appendUsedRegion(unaligned_allocation.wasted, .{ .reason = .wasted });

allocator.free_byte_count -= total_allocation_size;
allocator.used_byte_count += total_allocation_size;

return unaligned_allocation.allocated;
}

@panic("TODO: PageAllocator.allocateAligned");
}

pub fn appendUsedRegion(allocator: *PageAllocator, physical_region: PhysicalMemoryRegion, use_case: UseCase) lib.Allocator.Allocate.Error!void {
const need_allocation = blk: {
var result: bool = true;
var iterator = allocator.used_region_buffer;
while (iterator) |it| : (iterator = it.next) {
result = it.region.size < @sizeOf(UsedRegionList);
if (!result) {
break;
}
}

break :blk result;
};

if (need_allocation) {
const allocation = try allocator.allocateRaw(lib.arch.valid_page_sizes[0]);
const new_buffer = allocation.address.toHigherHalfVirtualAddress().access(*UsedRegionList);
new_buffer.* = .{
.region = allocation,
.use_case = undefined,
};
_ = new_buffer.region.takeSlice(@sizeOf(UsedRegionList)) catch unreachable;
const used_region_allocation = new_buffer.region.takeSlice(@sizeOf(UsedRegionList)) catch unreachable;
const new_used_region = used_region_allocation.address.toHigherHalfVirtualAddress().access(*UsedRegionList);
new_used_region.* = .{
.region = allocation,
.use_case = .{ .reason = .privileged },
};

if (allocator.used_regions) |_| {
var iterator = allocator.used_regions;
_ = iterator;
@panic("TODO: iterate");
} else {
allocator.used_regions = new_used_region;
}

if (allocator.used_region_buffer) |_| {
var iterator = allocator.used_region_buffer;
_ = iterator;
@panic("TODO: iterate 2");
} else {
allocator.used_region_buffer = new_buffer;
}

assert(new_buffer.region.size < allocation.size);
}

var iterator = allocator.used_region_buffer;
while (iterator) |it| : (iterator = it.next) {
if (it.region.size >= @sizeOf(UsedRegionList)) {
const new_used_region_allocation = it.region.takeSlice(@sizeOf(UsedRegionList)) catch unreachable;
const new_used_region = new_used_region_allocation.address.toHigherHalfVirtualAddress().access(*UsedRegionList);
new_used_region.* = .{
.region = physical_region,
.use_case = use_case,
};

iterator = allocator.used_regions;

while (iterator) |i| : (iterator = i.next) {
if (i.next == null) {
i.next = new_used_region;
return;
}
}
}
}

if (true) @panic("TODO: PageAllocator.appendUsedRegion");
return error.OutOfMemory;
}

pub fn getPageTableAllocatorInterface(allocator: *PageAllocator) privileged.PageAllocator {
fn getPageAllocatorInterface(pa: *PageAllocator) PageAllocatorInterface {
return .{
.allocate = pageTableAllocateCallback,
.context = allocator,
.allocate = callbackAllocate,
.context = pa,
.context_type = .cpu,
};
}

fn pageTableAllocateCallback(context: ?*anyopaque, size: u64, alignment: u64, options: privileged.PageAllocator.AllocateOptions) error{OutOfMemory}!lib.PhysicalMemoryRegion {
const allocator: *PageAllocator = @alignCast(@ptrCast(context orelse return error.OutOfMemory));
assert(alignment == lib.arch.valid_page_sizes[0]);
assert(size == lib.arch.valid_page_sizes[0]);
assert(options.count == 1);
assert(options.level_valid);
fn callbackAllocate(context: ?*anyopaque, size: u64, alignment: u64, options: PageAllocatorInterface.AllocateOptions) Allocator.Allocate.Error!PhysicalMemoryRegion {
_ = options;
const pa = @as(?*PageAllocator, @ptrCast(@alignCast(context))) orelse return Allocator.Allocate.Error.OutOfMemory;
const result = try pa.allocate(size, alignment);
return result;
}

const page_table_allocation = try allocator.allocate(size, .{ .reason = .user_protected });
// log.debug("Page table allocation: 0x{x}", .{page_table_allocation.address.value()});

// TODO: is this right?
if (options.user) {
const user_page_tables = &user_scheduler.s.capability_root_node.dynamic.page_table;
const user_allocator = &user_scheduler.s.capability_root_node.heap.allocator;
const new_page_table_ref = try user_page_tables.appendPageTable(user_allocator, .{
.region = page_table_allocation,
.mapping = page_table_allocation.address.toHigherHalfVirtualAddress(),
.flags = .{ .level = options.level },
});

const indexed = options.virtual_address;
const indices = indexed.toIndices();

var page_table_ref = user_page_tables.user;
log.debug("Level: {s}", .{@tagName(options.level)});

for (0..@intFromEnum(options.level) - 1) |level_index| {
log.debug("Fetching {s} page table", .{@tagName(@as(paging.Level, @enumFromInt(level_index)))});
const page_table = user_page_tables.getPageTable(page_table_ref) catch @panic("WTF");
page_table_ref = page_table.children[indices[level_index]];
}

const parent_page_table = user_page_tables.getPageTable(page_table_ref) catch @panic("WTF");
parent_page_table.children[indices[@intFromEnum(options.level) - 1]] = new_page_table_ref;
pub fn allocate(pa: *PageAllocator, size: u64, alignment: u64) Allocator.Allocate.Error!PhysicalMemoryRegion {
if (pa.head == null) {
@panic("head null");
}

return page_table_allocation;
const allocation = blk: {
var ptr = pa.head;
while (ptr) |entry| : (ptr = entry.next) {
if (lib.isAligned(entry.region.address.value(), alignment) and entry.region.size > size) {
const result = PhysicalMemoryRegion{
.address = entry.region.address,
.size = size,
};
entry.region.address = entry.region.address.offset(size);
entry.region.size -= size;

pa.total_allocated_size += @as(u32, @intCast(size));
// log.debug("Allocated 0x{x}", .{size});

break :blk result;
}
}

ptr = pa.head;

while (ptr) |entry| : (ptr = entry.next) {
const aligned_address = lib.alignForward(entry.region.address.value(), alignment);
const top = entry.region.top().value();
if (aligned_address < top and top - aligned_address > size) {
// log.debug("Found region which we should be splitting: (0x{x}, 0x{x})", .{ entry.region.address.value(), entry.region.size });
// log.debug("User asked for 0x{x} bytes with alignment 0x{x}", .{ size, alignment });
// Split the addresses to obtain the desired result
const first_region_size = aligned_address - entry.region.address.value();
const first_region_address = entry.region.address;
const first_region_next = entry.next;

const second_region_address = aligned_address + size;
const second_region_size = top - aligned_address + size;

const result = PhysicalMemoryRegion{
.address = PhysicalAddress.new(aligned_address),
.size = size,
};

// log.debug("\nFirst region: (Address: 0x{x}. Size: 0x{x}).\nRegion in the middle (allocated): (Address: 0x{x}. Size: 0x{x}).\nSecond region: (Address: 0x{x}. Size: 0x{x})", .{ first_region_address, first_region_size, result.address.value(), result.size, second_region_address, second_region_size });

const new_entry = pa.list_allocator.get();
entry.* = .{
.region = .{
.address = first_region_address,
.size = first_region_size,
},
.next = new_entry,
};

new_entry.* = .{
.region = .{
.address = PhysicalAddress.new(second_region_address),
.size = second_region_size,
},
.next = first_region_next,
};
// log.debug("First entry: (Address: 0x{x}. Size: 0x{x})", .{ entry.region.address.value(), entry.region.size });
// log.debug("Second entry: (Address: 0x{x}. Size: 0x{x})", .{ new_entry.region.address.value(), new_entry.region.size });

// pa.total_allocated_size += @intCast(u32, size);
// log.debug("Allocated 0x{x}", .{size});

break :blk result;
}
}

log.err("Allocate error. Size: 0x{x}. Alignment: 0x{x}. Total allocated size: 0x{x}", .{ size, alignment, pa.total_allocated_size });
return Allocator.Allocate.Error.OutOfMemory;
};

//log.debug("Physical allocation: 0x{x}, 0x{x}", .{ allocation.address.value(), allocation.size });

@memset(allocation.toHigherHalfVirtualAddress().access(u8), 0);

return allocation;
}
};

pub const HeapRegion = extern struct {
region: VirtualMemoryRegion,
previous: ?*HeapRegion = null,
next: ?*HeapRegion = null,
};
pub inline fn fromBSP(bootloader_information: *bootloader.Information) InitializationError!PageAllocator {
const memory_map_entries = bootloader_information.getMemoryMapEntries();
const page_counters = bootloader_information.getPageCounters();

pub fn HeapImplementation(comptime user: bool) type {
const use_case = .{ .reason = if (user) .user_protected else .heap };
_ = use_case;
return extern struct {
allocator: lib.Allocator = .{
.callbacks = .{
.allocate = callbackAllocate,
var total_size: usize = 0;
const page_shifter = lib.arch.page_shifter(lib.arch.valid_page_sizes[0]);

for (memory_map_entries, page_counters) |entry, page_counter| {
if (entry.type != .usable or !lib.isAligned(entry.region.size, lib.arch.valid_page_sizes[0]) or entry.region.address.value() < lib.mb) {
continue;
}

total_size += entry.region.size - (page_counter << page_shifter);
}

const cpu_count = bootloader_information.smp.cpu_count;
const total_memory_to_take = total_size / cpu_count;

// Look for a 4K page to host the memory map
const backing_4k_page = for (memory_map_entries, page_counters) |entry, *page_counter| {
const occupied_size = page_counter.* << page_shifter;
const entry_size_left = entry.region.size - occupied_size;
if (entry_size_left != 0) {
if (entry.type != .usable or !lib.isAligned(entry.region.size, lib.arch.valid_page_sizes[0]) or entry.region.address.value() < lib.mb) continue;

assert(lib.isAligned(entry_size_left, lib.arch.valid_page_sizes[0]));
page_counter.* += 1;
break entry.region.address.offset(occupied_size);
}
} else return InitializationError.bootstrap_region_not_found;

var memory_taken: usize = 0;
var backing_4k_page_memory_allocated: usize = 0;

var last_entry: ?*Entry = null;
var first_entry: ?*Entry = null;

for (memory_map_entries, page_counters) |entry, *page_counter| {
if (entry.type != .usable or !lib.isAligned(entry.region.size, lib.arch.valid_page_sizes[0]) or entry.region.address.value() < lib.mb) continue;

const occupied_size = page_counter.* << page_shifter;

if (occupied_size < entry.region.size) {
const entry_size_left = entry.region.size - occupied_size;

var memory_taken_from_region: usize = 0;
while (memory_taken + memory_taken_from_region < total_memory_to_take) {
if (memory_taken_from_region == entry_size_left) break;

const size_to_take = @min(2 * lib.mb, entry_size_left);
memory_taken_from_region += size_to_take;
}

memory_taken += memory_taken_from_region;

page_counter.* += @as(u32, @intCast(memory_taken_from_region >> page_shifter));
const region_descriptor = .{
.address = entry.region.offset(occupied_size).address,
.size = memory_taken_from_region,
};

if (backing_4k_page_memory_allocated >= lib.arch.valid_page_sizes[0]) return InitializationError.memory_exceeded;
const entry_address = backing_4k_page.offset(backing_4k_page_memory_allocated);
const new_entry = entry_address.toHigherHalfVirtualAddress().access(*Entry);
backing_4k_page_memory_allocated += @sizeOf(Entry);

new_entry.* = .{
.region = .{
.address = region_descriptor.address,
.size = region_descriptor.size,
},
.next = null,
};

if (last_entry) |e| {
e.next = new_entry;
} else {
first_entry = new_entry;
}

last_entry = new_entry;

if (memory_taken >= total_memory_to_take) break;
}
}

const result = .{
.head = first_entry,
.list_allocator = .{
.u = .{
.primitive = .{
.backing_4k_page = backing_4k_page,
.allocated = backing_4k_page_memory_allocated,
},
},
.primitive = true,
},
};

return result;
}

const ListAllocator = extern struct {
u: extern union {
primitive: extern struct {
backing_4k_page: PhysicalAddress,
allocated: u64,
},
normal: extern struct {
foo: u64,
},
},
regions: ?*Region = null,
primitive: bool,

const Heap = @This();
const Region = HeapRegion;

pub fn create(heap_allocator: *Heap, comptime T: type) lib.Allocator.Allocate.Error!*T {
const result = try heap_allocator.allocate(@sizeOf(T), @alignOf(T));
return @ptrFromInt(result.address);
}

pub fn addBootstrapingRegion(heap_allocator: *Heap, region: VirtualMemoryRegion) !void {
assert(heap_allocator.regions == null);

var region_splitter = region;
const new_region_vmr = try region_splitter.takeSlice(@sizeOf(Region));
const new_region = new_region_vmr.address.access(*Region);
new_region.* = Region{
.region = region_splitter,
};

heap_allocator.regions = new_region;
}

// TODO: turn the other way around: make the callback call this function
pub fn allocate(heap_allocator: *Heap, size: u64, alignment: u64) lib.Allocator.Allocate.Error!lib.Allocator.Allocate.Result {
var iterator = heap_allocator.regions;
while (iterator) |region| : (iterator = region.next) {
if (region.region.address.isAligned(alignment)) {
if (region.region.size >= size) {
const virtual_region = region.region.takeSlice(size) catch unreachable;
const should_remove = region.region.size == 0;
if (should_remove) {
// TODO: actually remove and reuse
if (region.previous) |prev| prev.next = region.next;
}

return @bitCast(virtual_region);
pub fn get(list_allocator: *ListAllocator) *Entry {
switch (list_allocator.primitive) {
true => {
if (list_allocator.u.primitive.allocated < 0x1000) {
const result = list_allocator.u.primitive.backing_4k_page.offset(list_allocator.u.primitive.allocated).toHigherHalfVirtualAddress().access(*Entry);
list_allocator.u.primitive.backing_4k_page = list_allocator.u.primitive.backing_4k_page.offset(@sizeOf(Entry));
return result;
} else {
@panic("reached limit");
}
}
},
false => {
@panic("not primitive allocator not implemented");
},
}

const new_size = lib.alignForward(usize, size + @sizeOf(HeapRegion), lib.arch.valid_page_sizes[0]);
assert(alignment <= lib.arch.valid_page_sizes[0]);
var new_physical_region = try page_allocator.allocate(new_size, .{ .reason = .heap });
const new_alloc = new_physical_region.takeSlice(@sizeOf(HeapRegion)) catch unreachable;
const new_heap_region = new_alloc.toHigherHalfVirtualAddress().address.access(*HeapRegion);
new_heap_region.* = .{
.region = new_physical_region.toHigherHalfVirtualAddress(),
};

iterator = heap.regions;
if (iterator) |_| {
while (iterator) |heap_region| : (iterator = heap_region.next) {
if (heap_region.next == null) {
heap_region.next = new_heap_region;
break;
}
}
} else {
@panic("NO");
}

return heap.allocate(size, alignment);
}

fn callbackAllocate(allocator: *Allocator, size: u64, alignment: u64) lib.Allocator.Allocate.Error!lib.Allocator.Allocate.Result {
// This assert is triggered by the Zig std library
//assert(lib.isAligned(size, alignment));
const heap_allocator = @fieldParentPtr(Heap, "allocator", allocator);
return heap_allocator.allocate(size, alignment);
}
};
}

pub const Entry = extern struct {
region: PhysicalMemoryRegion,
next: ?*Entry,
};

const InitializationError = error{
bootstrap_region_not_found,
memory_exceeded,
};
};

// fn getDebugInformation() !lib.ModuleDebugInfo {
// const debug_info = lib.getDebugInformation(heap_allocator.toZig(), file) catch |err| {
// try writer.print("Failed to get debug information: {}", .{err});
// return err;
// };
//
// return debug_info;
// }

pub const writer = privileged.E9Writer{ .context = {} };
File diff suppressed because it is too large
@ -24,10 +24,76 @@ const pcid_mask = 1 << pcid_bit;
/// - R10: argument 3
/// - R8: argument 4
/// - R9: argument 5
export fn syscall(registers: *const Registers) callconv(.C) birth.interface.Raw.Result {
const options = @as(birth.interface.Raw.Options, @bitCast(registers.syscall_number));
const arguments = birth.interface.Raw.Arguments{ registers.rdi, registers.rsi, registers.rdx, registers.r10, registers.r8, registers.r9 };
return cpu.interface.processFromRaw(options, arguments);
fn birthSyscall(comptime Syscall: type, raw_arguments: birth.syscall.Arguments) Syscall.ErrorSet.Error!Syscall.Result {
cpu.syscall_count += 1;
comptime assert(Syscall == birth.capabilities.Syscall(Syscall.capability, Syscall.command));
const capability: birth.capabilities.Type = Syscall.capability;
const command: birth.capabilities.Command(capability) = Syscall.command;
const arguments = try Syscall.toArguments(raw_arguments);

return if (cpu.user_scheduler.capability_root_node.hasPermissions(capability, command)) switch (capability) {
.io => switch (command) {
.copy, .mint, .retype, .delete, .revoke, .create => unreachable,
.log => blk: {
const message = arguments;
cpu.writer.writeAll(message) catch unreachable;
comptime assert(Syscall.Result == usize);
break :blk message.len;
},
},
.cpu => switch (command) {
.copy, .mint, .retype, .delete, .revoke, .create => unreachable,
.get_core_id => cpu.core_id,
.shutdown => cpu.shutdown(.success),
.get_command_buffer => {
const command_buffer = arguments;
_ = command_buffer;
@panic("TODO: get_command_buffer");
},
},
.cpu_memory => switch (command) {
.allocate => blk: {
comptime assert(@TypeOf(arguments) == usize);
const size = arguments;
const physical_region = try cpu.user_scheduler.capability_root_node.allocatePages(size);
try cpu.user_scheduler.capability_root_node.allocateCPUMemory(physical_region, .{ .privileged = false });
break :blk physical_region.address;
},
else => @panic(@tagName(command)),
},
.ram => unreachable,
.boot => switch (command) {
.get_bundle_size => cpu.bundle.len,
.get_bundle_file_list_size => cpu.bundle_files.len,
else => @panic(@tagName(command)),
},
.process => switch (command) {
.exit => switch (arguments) {
true => cpu.shutdown(.success),
false => cpu.panic("User process panicked", .{}),
},
else => @panic(@tagName(command)),
},
.page_table => @panic("TODO: page_table"),
} else error.forbidden;
}

export fn syscall(registers: *const Registers) callconv(.C) birth.syscall.Result {
const options = @as(birth.syscall.Options, @bitCast(registers.syscall_number));
const arguments = birth.syscall.Arguments{ registers.rdi, registers.rsi, registers.rdx, registers.r10, registers.r8, registers.r9 };

return switch (options.general.convention) {
.birth => switch (options.birth.type) {
inline else => |capability| switch (@as(birth.capabilities.Command(capability), @enumFromInt(options.birth.command))) {
inline else => |command| blk: {
const Syscall = birth.capabilities.Syscall(capability, command);
const result: Syscall.Result = birthSyscall(Syscall, arguments) catch |err| break :blk Syscall.errorToRaw(err);
break :blk Syscall.resultToRaw(result);
},
},
},
.linux => @panic("linux syscall"),
};
}
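Both generations of the exported `syscall` handler above read the same convention: the packed options word arrives in the syscall-number register and the six arguments in RDI, RSI, RDX, R10, R8 and R9. A hedged sketch of a user-side stub that would feed this dispatcher; the single-u64 result and the raw options word are simplifications of the birth interface types:

fn rawSyscall(options: u64, a0: u64, a1: u64, a2: u64, a3: u64, a4: u64, a5: u64) u64 {
    // RCX and R11 are clobbered by the SYSCALL instruction itself.
    return asm volatile ("syscall"
        : [ret] "={rax}" (-> u64),
        : [options] "{rax}" (options),
          [a0] "{rdi}" (a0),
          [a1] "{rsi}" (a1),
          [a2] "{rdx}" (a2),
          [a3] "{r10}" (a3),
          [a4] "{r8}" (a4),
          [a5] "{r9}" (a5),
        : "rcx", "r11", "memory"
    );
}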

/// SYSCALL documentation
@ -41,7 +107,7 @@ export fn syscall(registers: *const Registers) callconv(.C) birth.interface.Raw.
/// - R10: argument 3
/// - R8: argument 4
/// - R9: argument 5
pub fn entryPoint() callconv(.Naked) noreturn {
pub fn entryPoint() callconv(.Naked) void {
asm volatile (
\\endbr64
\\swapgs
@ -186,6 +252,8 @@ pub fn entryPoint() callconv(.Naked) noreturn {
asm volatile (
\\int3
::: "memory");

unreachable;
}

pub const Registers = extern struct {
@ -22,7 +22,7 @@ const VirtualMemoryRegion = lib.VirtualMemoryRegion;
|
||||
const cpu = @import("cpu");
|
||||
const Heap = cpu.Heap;
|
||||
|
||||
pub const init = @import("./x86/64/init.zig");
|
||||
const init = @import("./x86/64/init.zig");
|
||||
pub const syscall = @import("./x86/64/syscall.zig");
|
||||
pub const entryPoint = init.entryPoint;
|
||||
|
||||
@ -56,33 +56,13 @@ pub const Registers = extern struct {
|
||||
|
||||
const interrupt_kind: u32 = 0;
|
||||
|
||||
const PageFaultFlags = packed struct(u32) {
|
||||
present: bool,
|
||||
write: bool,
|
||||
user: bool,
|
||||
reserved_write: bool,
|
||||
instruction_fetch: bool,
|
||||
protection_key: bool,
|
||||
shadow_stack: bool,
|
||||
reserved0: u8 = 0,
|
||||
software_guard_extensions: bool,
|
||||
reserved: u16 = 0,
|
||||
};
|
||||
|
||||
export fn interruptHandler(regs: *const InterruptRegisters, interrupt_number: u8) void {
    switch (interrupt_number) {
        local_timer_vector => {
            APIC.write(.eoi, 0);
            nextTimer(10);
        },
        else => {
            if (interrupt_number == 0xe) {
                const pagefault_flags: PageFaultFlags = @bitCast(@as(u32, @intCast(regs.error_code)));
                const fault_address = privileged.arch.x86_64.registers.cr2.read();
                log.err("Page 0x{x} not mapped at IP 0x{x}. Flags: {}", .{ fault_address, regs.rip, pagefault_flags });
            }
            cpu.panicFromInstructionPointerAndFramePointer(regs.rip, regs.rbp, "Exception 0x{x} at IP 0x{x}", .{ interrupt_number, regs.rip });
        },
        else => cpu.panicFromInstructionPointerAndFramePointer(regs.rip, regs.rbp, "Exception: 0x{x}", .{interrupt_number}),
    }
}

@ -123,7 +103,7 @@ pub const invariant_tsc = false;
pub const capability_address_space_size = 1 * lib.gb;
pub const capability_address_space_start = capability_address_space_stack_top - capability_address_space_size;
pub const capability_address_space_stack_top = 0xffff_ffff_8000_0000;
pub const capability_address_space_stack_size = 10 * privileged.default_stack_size;
pub const capability_address_space_stack_size = privileged.default_stack_size;
pub const capability_address_space_stack_alignment = lib.arch.valid_page_sizes[0];
pub const capability_address_space_stack_address = VirtualAddress.new(capability_address_space_stack_top - capability_address_space_stack_size);
pub const code_64 = @offsetOf(GDT, "code_64");

@ -283,5 +263,3 @@ pub const root_page_table_entry = @as(cpu.arch.PageTableEntry, @enumFromInt(0));
pub const IOMap = extern struct {
    debug: bool,
};

pub const user_root_page_table_alignment = 2 * lib.arch.valid_page_sizes[0];
419 src/cpu/capabilities.zig Normal file
@ -0,0 +1,419 @@
const lib = @import("lib");
|
||||
const assert = lib.assert;
|
||||
const Allocator = lib.Allocator;
|
||||
const enumCount = lib.enumCount;
|
||||
const log = lib.log.scoped(.capabilities);
|
||||
|
||||
const privileged = @import("privileged");
|
||||
const PhysicalAddress = lib.PhysicalAddress;
|
||||
const PhysicalMemoryRegion = lib.PhysicalMemoryRegion;
|
||||
const birth = @import("birth");
|
||||
const cpu = @import("cpu");
|
||||
|
||||
pub const RootDescriptor = extern struct {
|
||||
value: *Root,
|
||||
};
|
||||
|
||||
pub const Static = enum {
|
||||
cpu,
|
||||
boot,
|
||||
process,
|
||||
|
||||
pub const count = lib.enumCount(@This());
|
||||
|
||||
pub const Bitmap = @Type(.{
|
||||
.Struct = blk: {
|
||||
const full_bit_size = @max(@as(comptime_int, 1 << 3), @as(u8, @sizeOf(Static)) << 3);
|
||||
break :blk .{
|
||||
.layout = .Packed,
|
||||
.backing_integer = lib.IntType(.unsigned, full_bit_size),
|
||||
.fields = fields: {
|
||||
var fields: []const lib.Type.StructField = &.{};
|
||||
inline for (lib.enumFields(Static)) |static_field| {
|
||||
fields = fields ++ [1]lib.Type.StructField{.{
|
||||
.name = static_field.name,
|
||||
.type = bool,
|
||||
.default_value = null,
|
||||
.is_comptime = false,
|
||||
.alignment = 0,
|
||||
}};
|
||||
}
|
||||
|
||||
assert(Static.count > 0);
|
||||
assert(@sizeOf(Static) > 0 or Static.count == 1);
|
||||
|
||||
const padding_type = lib.IntType(.unsigned, full_bit_size - Static.count);
|
||||
|
||||
fields = fields ++ [1]lib.Type.StructField{.{
|
||||
.name = "reserved",
|
||||
.type = padding_type,
|
||||
.default_value = &@as(padding_type, 0),
|
||||
.is_comptime = false,
|
||||
.alignment = 0,
|
||||
}};
|
||||
break :fields fields;
|
||||
},
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
};
|
||||
},
|
||||
});
|
||||
};
|
||||
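// For the three members above, the reified Static.Bitmap is equivalent to this
// hand-written form (shown for illustration only; not part of the original file):
const StaticBitmapEquivalent = packed struct(u8) {
    cpu: bool,
    boot: bool,
    process: bool,
    reserved: u5 = 0,
};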

pub const Dynamic = enum {
    io,
    ram, // Barrelfish equivalent: RAM (no PhysAddr)
    cpu_memory, // Barrelfish equivalent: Frame
    page_table, // Barrelfish equivalent: VNode
    // irq_table,
    // device_memory,
    // scheduler,

    pub const Map = extern struct {
        io: IO,
        ram: RAM,
        cpu_memory: CPUMemory,
        page_table: PageTables,

        comptime {
            inline for (lib.fields(Dynamic.Map), lib.fields(Dynamic)) |struct_field, enum_field| {
                assert(lib.equal(u8, enum_field.name, struct_field.name));
            }
        }
    };
};

pub const RAM = extern struct {
    lists: [lib.arch.reverse_valid_page_sizes.len]?*Region = .{null} ** lib.arch.valid_page_sizes.len,

    const AllocateError = error{
        OutOfMemory,
    };

    inline fn getListIndex(size: usize) usize {
        inline for (lib.arch.reverse_valid_page_sizes, 0..) |reverse_page_size, reverse_index| {
            if (size >= reverse_page_size) return reverse_index;
        }

        unreachable;
    }

    pub const Region = extern struct {
        region: PhysicalMemoryRegion,
        next: ?*@This() = null,

        const UnalignedAllocationResult = extern struct {
            wasted: PhysicalMemoryRegion,
            allocated: PhysicalMemoryRegion,
        };

        inline fn allocateUnaligned(free_ram: *Region, size: usize, alignment: usize) ?UnalignedAllocationResult {
            const aligned_region_address = lib.alignForward(usize, free_ram.region.address.value(), alignment);
            const wasted_space = aligned_region_address - free_ram.region.address.value();
            if (free_ram.region.size >= wasted_space + size) {
                const wasted_region = free_ram.region.takeSlice(wasted_space);
                const allocated_region = free_ram.region.takeSlice(size);
                return UnalignedAllocationResult{
                    .wasted = wasted_region,
                    .allocated = allocated_region,
                };
            }

            return null;
        }
    };
};
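// getListIndex buckets a request into the free list of the largest page size it
// can cover, scanning page sizes from largest to smallest. A standalone
// illustration assuming the usual x86-64 page sizes (the real values come from
// lib.arch):
test "size-class selection over reversed page sizes" {
    const reverse_page_sizes = [_]usize{ 1 << 30, 2 << 20, 4 << 10 }; // 1 GiB, 2 MiB, 4 KiB
    const size: usize = 8 << 10; // an 8 KiB request
    var index: usize = undefined;
    for (reverse_page_sizes, 0..) |page_size, i| {
        if (size >= page_size) {
            index = i;
            break;
        }
    }
    try lib.testing.expectEqual(@as(usize, 2), index); // lands in the 4 KiB list
}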

pub const CPUMemory = extern struct {
    privileged: RAM = .{},
    user: RAM = .{},
    flags: Flags,

    const Flags = packed struct(u64) {
        allocate: bool,
        reserved: u63 = 0,
    };
};

pub const PageTables = extern struct {
    foo: u32 = 0,
};

pub const IO = extern struct {
    debug: bool,
};

pub const Scheduler = extern struct {
    handle: ?*cpu.UserScheduler = null,
    memory: PhysicalMemoryRegion,
};

comptime {
    assert(enumCount(Dynamic) + enumCount(Static) == enumCount(birth.capabilities.Type));
}

pub const Root = extern struct {
    static: Static.Bitmap,
    dynamic: Dynamic.Map,
    scheduler: Scheduler,
    heap: Heap = .{},
    padding: [padding_byte_count]u8 = .{0} ** padding_byte_count,

    const max_alignment = @max(@alignOf(Static.Bitmap), @alignOf(Dynamic.Map), @alignOf(Scheduler), @alignOf(Heap));
    const total_size = lib.alignForward(usize, @sizeOf(Static.Bitmap) + @sizeOf(Dynamic.Map) + @sizeOf(Scheduler) + @sizeOf(Heap), max_alignment);
    const page_aligned_size = lib.alignForward(usize, total_size, lib.arch.valid_page_sizes[0]);
    const padding_byte_count = page_aligned_size - total_size;

    comptime {
        assert(@sizeOf(Root) % lib.arch.valid_page_sizes[0] == 0);
    }

    pub fn copy(root: *Root, other: *Root) void {
        other.static = root.static;
        // TODO:
        other.dynamic = root.dynamic;
    }

    pub inline fn hasPermissions(root: *const Root, comptime capability_type: birth.capabilities.Type, command: birth.capabilities.Command(capability_type)) bool {
        return switch (capability_type) {
            // static capabilities
            inline .cpu,
            .boot,
            .process,
            => |static_capability| @field(root.static, @tagName(static_capability)),
            // dynamic capabilities
            .io => switch (command) {
                .copy, .mint, .retype, .delete, .revoke, .create => unreachable,
                .log => root.dynamic.io.debug,
            },
            .cpu_memory => root.dynamic.cpu_memory.flags.allocate,
            .ram => unreachable,
            .page_table => unreachable,
        };
    }

    pub const AllocateError = error{
        OutOfMemory,
    };

    // Fast path
    pub fn allocatePages(root: *Root, size: usize) AllocateError!PhysicalMemoryRegion {
        assert(size != 0);
        assert(lib.isAligned(size, lib.arch.valid_page_sizes[0]));
        var index = RAM.getListIndex(size);

        const result = blk: {
            while (true) : (index -= 1) {
                const list = root.dynamic.ram.lists[index];
                var iterator = list;

                while (iterator) |free_ram| : (iterator = free_ram.next) {
                    if (free_ram.region.size > size) {
                        const result = free_ram.region.takeSlice(size);
                        break :blk result;
                    } else if (free_ram.region.size == size) {
                        // TODO: consume the whole region and unlink it from the free list
                        @panic("TODO: consume whole region");
                    }
                }

                if (index == 0) break;
            }

            return error.OutOfMemory;
        };

        @memset(result.toHigherHalfVirtualAddress().access(u8), 0);

        return result;
    }
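    // A hypothetical caller, to make the contract concrete: page-multiple sizes
    // in, zeroed physical region out (sketch only; not part of the original file).
    fn exampleAllocate(root: *Root) AllocateError!PhysicalMemoryRegion {
        // 16 KiB, expressed as a multiple of the base page size.
        const region = try root.allocatePages(4 * lib.arch.valid_page_sizes[0]);
        assert(lib.isAligned(region.size, lib.arch.valid_page_sizes[0]));
        return region;
    }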

    // Slow uncommon path. Use cases:
    // 1. CR3 switch. This is assumed to be privileged, so this function assumes privileged use of the memory
    pub fn allocatePageCustomAlignment(root: *Root, size: usize, alignment: usize) AllocateError!PhysicalMemoryRegion {
        assert(alignment > lib.arch.valid_page_sizes[0] and alignment < lib.arch.valid_page_sizes[1]);

        comptime assert(lib.arch.valid_page_sizes.len == 3);
        var index = RAM.getListIndex(size);

        while (true) : (index -= 1) {
            if (root.dynamic.ram.lists[index]) |smallest_region_list| {
                var iterator: ?*cpu.capabilities.RAM.Region = smallest_region_list;
                while (iterator) |free_ram| : (iterator = free_ram.next) {
                    if (lib.isAligned(free_ram.region.address.value(), alignment)) {
                        if (free_ram.region.size >= size) {
                            const allocated_region = free_ram.region.takeSlice(size);
                            return allocated_region;
                        }
                    } else if (free_ram.allocateUnaligned(size, alignment)) |unaligned_allocation| {
                        try root.addRegion(&root.dynamic.ram, unaligned_allocation.wasted);
                        return unaligned_allocation.allocated;
                    }
                }
            }

            if (index == 0) break;
        }

        return AllocateError.OutOfMemory;
    }

    fn allocateSingle(root: *Root, comptime T: type) AllocateError!*T {
        var iterator = root.heap.first;
        while (iterator) |heap_region| : (iterator = heap_region.next) {
            if (heap_region.alignmentFits(@alignOf(T))) {
                if (heap_region.sizeFits(@sizeOf(T))) {
                    const allocated_region = heap_region.takeRegion(@sizeOf(T));
                    const result = &allocated_region.toHigherHalfVirtualAddress().access(T)[0];
                    return result;
                }
            } else {
                @panic("ELSE");
            }
        }

        const physical_region = try root.allocatePages(lib.arch.valid_page_sizes[0]);
        const heap_region = physical_region.toHigherHalfVirtualAddress().address.access(*Heap.Region);
        const first = root.heap.first;
        heap_region.* = .{
            .descriptor = physical_region.offset(@sizeOf(Heap.Region)),
            .allocated_size = @sizeOf(Heap.Region),
            .next = first,
        };

        root.heap.first = heap_region;

        return try root.allocateSingle(T);
    }

    fn allocateMany(root: *Root, comptime T: type, count: usize) AllocateError![]T {
        _ = count;
        _ = root;

        @panic("TODO many");
    }

    fn addRegion(root: *Root, ram: *RAM, physical_region: PhysicalMemoryRegion) !void {
        const index = RAM.getListIndex(physical_region.size);
        const new_region = try root.allocateSingle(RAM.Region);
        new_region.* = RAM.Region{
            .region = physical_region,
            .next = root.dynamic.ram.lists[index],
        };

        ram.lists[index] = new_region;
    }

    pub const AllocateCPUMemoryOptions = packed struct {
        privileged: bool,
    };

    pub fn allocateCPUMemory(root: *Root, physical_region: PhysicalMemoryRegion, options: AllocateCPUMemoryOptions) !void {
        const ram_region = switch (options.privileged) {
            true => &root.dynamic.cpu_memory.privileged,
            false => &root.dynamic.cpu_memory.user,
        };

        try root.addRegion(ram_region, physical_region);
    }

    pub const Heap = extern struct {
        first: ?*Region = null,

        const AllocateError = error{
            OutOfMemory,
        };

        pub fn new(physical_region: PhysicalMemoryRegion, previous_allocated_size: usize) Heap {
            const allocated_size = previous_allocated_size + @sizeOf(Region);
            assert(physical_region.size > allocated_size);
            const region = physical_region.offset(previous_allocated_size).address.toHigherHalfVirtualAddress().access(*Region);
            region.* = .{
                .descriptor = physical_region,
                .allocated_size = allocated_size,
            };
            return Heap{
                .first = region,
            };
        }

        fn create(heap: *Heap, comptime T: type) Heap.AllocateError!*T {
            const result = try heap.allocate(T, 1);
            return &result[0];
        }
        fn allocate(heap: *Heap, comptime T: type, count: usize) Heap.AllocateError![]T {
            var iterator = heap.first;
            while (iterator) |heap_region| : (iterator = heap_region.next) {
                const allocation = heap_region.allocate(T, count) catch continue;
                return allocation;
            }
            @panic("TODO: allocate");
        }

        const Region = extern struct {
            descriptor: PhysicalMemoryRegion,
            allocated_size: usize,
            next: ?*Region = null,

            inline fn getFreeRegion(region: Region) PhysicalMemoryRegion {
                const free_region = region.descriptor.offset(region.allocated_size);
                assert(free_region.size > 0);
                return free_region;
            }

            const AllocateError = error{
                OutOfMemory,
            };

            fn takeRegion(region: *Region, size: usize) PhysicalMemoryRegion {
                var free_region = region.getFreeRegion();
                assert(free_region.size >= size);
                const allocated_region = free_region.takeSlice(size);
                region.allocated_size += size;
                return allocated_region;
            }

            fn allocate(region: *Region, comptime T: type, count: usize) Region.AllocateError![]T {
                const free_region = region.getFreeRegion();
                _ = free_region;
                _ = count;
                @panic("TODO: region allocate");
            }

            fn create(region: *Region, comptime T: type) Region.AllocateError!*T {
                const result = try region.allocate(T, 1);
                return &result[0];
            }

            inline fn canAllocateDirectly(region: Region, size: usize, alignment: usize) bool {
                const alignment_fits = region.alignmentFits(alignment);
                const size_fits = region.sizeFits(size);
                return alignment_fits and size_fits;
            }

            inline fn canAllocateSplitting(region: Region, size: usize, alignment: usize) bool {
                const free_region = region.getFreeRegion();
                const aligned_region_address = lib.alignForward(usize, free_region.address.value(), alignment);
                const wasted_space = aligned_region_address - free_region.address.value();
                log.warn("Wasted space: {} bytes", .{wasted_space});
                _ = size;
                @panic("TODO: canAllocateSplitting");
            }

            inline fn sizeFits(region: Region, size: usize) bool {
                return region.descriptor.size - region.allocated_size >= size;
            }

            inline fn alignmentFits(region: Region, alignment: usize) bool {
                const result = lib.isAligned(region.getFreeRegion().address.value(), alignment);
                return result;
            }
        };
    };
};
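// The heap regions above are bump allocators: the free window always starts
// `allocated_size` bytes into `descriptor`, and takeRegion advances that cursor.
// Illustrative arithmetic only (no real memory is touched):
test "bump-allocation cursor arithmetic" {
    var allocated_size: usize = @sizeOf(usize) * 4; // pretend the header is already accounted for
    const request: usize = 32;
    const free_offset = allocated_size; // where the next allocation would start
    allocated_size += request; // what takeRegion does to the cursor
    try lib.testing.expectEqual(free_offset + request, allocated_size);
}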

pub const RootPageTableEntry = extern struct {
    address: PhysicalAddress,
};
368 src/cpu/init.zig
@ -1,368 +0,0 @@
const birth = @import("birth");
|
||||
const bootloader = @import("bootloader");
|
||||
const cpu = @import("cpu");
|
||||
const lib = @import("lib");
|
||||
const privileged = @import("privileged");
|
||||
|
||||
const assert = lib.assert;
|
||||
const log = lib.log;
|
||||
const PhysicalAddress = lib.PhysicalAddress;
|
||||
const PhysicalMemoryRegion = lib.PhysicalMemoryRegion;
|
||||
const VirtualAddress = lib.VirtualAddress;
|
||||
const VirtualMemoryRegion = lib.VirtualMemoryRegion;
|
||||
|
||||
const RegionList = cpu.RegionList;
|
||||
const PageTableRegions = cpu.arch.init.PageTableRegions;
|
||||
|
||||
const paging = privileged.arch.paging;
|
||||
pub const Error = error{
|
||||
feature_requested_and_not_available,
|
||||
no_files,
|
||||
cpu_file_not_found,
|
||||
init_file_not_found,
|
||||
no_space_for_bootstrap_region,
|
||||
};
|
||||
|
||||
pub fn initialize(bootloader_information: *bootloader.Information) !noreturn {
|
||||
// bootloader_information.draw_context.clearScreen(0xffff7f50);
|
||||
// Do an integrity check so that the bootloader information is in perfect state and there is no weird memory behavior.
|
||||
// This is mainly due to the transition from a 32-bit bootloader to a 64-bit CPU driver in the x86-64 architecture.
|
||||
try bootloader_information.checkIntegrity();
|
||||
// Informing the bootloader information struct that we have reached the CPU driver and any bootloader
|
||||
// functionality is not available anymore
|
||||
bootloader_information.stage = .cpu;
|
||||
// Check that the bootloader has loaded some files as the CPU driver needs them to go forward
|
||||
cpu.bundle = bootloader_information.getSlice(.bundle);
|
||||
if (cpu.bundle.len == 0) {
|
||||
return Error.no_files;
|
||||
}
|
||||
cpu.bundle_files = bootloader_information.getSlice(.file_list);
|
||||
if (cpu.bundle_files.len == 0) {
|
||||
return Error.no_files;
|
||||
}
|
||||
|
||||
try cpu.arch.init.initialize();
|
||||
|
||||
const memory_map_entries = bootloader_information.getMemoryMapEntries();
|
||||
const page_counters = bootloader_information.getPageCounters();
|
||||
|
||||
const first_heap_allocation_size = 2 * lib.arch.valid_page_sizes[0];
|
||||
|
||||
var heap_region_metadata: struct {
|
||||
region: PhysicalMemoryRegion,
|
||||
free_size: u64,
|
||||
index: usize,
|
||||
} = for (memory_map_entries, page_counters, 0..) |mmap_entry, page_counter, index| {
|
||||
if (mmap_entry.type == .usable) {
|
||||
const free_region = mmap_entry.getFreeRegion(page_counter);
|
||||
if (free_region.size >= first_heap_allocation_size) {
|
||||
break .{
|
||||
.region = PhysicalMemoryRegion.new(.{
|
||||
.address = free_region.address,
|
||||
.size = free_region.size,
|
||||
}),
|
||||
.free_size = free_region.size - first_heap_allocation_size,
|
||||
.index = index,
|
||||
};
|
||||
}
|
||||
}
|
||||
} else return error.no_space_for_bootstrap_region;
|
||||

    const heap_region = try heap_region_metadata.region.takeSlice(first_heap_allocation_size);
    try cpu.heap.addBootstrapingRegion(heap_region.toHigherHalfVirtualAddress());

    const host_free_region_list = try cpu.heap.create(RegionList);

    var free_size: u64 = 0;
    _ = try host_free_region_list.append(heap_region_metadata.region);
    free_size += heap_region_metadata.region.size;

    var region_list_iterator = host_free_region_list;

    for (memory_map_entries, page_counters, 0..) |memory_map_entry, page_counter, index| {
        if (index == heap_region_metadata.index) continue;

        if (memory_map_entry.type == .usable) {
            const free_region = memory_map_entry.getFreeRegion(page_counter);

            if (free_region.size > 0) {
                assert(lib.isAligned(free_region.size, lib.arch.valid_page_sizes[0]));
                _ = region_list_iterator.append(free_region) catch {
                    const new_region_list = try cpu.heap.create(RegionList);
                    region_list_iterator.metadata.next = new_region_list;
                    new_region_list.metadata.previous = region_list_iterator;
                    region_list_iterator = new_region_list;
                    _ = try region_list_iterator.append(free_region);
                };

                free_size += free_region.size;
            }
        }
    }

    cpu.page_allocator.free_regions = host_free_region_list;
    cpu.page_allocator.free_byte_count = free_size;

    // Add the regions used by the bootloader to the physical memory manager
    for (memory_map_entries, page_counters) |memory_map_entry, page_counter| {
        if (memory_map_entry.type == .usable) {
            const used_region = memory_map_entry.getUsedRegion(page_counter);
            if (used_region.size > 0) {
                assert(lib.isAligned(used_region.size, lib.arch.valid_page_sizes[0]));
                try cpu.page_allocator.appendUsedRegion(used_region, .{ .reason = .bootloader });
            }
        }
    }

    var used_regions = cpu.page_allocator.used_regions;
    var used_memory_by_bootloader: usize = 0;
    while (used_regions) |used_region| : (used_regions = used_region.next) {
        if (used_region.use_case.reason == .bootloader) {
            used_memory_by_bootloader += used_region.region.size;
        }
    }

    log.debug("Memory used by the bootloader: 0x{x} bytes", .{used_memory_by_bootloader});

    try cpu.page_allocator.appendUsedRegion(heap_region, .{ .reason = .heap });

    switch (cpu.bsp) {
        true => {
            // Set up kernel debug information
            cpu.debug_info = blk: {
                const cpu_driver_executable_descriptor = try bootloader_information.getFileDescriptor("cpu_driver");
                const elf_file = file: {
                    const aligned_file_len = lib.alignForward(usize, cpu_driver_executable_descriptor.content.len, lib.arch.valid_page_sizes[0]);
                    const elf_file_physical_allocation = try cpu.page_allocator.allocate(aligned_file_len, .{ .reason = .privileged });
                    break :file elf_file_physical_allocation.toHigherHalfVirtualAddress().address.access([*]align(lib.arch.valid_page_sizes[0]) u8)[0..elf_file_physical_allocation.size];
                };
                lib.memcpy(elf_file[0..cpu_driver_executable_descriptor.content.len], cpu_driver_executable_descriptor.content);
                const result = try lib.getDebugInformation(cpu.heap.allocator.zigUnwrap(), elf_file);
                break :blk result;
            };

            const init_module_descriptor = try bootloader_information.getFileDescriptor("init");

            try spawnInitBSP(init_module_descriptor.content, bootloader_information.cpu_page_tables);
        },
        false => @panic("TODO: implement APP"),
    }
}
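// initialize() above selects the bootstrap heap region with Zig's for-else: the
// `else` arm runs only when the loop finishes without `break`, which makes
// "search or fail" scans concise. Minimal standalone form of the same idiom:
test "for-else yields a value or falls through" {
    const values = [_]u8{ 1, 3, 8 };
    const found = for (values) |value| {
        if (value > 2) break value;
    } else @as(u8, 0);
    try lib.testing.expectEqual(@as(u8, 3), found);
}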

const ELF = lib.ELF(64);

const SpawnInitCommonResult = extern struct {
    scheduler: *cpu.UserScheduler,
    entry_point: u64,
};

pub const MappingArgument = extern struct {
    virtual: VirtualAddress,
    physical: PhysicalAddress,
    size: u64,
};

pub const InitFile = struct {
    content: []const u8,
    segments: []const Segment,
};

pub const Segment = extern struct {
    virtual: VirtualAddress,
    physical: PhysicalAddress,
    memory_size: usize,
    flags: privileged.Mapping.Flags,
    file_offset: usize,
    file_size: usize,
};

var once: bool = false;

fn spawnInitCommon(init_file: []const u8, cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult {
    assert(!once);
    once = true;
    // TODO: delete in the future
    assert(cpu.bsp);

    const init_cpu_scheduler = try cpu.heap.create(cpu.UserScheduler);
    init_cpu_scheduler.* = cpu.UserScheduler{
        .s = .{
            .common = undefined,
            .capability_root_node = cpu.interface.Root{
                .static = .{
                    .cpu = true,
                    .boot = true,
                    .process = true,
                },
                .dynamic = .{
                    .io = .{
                        .debug = true,
                    },
                    .memory = .{},
                    .cpu_memory = .{
                        .flags = .{
                            .allocate = true,
                        },
                    },
                    .page_table = cpu.interface.PageTables{
                        .privileged = undefined,
                        .user = birth.interface.PageTable{
                            .index = 0,
                            .entry_type = .page_table,
                        },
                        // .vmm = try cpu.interface.VMM.new(),
                        .can_map_page_tables = true,
                        .page_tables = .{
                            .ptr = undefined,
                            .len = 0,
                            .capacity = 0,
                        },
                        .leaves = .{
                            .ptr = undefined,
                            .len = 0,
                            .capacity = 0,
                        },
                    },
                    .command_buffer_submission = .{ .region = PhysicalMemoryRegion.invalid() },
                    .command_buffer_completion = .{ .region = PhysicalMemoryRegion.invalid() },
                    .memory_mapping = .{},
                    .page_table_mapping = .{},
                },
                .scheduler = .{
                    .memory = undefined,
                    // .memory = scheduler_physical_region,
                },
            },
        },
    };

    const init_elf = try ELF.Parser.init(init_file);
    const entry_point = init_elf.getEntryPoint();
    const program_headers = init_elf.getProgramHeaders();

    var segment_buffer: [20]Segment = undefined;
    var segment_count: usize = 0;
    var segment_total_size: usize = 0;
    var first_address: ?u64 = null;

    for (program_headers) |program_header| {
        if (program_header.type == .load) {
            if (first_address == null) {
                first_address = program_header.virtual_address;
            }

            const segment_size = lib.alignForward(usize, program_header.size_in_memory, lib.arch.valid_page_sizes[0]);
            segment_total_size += segment_size;

            const segment_virtual = VirtualAddress.new(program_header.virtual_address);
            const segment_physical_region = try cpu.page_allocator.allocate(segment_size, .{ .reason = .user });

            const segment = &segment_buffer[segment_count];
            segment.* = .{
                .physical = segment_physical_region.address,
                .virtual = segment_virtual,
                .memory_size = segment_size,
                .flags = .{
                    .execute = program_header.flags.executable,
                    .write = program_header.flags.writable,
                    .user = true,
                    .huge_pages = false,
                },
                .file_offset = program_header.offset,
                .file_size = program_header.size_in_file,
            };

            const src = init_file[segment.file_offset..][0..segment.file_size];
            // It's necessary to use the higher-half address here since the user mapping is not applied yet
            const dst = segment_physical_region.toHigherHalfVirtualAddress().access(u8)[0..src.len];
            lib.memcpy(dst, src);

            segment_count += 1;
        }
    }

    const init_start_address = first_address orelse @panic("WTF");
    const init_top_address = init_start_address + segment_total_size;
    const user_scheduler_virtual_address = VirtualAddress.new(init_top_address);
    init_cpu_scheduler.s.common = user_scheduler_virtual_address.access(*birth.Scheduler.Common);

    const user_scheduler_virtual_region = VirtualMemoryRegion.new(.{
        .address = user_scheduler_virtual_address,
        .size = lib.alignForward(usize, @sizeOf(birth.Scheduler), lib.arch.valid_page_sizes[0]),
    });
    // Align to 2MB
    const user_initial_heap_top = lib.alignForward(usize, user_scheduler_virtual_region.top().value(), lib.arch.valid_page_sizes[1]);

    const segments = segment_buffer[0..segment_count];

    const user_virtual_region = VirtualMemoryRegion.new(.{
        .address = VirtualAddress.new(init_start_address),
        .size = user_initial_heap_top - init_start_address,
    });
    // const page_table_regions = try PageTableRegions.create(user_virtual_region, cpu_page_tables);
    log.debug("Scheduler region", .{});
    const scheduler_physical_region = try cpu.page_allocator.allocate(user_scheduler_virtual_region.size, .{ .reason = .user });
    init_cpu_scheduler.s.capability_root_node.scheduler.memory = scheduler_physical_region;

    const scheduler_virtual_region = VirtualMemoryRegion.new(.{
        .address = user_scheduler_virtual_address,
        .size = scheduler_physical_region.size,
    });

    scheduler_physical_region.address.toHigherHalfVirtualAddress().access(*birth.Scheduler.Common).self = user_scheduler_virtual_address.access(*birth.Scheduler.Common);

    const heap_virtual_region = VirtualMemoryRegion.new(.{
        .address = scheduler_virtual_region.top(),
        .size = lib.alignForward(usize, scheduler_virtual_region.top().value(), 64 * lib.arch.valid_page_sizes[1]) - scheduler_virtual_region.top().value(),
    });

    log.debug("Heap region", .{});
    const heap_physical_region = try cpu.page_allocator.allocate(heap_virtual_region.size, .{ .reason = .user });
    @memset(heap_physical_region.toHigherHalfVirtualAddress().access(u8), 0);

    assert(scheduler_physical_region.size == scheduler_virtual_region.size);
    assert(heap_physical_region.size == heap_virtual_region.size);
    // Set up common variables
    const higher_half_scheduler_common = scheduler_physical_region.address.toHigherHalfVirtualAddress().access(*birth.Scheduler.Common);
    higher_half_scheduler_common.disabled = true;
    higher_half_scheduler_common.core_id = cpu.core_id;
    higher_half_scheduler_common.heap = VirtualMemoryRegion.new(.{
        .address = heap_virtual_region.address,
        .size = heap_virtual_region.size,
    });

    try cpu.arch.init.setupMapping(init_cpu_scheduler, user_virtual_region, cpu_page_tables, .{
        .content = init_file,
        .segments = segments,
    }, .{
        .scheduler = .{
            .physical = scheduler_physical_region.address,
            .virtual = scheduler_virtual_region.address,
            .size = scheduler_virtual_region.size,
        },
        .heap = .{
            .physical = heap_physical_region.address,
            .virtual = heap_virtual_region.address,
            .size = heap_virtual_region.size,
        },
    });

    return SpawnInitCommonResult{
        // .page_table_regions = page_table_regions,
        .scheduler = init_cpu_scheduler,
        .entry_point = entry_point,
    };
}

fn spawnInitBSP(init_file: []const u8, cpu_page_tables: paging.CPUPageTables) !noreturn {
    const spawn_init = try spawnInitCommon(init_file, cpu_page_tables);
    const init_scheduler = spawn_init.scheduler;
    // const page_table_regions = spawn_init.page_table_regions;
    const entry_point = spawn_init.entry_point;
    const scheduler_common = init_scheduler.s.common;

    cpu.user_scheduler = init_scheduler;

    cpu.arch.init.setupSchedulerCommon(scheduler_common, entry_point);
    scheduler_common.disabled_save_area.contextSwitch();
}
@ -1,602 +0,0 @@
const lib = @import("lib");
const assert = lib.assert;
const Allocator = lib.Allocator;
const enumCount = lib.enumCount;
const log = lib.log.scoped(.capabilities);
const SparseArray = lib.data_structures.SparseArray;
const VirtualAddress = lib.VirtualAddress;

const privileged = @import("privileged");
const paging = privileged.arch.paging;
const PhysicalAddress = lib.PhysicalAddress;
const PhysicalMemoryRegion = lib.PhysicalMemoryRegion;
const VirtualMemoryRegion = lib.VirtualMemoryRegion;
const birth = @import("birth");
const cpu = @import("cpu");
const RegionList = cpu.RegionList;

pub var system_call_count: usize = 0;

pub fn processFromRaw(options: birth.interface.Raw.Options, arguments: birth.interface.Raw.Arguments) birth.interface.Raw.Result {
    defer system_call_count += 1;
    return switch (options.general.convention) {
        .birth => switch (options.birth.type) {
            inline else => |capability| switch (@as(birth.interface.Command.fromCapability(capability), @enumFromInt(options.birth.command))) {
                inline else => |command| blk: {
                    const Interface = birth.interface.Descriptor(capability, command);
                    const result = processCommand(Interface, arguments) catch |err| {
                        lib.log.err("Syscall ({s}, {s}) ended up in error: {}", .{ @tagName(capability), @tagName(command), err });
                        break :blk Interface.fromError(err);
                    };
                    break :blk Interface.fromResult(result);
                },
            },
        },
        .emulated => @panic("TODO: emulated"),
    };
}

pub fn processCommand(comptime Descriptor: type, raw_arguments: birth.interface.Raw.Arguments) Descriptor.Error!Descriptor.Result {
    defer cpu.command_count += 1;
    const capability = Descriptor.Capability;
    const command = Descriptor.Command;
    const arguments = try Descriptor.toArguments(raw_arguments);

    const root = &cpu.user_scheduler.s.capability_root_node;
    // log.err("\n========\nSyscall received: {s}, {s}\n========\n", .{ @tagName(capability), @tagName(command) });

    assert(root.static.process);
    const has_permissions = root.hasPermissions(Descriptor, arguments);

    return if (has_permissions) switch (capability) {
        .io => switch (command) {
            .copy, .mint, .retype, .delete, .revoke, .create => unreachable,
            .log => blk: {
                const message = arguments;
                cpu.writer.writeAll(message) catch unreachable;
                comptime assert(Descriptor.Result == usize);
                break :blk message.len;
            },
        },
        .cpu => switch (command) {
            .copy, .mint, .retype, .delete, .revoke, .create => unreachable,
            .get_core_id => cpu.core_id,
            .shutdown => cpu.shutdown(.success),
            .get_command_buffer => {
                const command_buffer = arguments;
                _ = command_buffer;
                @panic("TODO: get_command_buffer");
            },
        },
        .cpu_memory => switch (command) {
            else => @panic(@tagName(command)),
        },
        .command_buffer_completion, .command_buffer_submission => switch (command) {
            .map => {
                const region = @field(root.dynamic, @tagName(capability)).region;
                assert(region.address.value() != 0);
                assert(region.size != 0);
                @panic("TODO: map");
            }, // TODO
            else => @panic(@tagName(command)),
        },
        .memory => switch (command) {
            .allocate => blk: {
                comptime assert(@TypeOf(arguments) == usize);
                const size = arguments;
                // TODO: we want more fine-grained control of the reason if we want more than a simple statistic
                const result = try root.allocateMemory(size);
                break :blk result.reference;
            },
            .retype => blk: {
                const source = arguments.source;
                const destination = arguments.destination;
                const region_ptr = root.dynamic.memory.find(source) orelse unreachable;
                const region_copy = region_ptr.*;
                root.dynamic.memory.remove(source);
                switch (destination) {
                    .cpu_memory => {
                        // TODO: delete properly
                        const new_ref = root.dynamic.cpu_memory.allocated.append(region_copy) catch |err| {
                            log.err("Error: {}", .{err});
                            return error.OutOfMemory;
                        };
                        // TODO: delete properly

                        break :blk @bitCast(new_ref);
                    },
                    .command_buffer_submission, .command_buffer_completion => {
                        switch (destination) {
                            inline .command_buffer_completion, .command_buffer_submission => |dst| @field(root.dynamic, @tagName(dst)).region = region_copy,
                            else => @panic("WTF"),
                        }
                        // TODO: better value
                        break :blk .{ .integer = 0 };
                    },
                    else => @panic("WTF"),
                }
                if (true) @panic("TODO: retype");
                break :blk undefined;
            },
            else => @panic(@tagName(command)),
        },
        .boot => switch (command) {
            .get_bundle_size => cpu.bundle.len,
            .get_bundle_file_list_size => cpu.bundle_files.len,
            else => @panic(@tagName(command)),
        },
        .process => switch (command) {
            .exit => cpu.shutdown(switch (arguments) {
                true => .success,
                false => .failure,
            }),
            .panic => cpu.panic("User process panicked with exit code 0x{x}:\n==========\n{s}\n==========", .{ arguments.exit_code, arguments.message }),
            else => @panic(@tagName(command)),
        },
        .page_table => switch (command) {
            .get => {
                const descriptor = arguments.descriptor;
                assert(descriptor.entry_type == .page_table);

                const block = try root.dynamic.page_table.page_tables.getChecked(descriptor.block);
                const page_table = &block.array[descriptor.index];
                log.debug("Page table: {}", .{page_table.flags.level});
                @memcpy(arguments.buffer, &page_table.children);
            },
            .get_leaf => {
                const descriptor = arguments.descriptor;
                assert(descriptor.entry_type == .leaf);

                const block = try root.dynamic.page_table.leaves.getChecked(descriptor.block);
                const leaf = &block.array[descriptor.index];

                const user_leaf = arguments.buffer;
                user_leaf.* = leaf.common;
            },
            else => @panic("TODO: page_table other"),
        },
        .memory_mapping => {
            @panic("TODO: memory_mapping");
        },
        .page_table_mapping => {
            @panic("TODO: page_table_mapping");
        },
    } else error.forbidden;
}

pub const RootDescriptor = extern struct {
    value: *Root,
};

pub const Static = enum {
    cpu,
    boot,
    process,

    pub const count = lib.enumCount(@This());

    pub const Bitmap = @Type(.{
        .Struct = blk: {
            const full_bit_size = @max(@as(comptime_int, 1 << 3), @as(u8, @sizeOf(Static)) << 3);
            break :blk .{
                .layout = .Packed,
                .backing_integer = @Type(.{
                    .Int = .{
                        .signedness = .unsigned,
                        .bits = full_bit_size,
                    },
                }),
                .fields = fields: {
                    var fields: []const lib.Type.StructField = &.{};
                    inline for (lib.enumFields(Static)) |static_field| {
                        fields = fields ++ [1]lib.Type.StructField{.{
                            .name = static_field.name,
                            .type = bool,
                            .default_value = null,
                            .is_comptime = false,
                            .alignment = 0,
                        }};
                    }

                    assert(Static.count > 0);
                    assert(@sizeOf(Static) > 0 or Static.count == 1);

                    const padding_type = @Type(.{
                        .Int = .{
                            .signedness = .unsigned,
                            .bits = full_bit_size - Static.count,
                        },
                    });

                    fields = fields ++ [1]lib.Type.StructField{.{
                        .name = "reserved",
                        .type = padding_type,
                        .default_value = &@as(padding_type, 0),
                        .is_comptime = false,
                        .alignment = 0,
                    }};
                    break :fields fields;
                },
                .decls = &.{},
                .is_tuple = false,
            };
        },
    });
};

pub const CommandBufferMemory = extern struct {
    region: PhysicalMemoryRegion,
};

pub const Dynamic = enum {
    io,
    memory, // Barrelfish equivalent: Memory (no PhysAddr)
    cpu_memory, // Barrelfish equivalent: Frame
    page_table, // Barrelfish equivalent: VNode
    command_buffer_submission,
    command_buffer_completion,
    memory_mapping, // Barrelfish equivalent: Frame mapping, Device Frame Mapping
    page_table_mapping, // Barrelfish equivalent: VNode mapping
    // irq_table,
    // device_memory,
    // scheduler,

    pub const Map = extern struct {
        io: IO,
        memory: Memory,
        cpu_memory: CPUMemory,
        page_table: PageTables,
        command_buffer_submission: CommandBufferMemory,
        command_buffer_completion: CommandBufferMemory,
        memory_mapping: Memory.Mapping,
        page_table_mapping: PageTables.Mapping,

        comptime {
            inline for (lib.fields(Dynamic.Map), lib.fields(Dynamic)) |struct_field, enum_field| {
                assert(lib.equal(u8, enum_field.name, struct_field.name));
            }
        }
    };
};

pub const Memory = extern struct {
    allocated: RegionList = .{},
    allocate: bool = true,

    pub const Mapping = extern struct {
        foo: u32 = 0,
    };

    const AllocateError = error{
        OutOfMemory,
    };

    fn find(memory: *Memory, memory_descriptor: birth.interface.Memory) ?*PhysicalMemoryRegion {
        var iterator: ?*RegionList = &memory.allocated;
        var block_index: usize = 0;

        return blk: while (iterator) |list| : ({
            iterator = list.metadata.next;
            block_index += 1;
        }) {
            if (block_index == memory_descriptor.block) {
                @panic("TODO: find");
                // if (memory_descriptor.region < list.metadata.count) {
                //     const region = &list.regions[memory_descriptor.region];
                //     if (region.size != 0 and region.address.value() != 0) {
                //         assert(lib.isAligned(region.size, lib.arch.valid_page_sizes[0]));
                //         assert(lib.isAligned(region.address.value(), lib.arch.valid_page_sizes[0]));
                //         break :blk region;
                //     }
                // }
                //
                // break :blk null;
            } else if (block_index > memory_descriptor.block) {
                break :blk null;
            } else {
                continue;
            }
        } else break :blk null;
    }

    inline fn getListIndex(size: usize) usize {
        inline for (lib.arch.reverse_valid_page_sizes, 0..) |reverse_page_size, reverse_index| {
            if (size >= reverse_page_size) return reverse_index;
        }

        @panic("WTF");
    }

    pub fn appendRegion(memory: *Memory, region: PhysicalMemoryRegion) !birth.interface.Memory {
        var iterator: ?*RegionList = &memory.allocated;
        while (iterator) |region_list| : (iterator = region_list.metadata.next) {
            const result = region_list.append(region) catch continue;
            return result;
        }

        return error.OutOfMemory;
    }

    pub fn remove(memory: *Memory, ref: birth.interface.Memory) void {
        const region_index: u6 = @intCast(ref.region);
        var block_index: u32 = 0;
        var iterator: ?*RegionList = &memory.allocated;
        while (iterator) |region_list| : ({
            iterator = region_list.metadata.next;
            block_index += 1;
        }) {
            if (block_index == ref.block) {
                region_list.remove(region_index);
                break;
            } else if (block_index > ref.block) {
                @panic("WTF");
            } else continue;
        } else {
            @panic("WTF");
        }
    }
};
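// Standalone model of the (block, region) reference walk used by find/remove
// above: block indices increase along metadata.next, so overshooting the target
// block without a match is treated as unreachable (illustrative only).
test "block walk stops at the target index" {
    const target_block: u32 = 2;
    var block_index: u32 = 0;
    while (block_index <= target_block) : (block_index += 1) {
        if (block_index == target_block) break;
    } else unreachable;
    try lib.testing.expectEqual(target_block, block_index);
}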

pub const CPUMemory = extern struct {
    allocated: RegionList = .{},
    flags: Flags = .{},

    const Flags = packed struct(u64) {
        allocate: bool = true,
        reserved: u63 = 0,
    };
};

pub const PageTable = extern struct {
    region: PhysicalMemoryRegion,
    mapping: VirtualAddress,
    flags: Flags,
    children: Children = .{.{}} ** children_count,

    pub const Children = [children_count]birth.interface.PageTable;
    pub const children_count = paging.page_table_entry_count;

    pub const Flags = packed struct(u64) {
        level: paging.Level,
        reserved: u62 = 0,
    };

    pub const Array = extern struct {
        array: [count]PageTable,
        bitset: Bitset,
        next: ?*Array = null,

        pub const Bitset = lib.data_structures.BitsetU64(count);

        pub const count = 32;

        pub fn get(array: *Array, index: u6) !*PageTable {
            if (array.bitset.isSet(index)) {
                return &array.array[index];
            } else {
                return error.index_out_of_bounds;
            }
        }
    };
};

pub const Leaf = extern struct {
    common: birth.interface.Leaf,
    physical: PhysicalAddress,
    flags: Flags,

    pub const Flags = packed struct(u64) {
        size: Size,
        reserved: u62 = 0,
    };

    pub const Size = enum(u2) {
        @"4KB",
        @"2MB",
        @"1GB",
    };

    pub const Array = extern struct {
        array: [count]Leaf,
        bitset: Bitset,
        next: ?*Array = null,

        pub const Bitset = lib.data_structures.BitsetU64(count);
        pub const count = 32;
        pub fn get(array: *Array, index: u6) !*Leaf {
            if (array.bitset.isSet(index)) {
                return &array.array[index];
            } else {
                return error.index_out_of_bounds;
            }
        }
    };
};

pub const PageTables = extern struct {
    // This one has the kernel mapped.
    privileged: PageTable, // Kept separate, as it cannot be mapped.
    user: birth.interface.PageTable,
    page_tables: SparseArray(*PageTable.Array),
    leaves: SparseArray(*Leaf.Array),
    // vmm: VMM,
    can_map_page_tables: bool,

    pub const Mapping = extern struct {
        foo: u32 = 0,
    };

    const end = privileged.arch.paging.user_address_space_end;

    fn getUser(page_tables: *const PageTables) ?PhysicalMemoryRegion {
        if (page_tables.user.address.value() == 0) {
            return null;
        }

        if (page_tables.user.size == 0) {
            return null;
        }

        return page_tables.user;
    }

    pub fn switchPrivileged(page_tables: *const PageTables) void {
        paging.Specific.fromPhysicalRegion(page_tables.privileged.region).makeCurrentPrivileged();
    }

    pub fn appendPageTable(page_tables: *PageTables, allocator: *Allocator, page_table: PageTable) !birth.interface.PageTable {
        if (page_tables.page_tables.len > 0) {
            const slice = page_tables.page_tables.ptr[0..page_tables.page_tables.len];
            for (slice, 0..) |block, block_index| {
                const index = block.bitset.allocate() catch continue;
                block.array[index] = page_table;
                return .{
                    .index = index,
                    .block = @intCast(block_index),
                    .entry_type = .page_table,
                    .present = true,
                };
            }
        }

        const page_table_array = try allocator.create(PageTable.Array);
        _ = try page_tables.page_tables.append(allocator, page_table_array);
        return appendPageTable(page_tables, allocator, page_table);
    }

    pub fn appendLeaf(page_tables: *PageTables, allocator: *Allocator, leaf: Leaf) !birth.interface.PageTable {
        if (page_tables.leaves.len > 0) {
            const slice = page_tables.leaves.ptr[0..page_tables.leaves.len];
            for (slice, 0..) |block, block_index| {
                const index = block.bitset.allocate() catch continue;
                block.array[index] = leaf;

                return .{
                    .index = index,
                    .block = @intCast(block_index),
                    .entry_type = .leaf,
                    .present = true,
                };
            }
        }

        const leaf_array = try allocator.create(Leaf.Array);
        _ = try page_tables.leaves.append(allocator, leaf_array);
        return appendLeaf(page_tables, allocator, leaf);
    }
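    // Sketch of the slot-allocation pattern above, assuming a 32-slot bitset per
    // block: find the first clear bit, claim it, and only grow the chain when
    // every block is full (illustrative arithmetic only):
    test "bitset slot allocation" {
        var bitset: u32 = 0b111; // slots 0..2 already taken
        const free_slot: u5 = @intCast(@ctz(~bitset)); // first clear bit
        bitset |= @as(u32, 1) << free_slot;
        try lib.testing.expectEqual(@as(u5, 3), free_slot);
    }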

    pub fn getPageTable(page_tables: *PageTables, page_table: birth.interface.PageTable) !*PageTable {
        assert(page_table.entry_type == .page_table);
        if (page_table.present) {
            const page_table_block = try page_tables.page_tables.getChecked(page_table.block);
            const result = try page_table_block.get(@intCast(page_table.index));
            return result;
        } else {
            return error.not_present;
        }
    }
};

pub const IO = extern struct {
    debug: bool,
};

pub const Scheduler = extern struct {
    memory: PhysicalMemoryRegion,
};

comptime {
    const dynamic_count = enumCount(Dynamic);
    const static_count = enumCount(Static);
    const total_count = enumCount(birth.interface.Capability);
    assert(dynamic_count + static_count == total_count);
}

pub const Root = extern struct {
    static: Static.Bitmap,
    dynamic: Dynamic.Map,
    scheduler: Scheduler,
    heap: Heap = .{},
    padding: [padding_byte_count]u8 = .{0} ** padding_byte_count,

    const Heap = cpu.HeapImplementation(true);

    const max_alignment = @max(@alignOf(Static.Bitmap), @alignOf(Dynamic.Map), @alignOf(Scheduler), @alignOf(Heap));
    const total_size = lib.alignForward(usize, @sizeOf(Static.Bitmap) + @sizeOf(Dynamic.Map) + @sizeOf(Scheduler) + @sizeOf(Heap), max_alignment);
    const page_aligned_size = lib.alignForward(usize, total_size, lib.arch.valid_page_sizes[0]);
    const padding_byte_count = page_aligned_size - total_size;

    comptime {
        assert(@sizeOf(Root) % lib.arch.valid_page_sizes[0] == 0);
    }

    pub const AllocateError = error{
        OutOfMemory,
    };

    fn hasPermissions(root: *Root, comptime Descriptor: type, arguments: Descriptor.Arguments) bool {
        const capability = Descriptor.Capability;
        const command = Descriptor.Command;

        if (command == .retype) {
            const can_retype: bool = switch (@TypeOf(arguments)) {
                void => @panic("Retype on void"),
                else => switch (arguments.destination) {
                    inline else => |destination| blk: {
                        const child_types = comptime capability.getChildTypes();
                        inline for (child_types) |child_type| {
                            if (child_type == destination) {
                                break :blk true;
                            }
                        } else {
                            break :blk false;
                        }
                    },
                },
            };

            if (!can_retype) {
                return false;
            }
        }

        const has_permissions = switch (capability) {
            // static capabilities
            inline .cpu,
            .boot,
            => |static_capability| @field(root.static, @tagName(static_capability)),
            .process => root.static.process or command == .panic,
            // dynamic capabilities
            .io => switch (command) {
                .copy, .mint, .retype, .delete, .revoke, .create => unreachable,
                .log => root.dynamic.io.debug,
            },
            .cpu_memory => root.dynamic.cpu_memory.flags.allocate,
            .command_buffer_completion, .command_buffer_submission => true, // TODO
            .memory => switch (command) {
                .allocate => root.dynamic.memory.allocate,
                .retype => root.dynamic.memory.find(arguments.source) != null,
                else => @panic("TODO: else => memory"),
            },
            .page_table => root.dynamic.page_table.can_map_page_tables, // TODO
            .memory_mapping => true, // TODO
            .page_table_mapping => true, // TODO
        };

        return has_permissions;
    }

    pub const AllocateMemoryResult = extern struct {
        region: PhysicalMemoryRegion,
        reference: birth.interface.Memory,
    };

    pub fn allocateMemory(root: *Root, size: usize) !AllocateMemoryResult {
        const physical_region = try cpu.page_allocator.allocate(size, .{ .reason = .user });
        const reference = try root.dynamic.memory.appendRegion(physical_region);

        return .{
            .region = physical_region,
            .reference = reference,
        };
    }
};
@ -7,8 +7,11 @@ const stopCPU = privileged.arch.stopCPU;

const cpu = @import("cpu");

var lock: lib.Spinlock = .released;

pub const std_options = struct {
    pub fn logFn(comptime level: lib.log.Level, comptime scope: @TypeOf(.EnumLiteral), comptime format: []const u8, args: anytype) void {
        lock.acquire();
        cpu.writer.writeAll("[CPU DRIVER] ") catch unreachable;
        cpu.writer.writeByte('[') catch unreachable;
        cpu.writer.writeAll(@tagName(scope)) catch unreachable;
@ -18,6 +21,8 @@ pub const std_options = struct {
        cpu.writer.writeAll("] ") catch unreachable;
        lib.format(cpu.writer, format, args) catch unreachable;
        cpu.writer.writeByte('\n') catch unreachable;

        lock.release();
    }

    pub const log_level = lib.log.Level.debug;
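// The spinlock serializes whole log lines; without it, two cores entering logFn
// concurrently could interleave their bytes. The same pattern in isolation
// (sketch only; `defer` guarantees the release on every return path):
fn logLineExample(comptime text: []const u8) void {
    lock.acquire();
    defer lock.release();
    cpu.writer.writeAll(text ++ "\n") catch unreachable;
}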
38 src/cpu/test_runner.zig Normal file
@ -0,0 +1,38 @@
const lib = @import("lib");
|
||||
const assert = lib.assert;
|
||||
const log = lib.log.scoped(.TEST);
|
||||
const privileged = @import("privileged");
|
||||
const QEMU = lib.QEMU;
|
||||
|
||||
const cpu = @import("cpu");
|
||||
|
||||
const RunAllTestResult = error{
|
||||
failure,
|
||||
};
|
||||
|
||||
pub fn runAllTests() RunAllTestResult!void {
|
||||
comptime assert(lib.is_test);
|
||||
const test_functions = @import("builtin").test_functions;
|
||||
var failed_test_count: usize = 0;
|
||||
for (test_functions) |test_function| {
|
||||
test_function.func() catch |err| {
|
||||
log.err("Test failed: {}", .{err});
|
||||
failed_test_count += 1;
|
||||
};
|
||||
}
|
||||
|
||||
const test_count = test_functions.len;
|
||||
assert(QEMU.isa_debug_exit.io_size == @sizeOf(u32));
|
||||
const exit_code = switch (failed_test_count) {
|
||||
0 => blk: {
|
||||
log.info("All {} tests passed.", .{test_count});
|
||||
break :blk .success;
|
||||
},
|
||||
        else => blk: {
            log.info("Ran {} tests. Failed {}.", .{ test_count, failed_test_count });
            break :blk .failure;
        },
    };

    cpu.shutdown(exit_code);
}
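// Context for the exit code: QEMU's isa-debug-exit device makes the QEMU
// process exit with status (value << 1) | 1 when the guest writes `value` to
// its port, which is why the host runner below checks and clears bit 0.
test "isa-debug-exit status transformation" {
    const guest_value: u32 = 0x10; // hypothetical success code
    const host_status = (guest_value << 1) | 1;
    try lib.testing.expectEqual(@as(u32, 0x21), host_status);
}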
@ -251,20 +251,12 @@ pub fn main() anyerror!void {
    if (arguments.log) |log_configuration| {
        var log_what = host.ArrayList(u8).init(wrapped_allocator.zigUnwrap());

        if (log_configuration.guest_errors) {
            try log_what.appendSlice("guest_errors,");
        }

        if (log_configuration.interrupts) {
            try log_what.appendSlice("int,");
        }

        if (!arguments_result.ci and log_configuration.assembly) {
            try log_what.appendSlice("in_asm,");
        }
        if (log_configuration.guest_errors) try log_what.appendSlice("guest_errors,");
        if (log_configuration.interrupts) try log_what.appendSlice("int,");
        if (!arguments_result.ci and log_configuration.assembly) try log_what.appendSlice("in_asm,");

        if (log_what.items.len > 0) {
            //Delete the last comma
            // Delete the last comma
            _ = log_what.pop();

            try argument_list.append("-d");
@ -292,7 +284,7 @@ pub fn main() anyerror!void {
    // GF2, when not found in the PATH, can give problems
    const use_gf = switch (lib.os) {
        .macos => false,
        .linux => true,
        .linux => false,
        else => false,
    };

@ -300,8 +292,7 @@ pub fn main() anyerror!void {
    if (use_gf) {
        try command_line_gdb.append("gf2");
    } else {
        const terminal_emulator = "foot";
        try command_line_gdb.append(terminal_emulator);
        try command_line_gdb.append("kitty");
        try command_line_gdb.append(switch (lib.os) {
            .linux => "gdb",
            .macos => "x86_64-elf-gdb",
@ -340,12 +331,12 @@ pub fn main() anyerror!void {
        try debugger_process.spawn();
    }

    var emulator_process = host.ChildProcess.init(argument_list.items, wrapped_allocator.zigUnwrap());
    var process = host.ChildProcess.init(argument_list.items, wrapped_allocator.zigUnwrap());
    //process.stdout_behavior = .I;
    const emulator_process_result = try emulator_process.spawnAndWait();
    const result = try process.spawnAndWait();

    if (emulator_process_result == .Exited) {
        const exit_code = emulator_process_result.Exited;
    if (result == .Exited) {
        const exit_code = result.Exited;
        if (exit_code & 1 != 0) {
            const mask = lib.maxInt(@TypeOf(exit_code)) - 1;
            const masked_exit_code = exit_code & mask;
@ -363,7 +354,7 @@ pub fn main() anyerror!void {
        } else log.err("QEMU exited with unexpected code: {}. Masked: {}", .{ exit_code, masked_exit_code });
    } else log.err("QEMU exited with unexpected code: {}", .{exit_code});
    } else {
        log.err("QEMU was {s}", .{@tagName(emulator_process_result)});
        log.err("QEMU was {s}", .{@tagName(result)});
    }

    if (debugcon_file_used) {
581 src/lib.zig
@ -1,270 +1,6 @@
const common = @import("common.zig");
|
||||
pub usingnamespace common;
|
||||
|
||||
const compiler_builtin = @import("builtin");
|
||||
pub const cpu = compiler_builtin.cpu;
|
||||
pub const os = compiler_builtin.os.tag;
|
||||
pub const build_mode = compiler_builtin.mode;
|
||||
pub const is_test = compiler_builtin.is_test;
|
||||
|
||||
pub const kb = 1024;
|
||||
pub const mb = kb * 1024;
|
||||
pub const gb = mb * 1024;
|
||||
pub const tb = gb * 1024;
|
||||
|
||||
pub const SizeUnit = enum(u64) {
|
||||
byte = 1,
|
||||
kilobyte = 1024,
|
||||
megabyte = 1024 * 1024,
|
||||
gigabyte = 1024 * 1024 * 1024,
|
||||
terabyte = 1024 * 1024 * 1024 * 1024,
|
||||
};
|
||||
|
||||
pub const std = @import("std");
|
||||
pub const Target = std.Target;
|
||||
pub const Cpu = Target.Cpu;
|
||||
pub const CrossTarget = std.zig.CrossTarget;
|
||||
|
||||
pub const log = std.log;
|
||||
|
||||
pub const data_structures = @import("lib/data_structures.zig");
|
||||
|
||||
pub const Atomic = std.atomic.Atomic;
|
||||
|
||||
pub const Reader = std.io.Reader;
|
||||
pub const Writer = std.io.Writer;
|
||||
|
||||
pub const FixedBufferStream = std.io.FixedBufferStream;
|
||||
pub const fixedBufferStream = std.io.fixedBufferStream;
|
||||
|
||||
pub fn assert(ok: bool) void {
|
||||
if (!ok) {
|
||||
if (@inComptime()) {
|
||||
@compileError("Assert failed!");
|
||||
} else {
|
||||
@panic("Assert failed!");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const deflate = std.compress.deflate;
|
||||
|
||||
const debug = std.debug;
|
||||
pub const print = debug.print;
|
||||
pub const StackIterator = debug.StackIterator;
|
||||
pub const dwarf = std.dwarf;
|
||||
pub const ModuleDebugInfo = std.debug.ModuleDebugInfo;
|
||||
|
||||
pub const elf = std.elf;
|
||||
|
||||
const fmt = std.fmt;
|
||||
pub const format = std.fmt.format;
|
||||
pub const FormatOptions = fmt.FormatOptions;
|
||||
pub const bufPrint = fmt.bufPrint;
|
||||
pub const allocPrint = fmt.allocPrint;
|
||||
pub const comptimePrint = fmt.comptimePrint;
|
||||
pub const parseUnsigned = fmt.parseUnsigned;
|
||||
|
||||
const heap = std.heap;
|
||||
pub const FixedBufferAllocator = heap.FixedBufferAllocator;
|
||||
|
||||
pub const json = std.json;
|
||||
|
||||
const mem = std.mem;
|
||||
pub const ZigAllocator = mem.Allocator;
|
||||
pub const equal = mem.eql;
|
||||
pub const length = mem.len;
|
||||
pub const startsWith = mem.startsWith;
|
||||
pub const endsWith = mem.endsWith;
|
||||
pub const indexOf = mem.indexOf;
|
||||
// Ideal for small inputs
|
||||
pub const indexOfPosLinear = mem.indexOfPosLinear;
|
||||
pub const lastIndexOf = mem.lastIndexOf;
|
||||
pub const asBytes = mem.asBytes;
|
||||
pub const readIntBig = mem.readIntBig;
|
||||
pub const readIntSliceBig = mem.readIntSliceBig;
|
||||
pub const concat = mem.concat;
|
||||
pub const sliceAsBytes = mem.sliceAsBytes;
|
||||
pub const bytesAsSlice = mem.bytesAsSlice;
|
||||
pub const alignForward = mem.alignForward;
|
||||
pub const alignBackward = mem.alignBackward;
|
||||
pub const isAligned = mem.isAligned;
|
||||
pub const isAlignedGeneric = mem.isAlignedGeneric;
|
||||
pub const reverse = mem.reverse;
|
||||
pub const tokenize = mem.tokenize;
|
||||
pub const containsAtLeast = mem.containsAtLeast;
|
||||
pub const sliceTo = mem.sliceTo;
|
||||
pub const swap = mem.swap;
|
||||
|
||||
pub const random = std.rand;
|
||||
|
||||
pub const testing = std.testing;
|
||||
|
||||
pub const sort = std.sort;
|
||||
|
||||
pub fn fieldSize(comptime T: type, field_name: []const u8) comptime_int {
|
||||
var foo: T = undefined;
|
||||
return @sizeOf(@TypeOf(@field(foo, field_name)));
|
||||
}
|
||||
|
||||
const DiffError = error{
|
||||
diff,
|
||||
};
|
||||
|
||||
pub fn diff(file1: []const u8, file2: []const u8) !void {
|
||||
assert(file1.len == file2.len);
|
||||
var different_bytes: u64 = 0;
|
||||
for (file1, 0..) |byte1, index| {
|
||||
const byte2 = file2[index];
|
||||
const is_different_byte = byte1 != byte2;
|
||||
different_bytes += @intFromBool(is_different_byte);
|
||||
if (is_different_byte) {
|
||||
log.debug("Byte [0x{x}] is different: 0x{x} != 0x{x}", .{ index, byte1, byte2 });
|
||||
}
|
||||
}
|
||||
|
||||
if (different_bytes != 0) {
|
||||
log.debug("Total different bytes: 0x{x}", .{different_bytes});
|
||||
return DiffError.diff;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn zeroes(comptime T: type) T {
|
||||
var result: T = undefined;
|
||||
const slice = asBytes(&result);
|
||||
@memset(slice, 0);
|
||||
return result;
|
||||
}
|
||||
|
||||
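`zeroes` above fills any `T` byte-by-byte, so it also works for extern types whose fields lack defaults. A small usage sketch (the `Header` type and the import path are made up for illustration):

const lib = @import("lib.zig"); // assumed import path

const Header = extern struct { magic: u32, size: u64 };

test "zeroes clears every byte" {
    const header = lib.zeroes(Header);
    try lib.testing.expectEqual(@as(u32, 0), header.magic);
    try lib.testing.expectEqual(@as(u64, 0), header.size);
}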
const ascii = std.ascii;
pub const upperString = ascii.upperString;
pub const isUpper = ascii.isUpper;
pub const isAlphabetic = ascii.isAlphabetic;

const std_builtin = std.builtin;
pub const AtomicRmwOp = std_builtin.AtomicRmwOp;
pub const AtomicOrder = std_builtin.AtomicOrder;
pub const Type = std_builtin.Type;
pub const StackTrace = std_builtin.StackTrace;
pub const SourceLocation = std_builtin.SourceLocation;

pub fn FieldType(comptime T: type, comptime name: []const u8) type {
    return @TypeOf(@field(@as(T, undefined), name));
}

// META PROGRAMMING
pub const AutoEnumArray = std.enums.EnumArray;
pub const fields = std.meta.fields;
pub const enumFromInt = std.meta.enumFromInt;
pub const stringToEnum = std.meta.stringToEnum;
pub const Tag = std.meta.Tag;

const math = std.math;
pub const maxInt = math.maxInt;
pub const min = math.min;
pub const divCeil = math.divCeil;
pub const clamp = math.clamp;
pub const isPowerOfTwo = math.isPowerOfTwo;
pub const mul = math.mul;
pub const cast = math.cast;

pub const unicode = std.unicode;

pub const uefi = std.os.uefi;

pub const DiskType = enum(u32) {
    virtio = 0,
    nvme = 1,
    ahci = 2,
    ide = 3,
    memory = 4,
    bios = 5,

    pub const count = enumCount(@This());
};

pub fn enumFields(comptime E: type) []const Type.EnumField {
    return @typeInfo(E).Enum.fields;
}

pub const enumValues = std.enums.values;

pub fn enumCount(comptime E: type) usize {
    return enumFields(E).len;
}
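`enumFields` and `enumCount` are thin wrappers over `@typeInfo` that resolve entirely at comptime; `DiskType.count` above is the pattern they exist for. A sketch of using them in this file's scope:

// Sketch: everything here resolves at comptime, so plain asserts suffice.
comptime {
    assert(DiskType.count == 6); // six variants declared above
    assert(equal(u8, enumFields(DiskType)[0].name, "virtio"));
    assert(enumValues(DiskType).len == DiskType.count);
}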
pub const QEMU = extern struct {
    pub const isa_debug_exit = ISADebugExit{};

    pub const ISADebugExit = extern struct {
        io_base: u8 = 0xf4,
        io_size: u8 = @sizeOf(u32),
    };

    pub const ExitCode = enum(u32) {
        success = 0x10,
        failure = 0x11,
        _,
    };
};

pub const OptimizeMode = std.builtin.OptimizeMode;

pub const Suffix = enum {
    bootloader,
    cpu_driver,
    image,
    complete,

    pub fn fromConfiguration(suffix: Suffix, allocator: ZigAllocator, configuration: common.Configuration, prefix: ?[]const u8) ![]const u8 {
        const cpu_driver_suffix = [_][]const u8{
            @tagName(configuration.optimize_mode),
            "_",
            @tagName(configuration.architecture),
            "_",
            @tagName(configuration.executable_kind),
        };

        const bootloader_suffix = [_][]const u8{
            @tagName(configuration.architecture),
            "_",
            @tagName(configuration.bootloader),
            "_",
            @tagName(configuration.boot_protocol),
        };

        const image_suffix = [_][]const u8{
            @tagName(configuration.optimize_mode),
            "_",
        } ++ bootloader_suffix ++ [_][]const u8{
            "_",
            @tagName(configuration.executable_kind),
        };

        const complete_suffix = image_suffix ++ [_][]const u8{
            "_",
            @tagName(configuration.execution_type),
            "_",
            @tagName(configuration.execution_environment),
        };

        return try std.mem.concat(allocator, u8, &switch (suffix) {
            .cpu_driver => if (prefix) |pf| [1][]const u8{pf} ++ cpu_driver_suffix else cpu_driver_suffix,
            .bootloader => if (prefix) |pf| [1][]const u8{pf} ++ bootloader_suffix else bootloader_suffix,
            .image => if (prefix) |pf| [1][]const u8{pf} ++ image_suffix else image_suffix,
            .complete => if (prefix) |pf| [1][]const u8{pf} ++ complete_suffix else complete_suffix,
        });
    }
};
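`fromConfiguration` just joins tag names with underscores, with an optional prefix prepended. A hypothetical call site (the prefix and the resulting name are illustrative, not taken from the diff):

// Hypothetical usage; the exact tags depend on the Configuration value.
fn imageName(allocator: ZigAllocator, configuration: common.Configuration) ![]const u8 {
    // e.g. "birth_Debug_x86_64_limine_bios_exe" for a Debug x86_64 BIOS image.
    return try Suffix.image.fromConfiguration(allocator, configuration, "birth_");
}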
pub const default_cpu_name = "/cpu";
pub const default_init_file = "/init";

pub const default_disk_size = 64 * 1024 * 1024;
pub const default_sector_size = 0x200;

pub const cache_line_size = 64;

pub const arch = @import("lib/arch.zig");
/// This is done so the allocator can respect allocating from different address spaces
pub const config = @import("lib/config.zig");
@ -281,9 +17,10 @@ const extern_enum_array = @import("lib/extern_enum_array.zig");
pub const EnumArray = extern_enum_array.EnumArray;

pub fn memcpy(noalias destination: []u8, noalias source: []const u8) void {
    @setRuntimeSafety(false);
    // Using this as the Zig implementation is really slow (at least in x86 with soft_float enabled
    // if (cpu.arch == .x86 or cpu.arch == .x86_64 and Target.x86.featureSetHas(cpu.features, .soft_float)) {
    const bytes_left = switch (cpu.arch) {
    // if (common.cpu.arch == .x86 or common.cpu.arch == .x86_64 and common.Target.x86.featureSetHas(common.cpu.features, .soft_float)) {
    const bytes_left = switch (common.cpu.arch) {
        .x86 => asm volatile (
            \\rep movsb
            : [ret] "={ecx}" (-> usize),
@ -301,16 +38,46 @@ pub fn memcpy(noalias destination: []u8, noalias source: []const u8) void {
        else => @compileError("Unreachable"),
    };

    assert(bytes_left == 0);
    common.assert(bytes_left == 0);
    // } else {
    //     @memcpy(destination, source);
    // }
}

// pub fn memset(comptime T: type, slice: []T, elem: T) void {
//     @setRuntimeSafety(false);
//
//     const bytes_left = switch (T) {
//         u8 => switch (common.cpu.arch) {
//             .x86 => asm volatile (
//                 \\rep stosb
//                 : [ret] "={ecx}" (-> usize),
//                 : [slice] "{edi}" (slice.ptr),
//                   [len] "{ecx}" (slice.len),
//                   [element] "{al}" (elem),
//             ),
//             .x86_64 => asm volatile (
//                 \\rep movsb
//                 : [ret] "={rcx}" (-> usize),
//                 : [slice] "{rdi}" (slice.ptr),
//                   [len] "{rcx}" (slice.len),
//                   [element] "{al}" (elem),
//             ),
//             else => @compileError("Unsupported OS"),
//         },
//         else => @compileError("Type " ++ @typeName(T) ++ " not supported"),
//     };
//
//     common.assert(bytes_left == 0);
// }
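`rep movsb` copies `ecx`/`rcx` bytes from `[esi/rsi]` to `[edi/rdi]` and decrements the count register to zero as it goes, which is what makes the `bytes_left == 0` assert above valid. For reference, the same loop in plain Zig (a sketch of the semantics, not the code path the kernel uses):

// Reference semantics of the inline asm above, in plain Zig.
fn memcpyReference(noalias destination: []u8, noalias source: []const u8) void {
    assert(destination.len >= source.len);
    for (source, 0..) |byte, index| {
        destination[index] = byte;
    }
}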
pub fn EnumStruct(comptime Enum: type, comptime Value: type) type {
    const EnumFields = enumFields(Enum);
    const EnumFields = common.enumFields(Enum);
    const MyEnumStruct = @Type(.{
        .Struct = .{
            .layout = .Extern,
            .fields = &blk: {
                var arr = [1]Type.StructField{undefined} ** EnumFields.len;
                var arr = [1]common.Type.StructField{undefined} ** EnumFields.len;
                inline for (EnumFields) |EnumValue| {
                    arr[EnumValue.value] = .{
                        .name = EnumValue.name,
@ -335,8 +102,8 @@ pub fn EnumStruct(comptime Enum: type, comptime Value: type) type {
        pub const Array = MyEnumArray;
    };

    assert(@sizeOf(Union.Struct) == @sizeOf(Union.Array));
    assert(@sizeOf(Union.Array) == @sizeOf(Union));
    common.assert(@sizeOf(Union.Struct) == @sizeOf(Union.Array));
    common.assert(@sizeOf(Union.Array) == @sizeOf(Union));

    return Union;
}
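`EnumStruct` generates an extern struct with one `Value` field per enum variant and pairs it with a plain array of the same size, so the same storage can be addressed by name or by index; the `arr[EnumValue.value]` indexing already assumes dense enum values starting at zero. An instantiation sketch (the union's field names are not visible in this hunk, so only the asserted size invariants are shown):

const Port = enum(u8) { com1 = 0, com2 = 1 };
const Ports = EnumStruct(Port, u16);

comptime {
    assert(@sizeOf(Ports.Struct) == @sizeOf(Ports.Array));
    assert(@sizeOf(Ports.Array) == @sizeOf(Ports));
}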
@ -348,7 +115,7 @@ pub const DirectoryTokenizer = struct {
    total_count: usize,

    pub fn init(string: []const u8) DirectoryTokenizer {
        assert(string.len > 0);
        common.assert(string.len > 0);
        var count: usize = 0;

        if (string[0] == '/') {
@ -386,8 +153,8 @@ pub const DirectoryTokenizer = struct {

            return tokenizer.string[original_index..];
        } else {
            assert(original_index == tokenizer.string.len);
            assert(tokenizer.given_count == tokenizer.total_count);
            common.assert(original_index == tokenizer.string.len);
            common.assert(tokenizer.given_count == tokenizer.total_count);
            return null;
        }
    }
@ -398,8 +165,8 @@ pub const DirectoryTokenizer = struct {
    }

    test "directory tokenizer" {
        log.err("ajskdjsa", .{});
        if (os != .freestanding) {
        common.log.err("ajskdjsa", .{});
        if (common.os != .freestanding) {
            const TestCase = struct {
                path: []const u8,
                expected_result: []const []const u8,
@ -416,13 +183,13 @@ pub const DirectoryTokenizer = struct {
                var result_count: usize = 0;

                while (dir_tokenizer.next()) |dir| {
                    try testing.expect(result_count < results.len);
                    try testing.expectEqualStrings(case.expected_result[result_count], dir);
                    try common.testing.expect(result_count < results.len);
                    try common.testing.expectEqualStrings(case.expected_result[result_count], dir);
                    results[result_count] = dir;
                    result_count += 1;
                }

                try testing.expectEqual(case.expected_result.len, result_count);
                try common.testing.expectEqual(case.expected_result.len, result_count);
            }
        }
    }
@ -445,7 +212,7 @@ pub inline fn maybePtrSub(comptime T: type, ptr: ?*T, element_offset: usize) ?*T
}

test {
    log.err("test not taken into the test suite");
    common.log.err("test not taken into the test suite");
    _ = DirectoryTokenizer;
    _ = Filesystem;
    _ = PartitionTable;
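The harness above drives `DirectoryTokenizer` through `next()` until it returns null. A hypothetical input in the same shape (the exact splitting behavior is an assumption, since only part of the tokenizer is visible in this hunk):

test "directory tokenizer sketch" {
    // Hypothetical input; based on the init() handling of a leading '/',
    // the expected components would be "usr" and then "bin".
    var dir_tokenizer = DirectoryTokenizer.init("/usr/bin");
    while (dir_tokenizer.next()) |dir| {
        common.log.debug("component: {s}", .{dir});
    }
}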
@ -470,7 +237,7 @@ pub const Allocator = extern struct {
    };

    /// Necessary to do this hack
    const Callbacks = switch (cpu.arch) {
    const Callbacks = switch (common.cpu.arch) {
        .x86 => extern struct {
            allocate: *const Allocate.Fn,
            allocate_padding: u32 = 0,
@ -498,7 +265,7 @@ pub const Allocator = extern struct {
        return &result[0];
    }

    pub fn wrap(zig_allocator: ZigAllocator) Wrapped {
    pub fn wrap(zig_allocator: common.ZigAllocator) Wrapped {
        return .{
            .allocator = .{
                .callbacks = .{
@ -512,7 +279,7 @@ pub const Allocator = extern struct {
        };
    }

    pub fn zigUnwrap(allocator: *Allocator) ZigAllocator {
    pub fn zigUnwrap(allocator: *Allocator) common.ZigAllocator {
        return .{
            .ptr = allocator,
            .vtable = &zig_vtable,
@ -526,13 +293,11 @@ pub const Allocator = extern struct {
    };

    pub fn zigAllocate(context: *anyopaque, size: usize, ptr_align: u8, return_address: usize) ?[*]u8 {
        _ = context;
        _ = size;
        _ = ptr_align;
        _ = return_address;
        const allocator: *Allocator = @ptrCast(@alignCast(context));
        // Not understanding why Zig API is like this:
        const alignment = @as(u64, 1) << @as(u6, @intCast(ptr_align));
        const result = allocator.allocateBytes(size, alignment) catch return null;
        assert(result.size >= size);
        return @ptrFromInt(result.address);
        return null;
    }

    pub fn zigResize(context: *anyopaque, buffer: []u8, buffer_alignment: u8, new_length: usize, return_address: usize) bool {
@ -555,14 +320,14 @@ pub const Allocator = extern struct {
        allocator: Allocator,
        zig: extern struct {
            ptr: *anyopaque,
            vtable: *const ZigAllocator.VTable,
            vtable: *const common.ZigAllocator.VTable,
        },

        pub fn unwrap(wrapped_allocator: *Wrapped) *Allocator {
            return &wrapped_allocator.allocator;
        }

        pub fn zigUnwrap(wrapped_allocator: *Wrapped) ZigAllocator {
        pub fn zigUnwrap(wrapped_allocator: *Wrapped) common.ZigAllocator {
            return .{
                .ptr = wrapped_allocator.zig.ptr,
                .vtable = wrapped_allocator.zig.vtable,
@ -572,7 +337,7 @@ pub const Allocator = extern struct {
        pub fn wrappedCallbackAllocate(allocator: *Allocator, size: u64, alignment: u64) Allocator.Allocate.Error!Allocator.Allocate.Result {
            const wrapped_allocator = @fieldParentPtr(Wrapped, "allocator", allocator);
            const zig_allocator = wrapped_allocator.zigUnwrap();
            if (alignment > maxInt(u8)) {
            if (alignment > common.maxInt(u8)) {
                @panic("alignment supported by Zig is less than asked");
            }
            const zig_result = zig_allocator.vtable.alloc(zig_allocator.ptr, size, @as(u8, @intCast(alignment)), @returnAddress());
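`Wrapped` carries both representations side by side: `wrap` captures a `std.mem.Allocator` next to the extern `Allocator`, and `zigUnwrap` hands the original back. A round-trip sketch (assuming `std.heap.page_allocator` as the backing allocator; not taken from the diff):

// Sketch: bridge a Zig allocator into the extern ABI and back.
fn allocatorRoundTrip() !void {
    var wrapped = Allocator.wrap(std.heap.page_allocator);
    const zig_allocator = wrapped.zigUnwrap();
    const bytes = try zig_allocator.alloc(u8, 64);
    defer zig_allocator.free(bytes);
    _ = wrapped.unwrap(); // the extern side, for passing across the ABI
}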
@ -696,7 +461,7 @@ pub fn ELF(comptime bits: comptime_int) type {
            return Parser.Error.invalid_magic;
        }

        if (!equal(u8, &file_header.elf_id, FileHeader.elf_signature)) {
        if (!common.equal(u8, &file_header.elf_id, FileHeader.elf_signature)) {
            return Parser.Error.invalid_signature;
        }

@ -719,7 +484,7 @@ pub fn ELF(comptime bits: comptime_int) type {

        pub const ProgramHeader = switch (is_64) {
            true => extern struct {
                type: @This().Type = .load,
                type: Type = .load,
                flags: Flags, //= @enumToInt(Flags.readable) | @enumToInt(Flags.executable),
                offset: u64,
                virtual_address: u64,
@ -753,7 +518,7 @@ pub fn ELF(comptime bits: comptime_int) type {
                    reserved: u29,

                    comptime {
                        assert(@sizeOf(Flags) == @sizeOf(u32));
                        common.assert(@sizeOf(Flags) == @sizeOf(u32));
                    }
                };
            },
@ -820,8 +585,8 @@ pub fn ELF(comptime bits: comptime_int) type {

pub inline fn safeArchitectureCast(value: anytype) usize {
    return switch (@sizeOf(@TypeOf(value)) > @sizeOf(usize)) {
        true => if (value <= maxInt(usize)) @as(usize, @truncate(value)) else {
            log.err("PANIC: virtual address is longer than usize: 0x{x}", .{value});
        true => if (value <= common.maxInt(usize)) @as(usize, @truncate(value)) else {
            common.log.err("PANIC: virtual address is longer than usize: 0x{x}", .{value});
            @panic("safeArchitectureCast");
        },
        false => value,
@ -833,11 +598,11 @@ pub const DereferenceError = error{
};

pub inline fn tryDereferenceAddress(value: anytype) DereferenceError!usize {
    assert(@sizeOf(@TypeOf(value)) > @sizeOf(usize));
    return if (value <= maxInt(usize)) @as(usize, @truncate(value)) else return DereferenceError.address_bigger_than_usize;
    common.assert(@sizeOf(@TypeOf(value)) > @sizeOf(usize));
    return if (value <= common.maxInt(usize)) @as(usize, @truncate(value)) else return DereferenceError.address_bigger_than_usize;
}

pub fn enumAddNames(comptime enum_fields: []const Type.EnumField, comptime names: []const []const u8) []const Type.EnumField {
pub fn enumAddNames(comptime enum_fields: []const common.Type.EnumField, comptime names: []const []const u8) []const common.Type.EnumField {
    comptime var result = enum_fields;
    const previous_last_value = if (enum_fields.len > 0) enum_fields[enum_fields.len - 1].value else 0;

@ -852,13 +617,13 @@ pub fn enumAddNames(comptime enum_fields: []const Type.EnumField, comptime names
    return result;
}

pub fn ErrorSet(comptime error_names: []const []const u8, comptime predefined_fields: []const Type.EnumField) type {
    comptime var error_fields: []const Type.Error = &.{};
    comptime var enum_items: []const Type.EnumField = predefined_fields;
pub fn ErrorSet(comptime error_names: []const []const u8, comptime predefined_fields: []const common.Type.EnumField) type {
    comptime var error_fields: []const common.Type.Error = &.{};
    comptime var enum_items: []const common.Type.EnumField = predefined_fields;
    comptime var enum_value = enum_items[enum_items.len - 1].value + 1;

    inline for (error_names) |error_name| {
        enum_items = enum_items ++ [1]Type.EnumField{
        enum_items = enum_items ++ [1]common.Type.EnumField{
            .{
                .name = error_name,
                .value = enum_value,
@ -869,23 +634,23 @@ pub fn ErrorSet(comptime error_names: []const []const u8, comptime predefined_fi
    }

    inline for (enum_items) |item| {
        error_fields = error_fields ++ [1]Type.Error{
        error_fields = error_fields ++ [1]common.Type.Error{
            .{
                .name = item.name,
            },
        };
    }

    const EnumType = @Type(Type{
    const EnumType = @Type(common.Type{
        .Enum = .{
            .tag_type = u15,
            .tag_type = u16,
            .fields = enum_items,
            .decls = &.{},
            .is_exhaustive = true,
        },
    });

    const ErrorType = @Type(Type{
    const ErrorType = @Type(common.Type{
        .ErrorSet = error_fields,
    });

@ -895,9 +660,12 @@ pub fn ErrorSet(comptime error_names: []const []const u8, comptime predefined_fi
    };
}
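`ErrorSet` extends a predefined field list with new names and then materializes both an exhaustive enum and a matching error set (the two sides of the diff disagree only on the tag width, u15 vs u16). A hedged instantiation sketch:

// Hypothetical error table; the decls on the returned type are not visible
// in this hunk, so only the construction is shown.
const predefined = [_]Type.EnumField{.{ .name = "ok", .value = 0 }};
const SyscallResult = ErrorSet(&.{ "out_of_memory", "invalid_argument" }, &predefined);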
pub fn getDebugInformation(allocator: ZigAllocator, elf_file: []align(arch.valid_page_sizes[0]) const u8) !ModuleDebugInfo {
    const hdr = @as(*align(1) const elf.Ehdr, @ptrCast(&elf_file[0]));
    if (!equal(u8, hdr.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
pub fn getDebugInformation(allocator: common.ZigAllocator, elf_file: []align(common.default_sector_size) const u8) !common.ModuleDebugInfo {
    const elf = common.elf;
    var module_debug_info: common.ModuleDebugInfo = undefined;
    _ = module_debug_info;
    const hdr = @as(*const elf.Ehdr, @ptrCast(&elf_file[0]));
    if (!common.equal(u8, hdr.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
    if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;

    const endian = .Little;
@ -905,110 +673,94 @@ pub fn getDebugInformation(allocator: ZigAllocator, elf_file: []align(arch.valid
    const shoff = hdr.e_shoff;
    const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx);
    const str_shdr = @as(
        *align(1) const elf.Shdr,
        @ptrCast(&elf_file[cast(usize, str_section_off) orelse return error.Overflow]),
        *const elf.Shdr,
        @ptrCast(@alignCast(&elf_file[common.cast(usize, str_section_off) orelse return error.Overflow])),
    );
    const header_strings = elf_file[str_shdr.sh_offset .. str_shdr.sh_offset + str_shdr.sh_size];
    const shdrs = @as(
        [*]align(1) const elf.Shdr,
        @ptrCast(&elf_file[shoff]),
        [*]const elf.Shdr,
        @ptrCast(@alignCast(&elf_file[shoff])),
    )[0..hdr.e_shnum];

    var sections: dwarf.DwarfInfo.SectionArray = dwarf.DwarfInfo.null_section_array;

    // Combine section list. This takes ownership over any owned sections from the parent scope.
    errdefer for (sections) |section| if (section) |s| if (s.owned) allocator.free(s.data);

    var separate_debug_filename: ?[]const u8 = null;
    _ = separate_debug_filename;
    var separate_debug_crc: ?u32 = null;
    _ = separate_debug_crc;
    var opt_debug_info: ?[]const u8 = null;
    var opt_debug_abbrev: ?[]const u8 = null;
    var opt_debug_str: ?[]const u8 = null;
    var opt_debug_str_offsets: ?[]const u8 = null;
    var opt_debug_line: ?[]const u8 = null;
    var opt_debug_line_str: ?[]const u8 = null;
    var opt_debug_ranges: ?[]const u8 = null;
    var opt_debug_loclists: ?[]const u8 = null;
    var opt_debug_rnglists: ?[]const u8 = null;
    var opt_debug_addr: ?[]const u8 = null;
    var opt_debug_names: ?[]const u8 = null;
    var opt_debug_frame: ?[]const u8 = null;

    for (shdrs) |*shdr| {
        if (shdr.sh_type == elf.SHT_NULL or shdr.sh_type == elf.SHT_NOBITS) continue;
        const name = sliceTo(header_strings[shdr.sh_name..], 0);
        if (shdr.sh_type == elf.SHT_NULL) continue;

        if (equal(u8, name, ".gnu_debuglink")) {
            @panic("WTF");
            // const gnu_debuglink = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
            // const debug_filename = mem.sliceTo(@as([*:0]const u8, @ptrCast(gnu_debuglink.ptr)), 0);
            // const crc_offset = mem.alignForward(usize, @intFromPtr(&debug_filename[debug_filename.len]) + 1, 4) - @intFromPtr(gnu_debuglink.ptr);
            // const crc_bytes = gnu_debuglink[crc_offset .. crc_offset + 4];
            // separate_debug_crc = mem.readIntSliceNative(u32, crc_bytes);
            // separate_debug_filename = debug_filename;
            // continue;
        const name = common.sliceTo(header_strings[shdr.sh_name..], 0);
        if (common.equal(u8, name, ".debug_info")) {
            opt_debug_info = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_abbrev")) {
            opt_debug_abbrev = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_str")) {
            opt_debug_str = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_str_offsets")) {
            opt_debug_str_offsets = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_line")) {
            opt_debug_line = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_line_str")) {
            opt_debug_line_str = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_ranges")) {
            opt_debug_ranges = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_loclists")) {
            opt_debug_loclists = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_rnglists")) {
            opt_debug_rnglists = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_addr")) {
            opt_debug_addr = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_names")) {
            opt_debug_names = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        } else if (common.equal(u8, name, ".debug_frame")) {
            opt_debug_frame = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        }

        var section_index: ?usize = null;
        inline for (@typeInfo(dwarf.DwarfSection).Enum.fields, 0..) |section, i| {
            if (equal(u8, "." ++ section.name, name)) section_index = i;
        }
        if (section_index == null) continue;
        if (sections[section_index.?] != null) continue;

        const section_bytes = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
        sections[section_index.?] = if ((shdr.sh_flags & elf.SHF_COMPRESSED) > 0) blk: {
            var section_stream = fixedBufferStream(section_bytes);
            var section_reader = section_stream.reader();
            const chdr = section_reader.readStruct(elf.Chdr) catch continue;
            if (chdr.ch_type != .ZLIB) continue;

            if (true) @panic("ZLIB");
            break :blk undefined;
            // var zlib_stream = std.compress.zlib.decompressStream(allocator, section_stream.reader()) catch continue;
            // defer zlib_stream.deinit();
            //
            // var decompressed_section = try allocator.alloc(u8, chdr.ch_size);
            // errdefer allocator.free(decompressed_section);
            //
            // const read = zlib_stream.reader().readAll(decompressed_section) catch continue;
            // assert(read == decompressed_section.len);
            //
            // break :blk .{
            //     .data = decompressed_section,
            //     .virtual_address = shdr.sh_addr,
            //     .owned = true,
            // };
        } else .{
            .data = section_bytes,
            .virtual_address = shdr.sh_addr,
            .owned = false,
        };
    }

    const missing_debug_info =
        sections[@intFromEnum(dwarf.DwarfSection.debug_info)] == null or
        sections[@intFromEnum(dwarf.DwarfSection.debug_abbrev)] == null or
        sections[@intFromEnum(dwarf.DwarfSection.debug_str)] == null or
        sections[@intFromEnum(dwarf.DwarfSection.debug_line)] == null;
    assert(!missing_debug_info);

    var di = dwarf.DwarfInfo{
    var di = common.dwarf.DwarfInfo{
        .endian = endian,
        .sections = sections,
        .is_macho = false,
        .debug_info = opt_debug_info orelse return error.MissingDebugInfo,
        .debug_abbrev = opt_debug_abbrev orelse return error.MissingDebugInfo,
        .debug_str = opt_debug_str orelse return error.MissingDebugInfo,
        .debug_str_offsets = opt_debug_str_offsets,
        .debug_line = opt_debug_line orelse return error.MissingDebugInfo,
        .debug_line_str = opt_debug_line_str,
        .debug_ranges = opt_debug_ranges,
        .debug_loclists = opt_debug_loclists,
        .debug_rnglists = opt_debug_rnglists,
        .debug_addr = opt_debug_addr,
        .debug_names = opt_debug_names,
        .debug_frame = opt_debug_frame,
    };

    try dwarf.openDwarfDebugInfo(&di, allocator);

    try common.dwarf.openDwarfDebugInfo(&di, allocator);
    return di;
}

fn chopSlice(ptr: []const u8, offset: u64, size: u64) error{Overflow}![]const u8 {
    const start = cast(usize, offset) orelse return error.Overflow;
    const end = start + (cast(usize, size) orelse return error.Overflow);
    const start = common.cast(usize, offset) orelse return error.Overflow;
    const end = start + (common.cast(usize, size) orelse return error.Overflow);
    return ptr[start..end];
}
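`chopSlice` is the overflow-checked subslice helper the section loop above relies on; offsets and sizes come from 64-bit ELF headers, so both are `cast` down to `usize` first. A small usage sketch:

test "chopSlice stays in bounds" {
    const file = [_]u8{ 1, 2, 3, 4, 5 };
    const section = try chopSlice(&file, 1, 3);
    try testing.expectEqualSlices(u8, &.{ 2, 3, 4 }, section);
}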
pub fn RegionInterface(comptime Region: type) type {
    const type_info = @typeInfo(Region);
    assert(type_info == .Struct);
    assert(type_info.Struct.layout == .Extern);
    assert(type_info.Struct.fields.len == 2);
    const region_fields = type_info.Struct.fields;
    assert(equal(u8, region_fields[0].name, "address"));
    assert(equal(u8, region_fields[1].name, "size"));
    const Addr = region_fields[0].type;
    common.assert(type_info == .Struct);
    common.assert(type_info.Struct.layout == .Extern);
    common.assert(type_info.Struct.fields.len == 2);
    const fields = type_info.Struct.fields;
    common.assert(common.equal(u8, fields[0].name, "address"));
    common.assert(common.equal(u8, fields[1].name, "size"));
    const Addr = fields[0].type;
    const AddrT = getAddrT(Addr);

    return struct {
@ -1021,14 +773,6 @@ pub fn RegionInterface(comptime Region: type) type {
                .size = info.size,
            };
        }

        pub inline fn invalid() Region {
            return Region{
                .address = Addr.invalid(),
                .size = 0,
            };
        }

        pub inline fn fromRaw(info: struct {
            raw_address: AddrT,
            size: AddrT,
@ -1064,7 +808,7 @@ pub fn RegionInterface(comptime Region: type) type {

        pub inline fn fromAnytype(any: anytype, info: struct {}) Region {
            _ = info;
            assert(@typeInfo(@TypeOf(any)) == .Pointer);
            common.assert(@typeInfo(@TypeOf(any)) == .Pointer);
            return Region{
                .address = VirtualAddress.new(@intFromPtr(any)),
                .size = @sizeOf(@TypeOf(any.*)),
@ -1089,7 +833,7 @@ pub fn RegionInterface(comptime Region: type) type {
        }

        pub fn shrinked(region: Region, size: AddrT) Region {
            assert(size <= region.size);
            common.assert(size <= region.size);
            const result = Region{
                .address = region.address,
                .size = size,
@ -1098,34 +842,17 @@ pub fn RegionInterface(comptime Region: type) type {
            return result;
        }

        const TakeSliceError = error{
            not_enough_space,
        };

        pub inline fn slice(region: *const Region, size: AddrT) Region {
            assert(size <= region.size);
            const result = .{
        pub inline fn takeSlice(region: *Region, size: AddrT) Region {
            common.assert(size <= region.size);
            const result = Region{
                .address = region.address,
                .size = size,
            };
            region.* = region.offset(size);

            return result;
        }

        pub inline fn takeSlice(region: *Region, size: AddrT) !Region {
            if (size <= region.size) {
                const result = Region{
                    .address = region.address,
                    .size = size,
                };
                region.* = region.offset(size);

                return result;
            }

            return TakeSliceError.not_enough_space;
        }

        pub inline fn split(region: Region, comptime count: comptime_int) [count]Region {
            const region_size = @divExact(region.size, count);
            var result: [count]Region = undefined;
@ -1192,9 +919,9 @@ pub const VirtualMemoryRegion = extern struct {

fn getAddrT(comptime AddressEnum: type) type {
    const type_info = @typeInfo(AddressEnum);
    assert(type_info == .Enum);
    common.assert(type_info == .Enum);
    const AddrT = type_info.Enum.tag_type;
    assert(switch (cpu.arch) {
    common.assert(switch (common.cpu.arch) {
        .x86 => @sizeOf(AddrT) == 2 * @sizeOf(usize),
        else => @sizeOf(AddrT) == @sizeOf(usize),
    });
@ -1296,12 +1023,12 @@ pub const VirtualAddress = enum(u64) {
    }

    pub inline fn toPhysicalAddress(virtual_address: VirtualAddress) PhysicalAddress {
        assert(virtual_address.value() >= config.cpu_driver_higher_half_address);
        common.assert(virtual_address.value() >= config.cpu_driver_higher_half_address);
        return @as(PhysicalAddress, @enumFromInt(virtual_address.value() - config.cpu_driver_higher_half_address));
    }

    pub inline fn toGuaranteedPhysicalAddress(virtual_address: VirtualAddress) PhysicalAddress {
        assert(virtual_address.value() < config.cpu_driver_higher_half_address);
        common.assert(virtual_address.value() < config.cpu_driver_higher_half_address);
        return PhysicalAddress.new(virtual_address.value());
    }
};
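The two `takeSlice` variants above differ in failure mode: one asserts `size <= region.size`, the fallible one returns `TakeSliceError.not_enough_space` instead; both advance the region past the slice they hand out. A usage sketch for the fallible variant, assuming a region type built from `RegionInterface`:

// Sketch: carve one 0x1000-byte slice off the front of a region.
fn takeSliceExample() !void {
    var region = PhysicalMemoryRegion.fromRaw(.{ .raw_address = 0x100000, .size = 0x4000 });
    const first_page = try region.takeSlice(0x1000);
    // first_page.size == 0x1000; region is now 0x3000 bytes starting 0x1000 higher.
    _ = first_page;
}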
@ -15,8 +15,6 @@ pub const current = switch (@import("builtin").cpu.arch) {
pub const x86 = @import("arch/x86.zig");
pub const x86_64 = @import("arch/x86_64.zig");

pub const paging = x86_64.paging;

pub const default_page_size = current.default_page_size;
pub const reasonable_page_size = current.reasonable_page_size;

@ -5,7 +5,7 @@ pub const CPUID = extern struct {
    ecx: u32,
};

pub inline fn cpuid(leaf: u32, subleaf: u32) CPUID {
pub inline fn cpuid(leaf: u32) CPUID {
    var eax: u32 = undefined;
    var ebx: u32 = undefined;
    var edx: u32 = undefined;
@ -18,8 +18,6 @@ pub inline fn cpuid(leaf: u32, subleaf: u32) CPUID {
        [edx] "={edx}" (edx),
        [ecx] "={ecx}" (ecx),
        : [leaf] "{eax}" (leaf),
          [subleaf] "{ecx}" (subleaf),
        : "memory"
    );

    return CPUID{
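The one-argument side of the diff drops the `subleaf` input (and its `"{ecx}"` constraint). Leaf 0 is the usual smoke test: `eax` returns the highest supported leaf while `ebx`/`edx`/`ecx` spell the vendor string. A sketch against the single-leaf signature (assuming `lib.asBytes` is in scope in this file):

// Sketch: read the CPU vendor string with the single-leaf signature.
fn vendorString() [12]u8 {
    const id = cpuid(0);
    var vendor: [12]u8 = undefined;
    @memcpy(vendor[0..4], lib.asBytes(&id.ebx));
    @memcpy(vendor[4..8], lib.asBytes(&id.edx));
    @memcpy(vendor[8..12], lib.asBytes(&id.ecx));
    return vendor; // e.g. "GenuineIntel" or "AuthenticAMD"
}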
@ -2,39 +2,6 @@ const lib = @import("lib");
const x86 = @import("x86/common.zig");
pub usingnamespace x86;

pub const paging = struct {
    pub const page_table_entry_size = @sizeOf(u64);
    pub const page_table_size = lib.arch.valid_page_sizes[0];
    pub const page_table_entry_count = @divExact(page_table_size, page_table_entry_size);
    pub const page_table_alignment = page_table_size;
    pub const page_table_mask = page_table_entry_count - 1;
    pub const user_address_space_start = 0x200_000;
    pub const user_address_space_end = 0x8000_0000_0000;
    pub const root_page_table_level: Level = switch (Level) {
        Level4 => Level.PML4,
        Level5 => @compileError("TODO"),
        else => @compileError("Unknown level"),
    };

    pub const Level = Level4;

    pub const Level4 = enum(u2) {
        PML4 = 0,
        PDP = 1,
        PD = 2,
        PT = 3,

        pub const count = lib.enumCount(@This());
    };

    pub const Level5 = enum(u3) {};

    comptime {
        lib.assert(page_table_alignment == page_table_size);
        lib.assert(page_table_size == lib.arch.valid_page_sizes[0]);
    }
};

pub const valid_page_sizes = [3]comptime_int{ 0x1000, 0x1000 * 0x200, 0x1000 * 0x200 * 0x200 };
pub const reverse_valid_page_sizes = blk: {
    var reverse = valid_page_sizes;
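The paging constants above encode the standard x86_64 geometry: 4 KiB tables of 8-byte entries give 512 entries per level, and the mask is the entry count minus one. As a worked check:

comptime {
    const entry_size = @sizeOf(u64); // 8-byte page table entry
    const table_size = 0x1000; // one base page per table
    lib.assert(@divExact(table_size, entry_size) == 512); // page_table_entry_count
    lib.assert(512 - 1 == 0x1ff); // page_table_mask
}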
@ -1,127 +0,0 @@
const lib = @import("lib");
const Allocator = lib.Allocator;
const assert = lib.assert;
const maxInt = lib.maxInt;

pub fn BitsetU64(comptime bits: comptime_int) type {
    assert(bits <= @bitSizeOf(u64));
    const max_value = maxInt(@Type(.{
        .Int = .{
            .signedness = .unsigned,
            .bits = bits,
        },
    }));

    return packed struct(u64) {
        value: u64 = 0,

        const Error = error{
            block_full,
        };

        pub inline fn allocate(bitset: *@This()) !u6 {
            if (bitset.value & max_value != max_value) {
                // log.debug("Bitset: 0b{b}", .{bitset.value});
                const result: u6 = @intCast(@ctz(~bitset.value));
                // log.debug("Result: {}", .{result});
                assert(!bitset.isSet(result));
                bitset.set(result);
                return result;
            } else {
                return error.block_full;
            }
        }

        pub inline fn set(bitset: *@This(), index: u6) void {
            assert(index < bits);
            bitset.value |= (@as(u64, 1) << index);
        }

        pub inline fn clear(bitset: *@This(), index: u6) void {
            assert(index < bits);
            bitset.value &= ~(@as(u64, 1) << index);
        }

        pub inline fn isSet(bitset: @This(), index: u6) bool {
            assert(index < bits);
            return bitset.value & (@as(u64, 1) << index) != 0;
        }

        pub inline fn isFull(bitset: @This()) bool {
            return bitset.value == max_value;
        }
    };
}
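`allocate` hands out the lowest clear bit via `@ctz(~value)` and fails with `block_full` once all `bits` positions are taken; `clear` makes a slot reusable. A usage sketch:

test "bitset hands out the lowest free bit" {
    var bitset = BitsetU64(4){};
    try lib.testing.expectEqual(@as(u6, 0), try bitset.allocate());
    try lib.testing.expectEqual(@as(u6, 1), try bitset.allocate());
    bitset.clear(0);
    try lib.testing.expectEqual(@as(u6, 0), try bitset.allocate()); // lowest again
}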
pub fn SparseArray(comptime T: type) type {
    return extern struct {
        ptr: [*]T,
        len: usize,
        capacity: usize,

        const Array = @This();

        pub const Error = error{
            index_out_of_bounds,
        };

        pub fn allocate(array: *Array, allocator: *Allocator) !*T {
            try array.ensureCapacity(allocator, array.len + 1);
            const index = array.len;
            array.len += 1;
            const slice = array.ptr[0..array.len];
            return &slice[index];
        }

        pub fn append(array: *Array, allocator: *Allocator, element: T) !usize {
            try array.ensureCapacity(allocator, array.len + 1);
            const index = array.len;
            array.len += 1;
            const slice = array.ptr[0..array.len];
            slice[index] = element;

            return index;
        }

        fn ensureCapacity(array: *Array, allocator: *Allocator, desired_capacity: usize) !void {
            if (array.capacity < desired_capacity) {
                // Allocate a new array
                const new_slice = try allocator.allocate(T, desired_capacity);
                if (array.capacity == 0) {
                    array.ptr = new_slice.ptr;
                    array.capacity = new_slice.len;
                } else {
                    // Reallocate
                    if (array.len > 0) {
                        @memcpy(new_slice[0..array.len], array.ptr[0..array.len]);
                    }

                    // TODO: free

                    array.ptr = new_slice.ptr;
                    array.capacity = new_slice.len;
                }
            }
        }

        pub fn indexOf(array: *Array, ptr: *T) usize {
            const base_int = @intFromPtr(array.ptr);
            const ptr_int = @intFromPtr(ptr);
            return @divExact(ptr_int - base_int, @sizeOf(T));
        }

        pub inline fn get(array: *Array, index: usize) T {
            assert(array.len > index);
            const slice = array.ptr[0..array.len];
            return slice[index];
        }

        pub inline fn getChecked(array: *Array, index: usize) !T {
            if (array.len > index) {
                return array.get(index);
            } else {
                return error.index_out_of_bounds;
            }
        }
    };
}
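`append` grows by allocate-and-copy (the old buffer is deliberately leaked for now, per the TODO) and returns the element's index; `indexOf` recovers that index from a pointer by pointer arithmetic. A sketch of the round trip, assuming a `*lib.Allocator` is available:

// Sketch of the append/indexOf round trip.
fn sparseArrayExample(allocator: *lib.Allocator) !void {
    var array = lib.zeroes(SparseArray(u32));
    const index = try array.append(allocator, 42);
    const element_ptr = &array.ptr[0..array.len][index];
    lib.assert(array.indexOf(element_ptr) == index);
    const value = try array.getChecked(index);
    lib.assert(value == 42);
}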
@ -353,7 +353,10 @@ pub const Cache = extern struct {
        const aligned_file_size = lib.alignForward(usize, file_size, cache.disk.sector_size);
        const lba = cache.clusterToSector(first_cluster);

        log.debug("Start disk callback", .{});

        const result = try cache.disk.callbacks.read(cache.disk, @divExact(aligned_file_size, cache.disk.sector_size), lba, file_buffer);
        log.debug("End disk callback", .{});
        return result.buffer[0..file_size];
    }

@ -10,7 +10,6 @@ const assert = lib.assert;
const log = lib.log;
const maxInt = lib.maxInt;
const Allocator = lib.Allocator;
const VirtualAddress = lib.VirtualAddress;

const bootloader = @import("bootloader");

@ -65,8 +64,7 @@ pub const Mapping = extern struct {
        execute: bool = false,
        user: bool = false,
        secret: bool = false,
        huge_pages: bool = true,
        reserved: u25 = 0,
        reserved: u26 = 0,

        pub inline fn empty() Flags {
            return .{};
@ -88,7 +86,6 @@ pub const PageAllocator = struct {
        count: u16 = 1,
        level: arch.paging.Level,
        user: bool,
        virtual_address: arch.paging.IndexedVirtualAddress,
    };

    pub inline fn allocatePageTable(page_allocator: PageAllocator, options: AllocatePageTablesOptions) !lib.PhysicalMemoryRegion {
@ -97,7 +94,6 @@ pub const PageAllocator = struct {
            .level = options.level,
            .level_valid = true,
            .user = options.user,
            .virtual_address = options.virtual_address,
        });
        return result;
    }
@ -108,7 +104,6 @@ pub const PageAllocator = struct {
        level: arch.paging.Level = undefined,
        level_valid: bool = false,
        user: bool = false,
        virtual_address: arch.paging.IndexedVirtualAddress,
    };

    const ContextType = enum(u32) {
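The packed `Flags` must stay exactly 32 bits, so dropping the one-bit `huge_pages` on one side of the diff is matched by widening `reserved` from u25 to u26. A comptime check of that accounting with an illustrative field set (the fields preceding `execute` are not visible in this hunk):

// Illustrative only: the real upstream field order/names may differ.
const FlagsSketch = packed struct(u32) {
    write: bool = false,
    cache_disable: bool = false,
    global: bool = false,
    execute: bool = false,
    user: bool = false,
    secret: bool = false,
    reserved: u26 = 0,
};

comptime {
    lib.assert(@bitSizeOf(FlagsSketch) == 32);
}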
@ -10,6 +10,7 @@ const zeroes = lib.zeroes;
const Allocator = lib.Allocator;

const privileged = @import("privileged");
const Heap = privileged.Heap;
const PageAllocator = privileged.PageAllocator;

const valid_page_sizes = lib.arch.x86_64.valid_page_sizes;
@ -21,12 +22,33 @@ const PhysicalAddress = lib.PhysicalAddress;
const VirtualAddress = lib.VirtualAddress;
const PhysicalMemoryRegion = lib.PhysicalMemoryRegion;
const PhysicalAddressSpace = lib.PhysicalAddressSpace;
pub const Mapping = privileged.Mapping;
const Mapping = privileged.Mapping;

const bootloader = @import("bootloader");

const paging = lib.arch.x86_64.paging;
pub usingnamespace paging;
const page_table_level_count = 4;
pub const page_table_mask = page_table_entry_count - 1;
pub fn entryCount(comptime level: Level, limit: u64) u10 {
    const index = baseFromVirtualAddress(level, limit - 1);
    const result = @as(u10, index) + 1;
    // @compileLog(limit, index, result);
    return result;
}

// Comptime test
comptime {
    const va = 134217728;
    const indices = computeIndices(va);
    const pml4_index = baseFromVirtualAddress(.PML4, va);
    const pdp_index = baseFromVirtualAddress(.PDP, va);
    const pd_index = baseFromVirtualAddress(.PD, va);
    const pt_index = baseFromVirtualAddress(.PT, va);
    assert(pml4_index == indices[@intFromEnum(Level.PML4)]);
    assert(pdp_index == indices[@intFromEnum(Level.PDP)]);
    assert(pd_index == indices[@intFromEnum(Level.PD)]);
    assert(pt_index == indices[@intFromEnum(Level.PT)]);
}

const max_level_possible = 5;
pub const IndexedVirtualAddress = packed struct(u64) {
@ -45,24 +67,12 @@ pub const IndexedVirtualAddress = packed struct(u64) {
        return VirtualAddress.new(raw);
    }
}

    pub fn toIndices(indexed: IndexedVirtualAddress) [Level.count]u9 {
        var result: [Level.count]u9 = undefined;
        inline for (@typeInfo(Level).Enum.fields) |enum_field| {
            result[@intFromEnum(@field(Level, enum_field.name))] = @field(indexed, enum_field.name);
        }
        return result;
    }
};

const Level = enum(u2) {
    PML4 = 0,
    PDP = 1,
    PD = 2,
    PT = 3,

    const count = @typeInfo(Level).Enum.fields.len;
};
pub fn baseFromVirtualAddress(comptime level: Level, virtual_address: u64) u9 {
    const indexed = @as(IndexedVirtualAddress, @bitCast(virtual_address));
    return @field(indexed, @tagName(level));
}
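Each paging level consumes 9 bits of the address (512 entries per table) above the 12-bit page offset, which is all `baseFromVirtualAddress` does when it reads one 9-bit field of the packed struct. Worked out for the comptime test's address:

comptime {
    const va: u64 = 134217728; // 0x800_0000, 128 MiB
    assert((va >> 12) & 0x1ff == 0); // PT index
    assert((va >> 21) & 0x1ff == 64); // PD index
    assert((va >> 30) & 0x1ff == 0); // PDP index
    assert((va >> 39) & 0x1ff == 0); // PML4 index
}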
pub const CPUPageTables = extern struct {
|
||||
pml4_table: PhysicalAddress,
|
||||
@ -72,7 +82,7 @@ pub const CPUPageTables = extern struct {
|
||||
|
||||
const base = 0xffff_ffff_8000_0000;
|
||||
const top = base + pte_count * lib.arch.valid_page_sizes[0];
|
||||
const pte_count = paging.page_table_entry_count - left_ptables;
|
||||
const pte_count = page_table_entry_count - left_ptables;
|
||||
pub const left_ptables = 4;
|
||||
pub const pml4_index = 0x1ff;
|
||||
pub const pdp_index = 0x1fe;
|
||||
@ -84,12 +94,14 @@ pub const CPUPageTables = extern struct {
|
||||
1; // PT
|
||||
const allocated_size = allocated_table_count * 0x1000;
|
||||
|
||||
const page_table_base = top;
|
||||
|
||||
comptime {
|
||||
assert(top + (left_ptables * lib.arch.valid_page_sizes[0]) == base + lib.arch.valid_page_sizes[1]);
|
||||
}
|
||||
|
||||
pub fn initialize(page_allocator: PageAllocator) !CPUPageTables {
|
||||
const page_table_allocation = try page_allocator.allocate(page_allocator.context, allocated_size, lib.arch.valid_page_sizes[0], .{ .virtual_address = @bitCast(@as(u64, 0)) });
|
||||
const page_table_allocation = try page_allocator.allocate(page_allocator.context, allocated_size, lib.arch.valid_page_sizes[0], .{});
|
||||
|
||||
const page_tables = CPUPageTables{
|
||||
.pml4_table = page_table_allocation.address,
|
||||
@ -151,8 +163,8 @@ pub const CPUPageTables = extern struct {
|
||||
if (asked_virtual_address.offset(size).value() > top) return CPUPageTables.MapError.upper_limit_exceeded;
|
||||
|
||||
const flags = general_flags.toArchitectureSpecific();
|
||||
const indexed: IndexedVirtualAddress = @bitCast(asked_virtual_address.value());
|
||||
const index = indexed.PT;
|
||||
const indices = computeIndices(asked_virtual_address.value());
|
||||
const index = indices[indices.len - 1];
|
||||
const iteration_count = @as(u32, @intCast(size >> lib.arch.page_shifter(lib.arch.valid_page_sizes[0])));
|
||||
const p_table = cpu_page_tables.p_table.toIdentityMappedVirtualAddress().access(*PTable);
|
||||
const p_table_slice = p_table[index .. index + iteration_count];
|
||||
@ -169,26 +181,10 @@ pub const CPUPageTables = extern struct {
|
||||
pub const Specific = extern struct {
|
||||
cr3: cr3 align(8),
|
||||
|
||||
pub fn current() Specific {
|
||||
return .{
|
||||
.cr3 = cr3.read(),
|
||||
};
|
||||
}
|
||||
|
||||
pub inline fn makeCurrent(specific: Specific) void {
|
||||
specific.getUserCr3().write();
|
||||
}
|
||||
|
||||
pub inline fn makeCurrentPrivileged(specific: Specific) void {
|
||||
specific.cr3.write();
|
||||
}
|
||||
|
||||
pub fn fromPhysicalRegion(physical_memory_region: PhysicalMemoryRegion) Specific {
|
||||
return Specific{
|
||||
.cr3 = @bitCast(physical_memory_region.address.value()),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn fromPageTables(cpu_page_tables: CPUPageTables) Specific {
|
||||
return .{
|
||||
.cr3 = cr3.fromAddress(cpu_page_tables.pml4_table),
|
||||
@ -203,7 +199,7 @@ pub const Specific = extern struct {
|
||||
if (size >= reverse_page_size) {
|
||||
const is_smallest_page_size = reverse_page_index == reverse_valid_page_sizes.len - 1;
|
||||
|
||||
if (is_smallest_page_size or !general_flags.huge_pages) {
|
||||
if (is_smallest_page_size) {
|
||||
var virtual_address = asked_virtual_address.value();
|
||||
var physical_address = asked_physical_address.value();
|
||||
|
||||
@ -251,8 +247,8 @@ pub const Specific = extern struct {
|
||||
}
|
||||
|
||||
fn mapGeneric(specific: Specific, asked_physical_address: PhysicalAddress, asked_virtual_address: VirtualAddress, size: u64, comptime asked_page_size: comptime_int, flags: MemoryFlags, page_allocator: PageAllocator) !void {
|
||||
if (!isAlignedGeneric(u64, asked_physical_address.value(), lib.arch.valid_page_sizes[0])) {
|
||||
log.debug("PA: {}. Page size: 0x{x}", .{ asked_physical_address, asked_page_size });
|
||||
if (!isAlignedGeneric(u64, asked_physical_address.value(), asked_page_size)) {
|
||||
//log.debug("PA: {}. Page size: 0x{x}", .{ asked_physical_address, asked_page_size });
|
||||
@panic("Misaligned physical address in mapGeneric");
|
||||
}
|
||||
if (!isAlignedGeneric(u64, asked_virtual_address.value(), asked_page_size)) {
|
||||
@ -270,7 +266,7 @@ pub const Specific = extern struct {
|
||||
// TODO: batch better
|
||||
switch (asked_page_size) {
|
||||
// 1 GB
|
||||
lib.arch.valid_page_sizes[0] * paging.page_table_entry_count * paging.page_table_entry_count => {
|
||||
lib.arch.valid_page_sizes[0] * page_table_entry_count * page_table_entry_count => {
|
||||
while (virtual_address < top_virtual_address) : ({
|
||||
physical_address += asked_page_size;
|
||||
virtual_address += asked_page_size;
|
||||
@ -279,7 +275,7 @@ pub const Specific = extern struct {
|
||||
}
|
||||
},
|
||||
// 2 MB
|
||||
lib.arch.valid_page_sizes[0] * paging.page_table_entry_count => {
|
||||
lib.arch.valid_page_sizes[0] * page_table_entry_count => {
|
||||
while (virtual_address < top_virtual_address) : ({
|
||||
physical_address += asked_page_size;
|
||||
virtual_address += asked_page_size;
|
||||
@ -301,34 +297,67 @@ pub const Specific = extern struct {
|
||||
}
|
||||
|
||||
fn map1GBPage(specific: Specific, physical_address: u64, virtual_address: u64, flags: MemoryFlags, page_allocator: PageAllocator) !void {
|
||||
const indexed: IndexedVirtualAddress = @bitCast(virtual_address);
|
||||
const indices = computeIndices(virtual_address);
|
||||
|
||||
const pml4_table = try getPML4Table(specific.cr3);
|
||||
const pdp_table = try getPDPTable(pml4_table, indexed, flags, page_allocator);
|
||||
try mapPageTable1GB(pdp_table, indexed, physical_address, flags);
|
||||
const pdp_table = try getPDPTable(pml4_table, indices, flags, page_allocator);
|
||||
try mapPageTable1GB(pdp_table, indices, physical_address, flags);
|
||||
}
|
||||
|
||||
fn map2MBPage(specific: Specific, physical_address: u64, virtual_address: u64, flags: MemoryFlags, page_allocator: PageAllocator) !void {
|
||||
const indexed: IndexedVirtualAddress = @bitCast(virtual_address);
|
||||
const indices = computeIndices(virtual_address);
|
||||
|
||||
const pml4_table = try getPML4Table(specific.cr3);
|
||||
const pdp_table = try getPDPTable(pml4_table, indexed, flags, page_allocator);
|
||||
const pd_table = try getPDTable(pdp_table, indexed, flags, page_allocator);
|
||||
const pdp_table = try getPDPTable(pml4_table, indices, flags, page_allocator);
|
||||
const pd_table = try getPDTable(pdp_table, indices, flags, page_allocator);
|
||||
|
||||
mapPageTable2MB(pd_table, indexed, physical_address, flags) catch |err| {
|
||||
mapPageTable2MB(pd_table, indices, physical_address, flags) catch |err| {
|
||||
log.err("Virtual address: 0x{x}. Physical address: 0x{x}", .{ virtual_address, physical_address });
|
||||
return err;
|
||||
};
|
||||
}
|
||||
|
||||
fn map4KPage(specific: Specific, physical_address: u64, virtual_address: u64, flags: MemoryFlags, page_allocator: PageAllocator) !void {
|
||||
const indexed: IndexedVirtualAddress = @bitCast(virtual_address);
|
||||
const indices = computeIndices(virtual_address);
|
||||
|
||||
const pml4_table = try getPML4Table(specific.cr3);
|
||||
const pdp_table = try getPDPTable(pml4_table, indexed, flags, page_allocator);
|
||||
const pd_table = try getPDTable(pdp_table, indexed, flags, page_allocator);
|
||||
const p_table = try getPTable(pd_table, indexed, flags, page_allocator);
|
||||
try mapPageTable4KB(p_table, indexed, physical_address, flags);
|
||||
const pdp_table = try getPDPTable(pml4_table, indices, flags, page_allocator);
|
||||
const pd_table = try getPDTable(pdp_table, indices, flags, page_allocator);
|
||||
const p_table = try getPTable(pd_table, indices, flags, page_allocator);
|
||||
try mapPageTable4KB(p_table, indices, physical_address, flags);
|
||||
}
|
||||
|
||||
pub inline fn switchTo(specific: *Specific, execution_mode: lib.TraditionalExecutionMode) void {
|
||||
const mask = ~@as(u64, 1 << 12);
|
||||
const masked_cr3 = (@as(u64, @bitCast(specific.cr3)) & mask);
|
||||
const privileged_or = (@as(u64, @intFromEnum(execution_mode)) << 12);
|
||||
const new_cr3 = @as(cr3, @bitCast(masked_cr3 | privileged_or));
|
||||
specific.cr3 = new_cr3;
|
||||
}
|
||||
|
||||
pub inline fn copyHigherHalfCommon(cpu_specific: Specific, pml4_physical_address: PhysicalAddress) void {
|
||||
const cpu_side_pml4_table = pml4_physical_address.toHigherHalfVirtualAddress().access(*PML4Table);
|
||||
const privileged_cpu_pml4_table = try getPML4Table(cpu_specific.cr3);
|
||||
for (cpu_side_pml4_table[0x100..], privileged_cpu_pml4_table[0x100..]) |*pml4_entry, cpu_pml4_entry| {
|
||||
pml4_entry.* = cpu_pml4_entry;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn copyHigherHalfPrivileged(cpu_specific: Specific, pml4_physical_address: PhysicalAddress) void {
|
||||
cpu_specific.copyHigherHalfCommon(pml4_physical_address);
|
||||
}
|
||||
|
||||
pub fn copyHigherHalfUser(cpu_specific: Specific, pml4_physical_address: PhysicalAddress, page_allocator: *PageAllocator) !void {
|
||||
cpu_specific.copyHigherHalfCommon(pml4_physical_address);
|
||||
|
||||
const pml4_table = pml4_physical_address.toHigherHalfVirtualAddress().access(*PML4Table);
|
||||
const pml4_entry = pml4_table[0x1ff];
|
||||
const pml4_entry_address = PhysicalAddress.new(unpackAddress(pml4_entry));
|
||||
const pdp_table = pml4_entry_address.toHigherHalfVirtualAddress().access(*PDPTable);
const new_pdp_table_allocation = try page_allocator.allocate(0x1000, 0x1000);
const new_pdp_table = new_pdp_table_allocation.toHigherHalfVirtualAddress().access(PDPTE);
@memcpy(new_pdp_table, pdp_table);
new_pdp_table[0x1fd] = @as(PDPTE, @bitCast(@as(u64, 0)));
}

pub const TranslateError = error{
@ -344,7 +373,7 @@ pub const Specific = extern struct {
};

pub fn translateAddress(specific: Specific, virtual_address: VirtualAddress, flags: MemoryFlags) !PhysicalAddress {
const indexed: IndexedVirtualAddress = @bitCast(virtual_address.value());
const indices = computeIndices(virtual_address.value());
const is_desired = virtual_address.value() == 0xffff_ffff_8001_f000;

const pml4_table = try getPML4Table(specific.cr3);
@ -353,10 +382,10 @@ pub const Specific = extern struct {
// }

//log.debug("pml4 table: 0x{x}", .{@ptrToInt(pml4_table)});
const pml4_index = indexed.PML4;
const pml4_index = indices[@intFromEnum(Level.PML4)];
const pml4_entry = pml4_table[pml4_index];
if (!pml4_entry.present) {
log.err("Virtual address: 0x{x}.\nPML4 pointer: 0x{x}\nPML4 index: {}.\nValue: {}\n", .{ virtual_address.value(), @intFromPtr(pml4_table), pml4_index, pml4_entry });
log.err("Virtual address: 0x{x}.\nPML4 index: {}.\nValue: {}\n", .{ virtual_address.value(), pml4_index, pml4_entry });
return TranslateError.pml4_entry_not_present;
}

@ -369,12 +398,12 @@ pub const Specific = extern struct {
return TranslateError.pml4_entry_address_null;
}

const pdp_table = try getPDPTable(pml4_table, indexed, undefined, null);
const pdp_table = try getPDPTable(pml4_table, indices, undefined, null);
if (is_desired) {
_ = try specific.translateAddress(VirtualAddress.new(@intFromPtr(pdp_table)), .{});
}
//log.debug("pdp table: 0x{x}", .{@ptrToInt(pdp_table)});
const pdp_index = indexed.PDP;
const pdp_index = indices[@intFromEnum(Level.PDP)];
const pdp_entry = &pdp_table[pdp_index];
if (!pdp_entry.present) {
log.err("PDP index {} not present in PDP table 0x{x}", .{ pdp_index, @intFromPtr(pdp_table) });
@ -406,7 +435,7 @@ pub const Specific = extern struct {
_ = try specific.translateAddress(VirtualAddress.new(@intFromPtr(pd_table)), .{});
}
//log.debug("pd table: 0x{x}", .{@ptrToInt(pd_table)});
const pd_index = indexed.PD;
const pd_index = indices[@intFromEnum(Level.PD)];
const pd_entry = &pd_table[pd_index];
if (!pd_entry.present) {
log.err("PD index: {}", .{pd_index});
@ -439,10 +468,11 @@ pub const Specific = extern struct {
_ = try specific.translateAddress(VirtualAddress.new(@intFromPtr(p_table)), .{});
}
// log.debug("p table: 0x{x}", .{@ptrToInt(p_table)});
const pt_index = indexed.PT;
const pt_index = indices[@intFromEnum(Level.PT)];
const pt_entry = &p_table[pt_index];
if (!pt_entry.present) {
log.err("Virtual address 0x{x} not mapped", .{virtual_address.value()});
log.err("Indices: {any}", .{indices});
log.err("PTE: 0x{x}", .{@intFromPtr(pt_entry)});
log.err("PDE: 0x{x}", .{@intFromPtr(pd_entry)});
log.err("PDPE: 0x{x}", .{@intFromPtr(pdp_entry)});
@ -461,20 +491,115 @@ pub const Specific = extern struct {
return pt_entry_address;
}
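A minimal usage sketch for the walk above (not part of the diff; `specific` is assumed to hold the active address space, as in the surrounding code):

const example_va = VirtualAddress.new(0xffff_ffff_8001_f000);
const physical = try specific.translateAddress(example_va, .{});
log.debug("0x{x} -> 0x{x}", .{ example_va.value(), physical.value() });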
pub fn setMappingFlags(specific: Specific, virtual_address: u64, flags: Mapping.Flags) !void {
const indices = computeIndices(virtual_address);

const vas_cr3 = specific.cr3;

const pml4_physical_address = vas_cr3.getAddress();

const pml4_table = try accessPageTable(pml4_physical_address, *PML4Table);
const pml4_entry = pml4_table[indices[@intFromEnum(Level.PML4)]];
if (!pml4_entry.present) {
return TranslateError.pml4_entry_not_present;
}

const pml4_entry_address = PhysicalAddress.new(unpackAddress(pml4_entry));
if (pml4_entry_address.value() == 0) {
return TranslateError.pml4_entry_address_null;
}

const pdp_table = try accessPageTable(pml4_entry_address, *PDPTable);
const pdp_entry = pdp_table[indices[@intFromEnum(Level.PDP)]];
if (!pdp_entry.present) {
return TranslateError.pdp_entry_not_present;
}

const pdp_entry_address = PhysicalAddress.new(unpackAddress(pdp_entry));
if (pdp_entry_address.value() == 0) {
return TranslateError.pdp_entry_address_null;
}

const pd_table = try accessPageTable(pdp_entry_address, *PDTable);
const pd_entry = pd_table[indices[@intFromEnum(Level.PD)]];
if (!pd_entry.present) {
return TranslateError.pd_entry_not_present;
}

const pd_entry_address = PhysicalAddress.new(unpackAddress(pd_entry));
if (pd_entry_address.value() == 0) {
return TranslateError.pd_entry_address_null;
}

const pt_table = try accessPageTable(pd_entry_address, *PTable);
const pt_entry = &pt_table[indices[@intFromEnum(Level.PT)]];
if (!pt_entry.present) {
return TranslateError.pd_entry_not_present;
}

pt_entry.write = flags.write;
pt_entry.user = flags.user;
pt_entry.page_level_cache_disable = flags.cache_disable;
pt_entry.global = flags.global;
pt_entry.execute_disable = !flags.execute;
}
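A hedged sketch of calling setMappingFlags (field names assumed from Mapping.Flags as used above; note that `execute` maps onto the inverted execute_disable/NX bit):

// Make an existing 4 KiB mapping read-only and non-executable:
try specific.setMappingFlags(0xffff_ffff_8001_f000, .{
.write = false,
.execute = false,
});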
pub fn debugMemoryMap(specific: Specific) !void {
log.debug("[START] Memory map dump 0x{x}\n", .{specific.cr3.getAddress().value()});

const pml4 = try specific.getCpuPML4Table();

for (pml4, 0..) |*pml4te, pml4_index| {
if (pml4te.present) {
const pdp_table = try accessPageTable(PhysicalAddress.new(unpackAddress(pml4te.*)), *PDPTable);

for (pdp_table, 0..) |*pdpte, pdp_index| {
if (pdpte.present) {
if (pdpte.page_size) {
continue;
}

const pd_table = try accessPageTable(PhysicalAddress.new(unpackAddress(pdpte.*)), *PDTable);

for (pd_table, 0..) |*pdte, pd_index| {
if (pdte.present) {
if (pdte.page_size) @panic("bbbb");

const p_table = try accessPageTable(PhysicalAddress.new(unpackAddress(pdte.*)), *PTable);

for (p_table, 0..) |*pte, pt_index| {
if (pte.present) {
const indexed_virtual_address = IndexedVirtualAddress{
.PML4 = @as(u9, @intCast(pml4_index)),
.PDP = @as(u9, @intCast(pdp_index)),
.PD = @as(u9, @intCast(pd_index)),
.PT = @as(u9, @intCast(pt_index)),
};

const virtual_address = indexed_virtual_address.toVirtualAddress();
const physical_address = unpackAddress(pte.*);
log.debug("0x{x} -> 0x{x}", .{ virtual_address.value(), physical_address });
}
}
}
}
}
}
}
}

log.debug("[END] Memory map dump", .{});
}
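The dump above rebuilds each virtual address from its four table indices. A sketch of what IndexedVirtualAddress.toVirtualAddress presumably does (layout assumed from the u9 fields above: 9-bit indices over a 12-bit page offset, with x86_64 canonical sign extension from bit 47):

fn toVirtualAddressSketch(indexed: IndexedVirtualAddress) u64 {
const raw = (@as(u64, indexed.PML4) << 39) | (@as(u64, indexed.PDP) << 30) | (@as(u64, indexed.PD) << 21) | (@as(u64, indexed.PT) << 12);
// Canonical form: bits 63:48 must copy bit 47.
return if ((raw & (1 << 47)) != 0) raw | 0xffff_0000_0000_0000 else raw;
}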
inline fn getUserCr3(specific: Specific) cr3 {
assert(specific.isPrivileged());
return @as(cr3, @bitCast(@as(u64, @bitCast(specific.cr3)) | paging.page_table_size));
assert(@as(u64, @bitCast(specific.cr3)) & page_table_size == 0);
return @as(cr3, @bitCast(@as(u64, @bitCast(specific.cr3)) | page_table_size));
}

pub inline fn getCpuPML4Table(specific: Specific) !*PML4Table {
assert(specific.isPrivileged());
assert(@as(u64, @bitCast(specific.cr3)) & page_table_size == 0);
return try specific.getPML4TableUnchecked();
}

fn isPrivileged(specific: Specific) bool {
return @as(u64, @bitCast(specific.cr3)) & paging.page_table_size == 0;
}

pub inline fn getUserPML4Table(specific: Specific) !*PML4Table {
return try getPML4Table(specific.getUserCr3());
}
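The user/privileged CR3 pair relies on an allocation invariant rather than a stored flag: the privileged PML4 is assumed to sit in a 4 KiB-aligned frame with the user PML4 in the very next frame, so ORing in page_table_size (0x1000) converts one CR3 into the other and bit 12 alone distinguishes them, which is exactly what isPrivileged tests:

// privileged_cr3 & 0x1000 == 0
// user_cr3       == privileged_cr3 | 0x1000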
@ -484,6 +609,8 @@ pub const Specific = extern struct {
}
};

const Indices = [enumCount(Level)]u16;

const MapError = error{
already_present_4kb,
already_present_2mb,
@ -523,8 +650,8 @@ fn getPML4Table(cr3r: cr3) !*PML4Table {
return pml4_table;
}

fn getPDPTable(pml4_table: *PML4Table, virtual_address: IndexedVirtualAddress, flags: MemoryFlags, maybe_page_allocator: ?PageAllocator) !*PDPTable {
const index = virtual_address.PML4;
fn getPDPTable(pml4_table: *PML4Table, indices: Indices, flags: MemoryFlags, maybe_page_allocator: ?PageAllocator) !*PDPTable {
const index = indices[@intFromEnum(Level.PML4)];
const entry_pointer = &pml4_table[index];

const table_physical_address = physical_address_blk: {
@ -537,7 +664,6 @@ fn getPDPTable(pml4_table: *PML4Table, virtual_address: IndexedVirtualAddress, f
const entry_allocation = try page_allocator.allocatePageTable(.{
.level = .PDP,
.user = flags.user,
.virtual_address = virtual_address,
});

entry_pointer.* = PML4TE{
@ -580,8 +706,8 @@ pub inline fn getPageEntry(comptime Entry: type, physical_address: u64, flags: M
};
}

fn mapPageTable1GB(pdp_table: *PDPTable, indexed: IndexedVirtualAddress, physical_address: u64, flags: MemoryFlags) MapError!void {
const entry_index = indexed.PDP;
fn mapPageTable1GB(pdp_table: *PDPTable, indices: Indices, physical_address: u64, flags: MemoryFlags) MapError!void {
const entry_index = indices[@intFromEnum(Level.PDP)];
const entry_pointer = &pdp_table[entry_index];

if (entry_pointer.present) return MapError.already_present_1gb;
@ -591,8 +717,8 @@ fn mapPageTable1GB(pdp_table: *PDPTable, indexed: IndexedVirtualAddress, physica
entry_pointer.* = @as(PDPTE, @bitCast(getPageEntry(PDPTE_1GB, physical_address, flags)));
}

fn mapPageTable2MB(pd_table: *PDTable, indexed: IndexedVirtualAddress, physical_address: u64, flags: MemoryFlags) !void {
const entry_index = indexed.PD;
fn mapPageTable2MB(pd_table: *PDTable, indices: Indices, physical_address: u64, flags: MemoryFlags) !void {
const entry_index = indices[@intFromEnum(Level.PD)];
const entry_pointer = &pd_table[entry_index];
const entry_value = entry_pointer.*;

@ -601,11 +727,13 @@ fn mapPageTable2MB(pd_table: *PDTable, indexed: IndexedVirtualAddress, physical_
return MapError.already_present_2mb;
}

assert(isAlignedGeneric(u64, physical_address, valid_page_sizes[1]));

entry_pointer.* = @as(PDTE, @bitCast(getPageEntry(PDTE_2MB, physical_address, flags)));
}

fn mapPageTable4KB(p_table: *PTable, indexed: IndexedVirtualAddress, physical_address: u64, flags: MemoryFlags) !void {
const entry_index = indexed.PT;
fn mapPageTable4KB(p_table: *PTable, indices: Indices, physical_address: u64, flags: MemoryFlags) !void {
const entry_index = indices[@intFromEnum(Level.PT)];
const entry_pointer = &p_table[entry_index];

if (entry_pointer.present) {
@ -622,8 +750,8 @@ const ToImplementError = error{
page_size,
};

fn getPDTable(pdp_table: *PDPTable, indexed: IndexedVirtualAddress, flags: MemoryFlags, page_allocator: PageAllocator) !*PDTable {
const entry_index = indexed.PDP;
fn getPDTable(pdp_table: *PDPTable, indices: Indices, flags: MemoryFlags, page_allocator: PageAllocator) !*PDTable {
const entry_index = indices[@intFromEnum(Level.PDP)];
const entry_pointer = &pdp_table[entry_index];

const table_physical_address = physical_address_blk: {
@ -637,7 +765,6 @@ fn getPDTable(pdp_table: *PDPTable, indexed: IndexedVirtualAddress, flags: Memor
const entry_allocation = try page_allocator.allocatePageTable(.{
.level = .PD,
.user = flags.user,
.virtual_address = indexed,
});

entry_pointer.* = PDPTE{
@ -654,8 +781,8 @@ fn getPDTable(pdp_table: *PDPTable, indexed: IndexedVirtualAddress, flags: Memor
return try accessPageTable(table_physical_address, *PDTable);
}

fn getPTable(pd_table: *PDTable, indexed: IndexedVirtualAddress, flags: MemoryFlags, page_allocator: PageAllocator) !*PTable {
const entry_pointer = &pd_table[indexed.PD];
fn getPTable(pd_table: *PDTable, indices: Indices, flags: MemoryFlags, page_allocator: PageAllocator) !*PTable {
const entry_pointer = &pd_table[indices[@intFromEnum(Level.PD)]];
const table_physical_address = physical_address_blk: {
const entry_value = entry_pointer.*;
if (entry_value.present) {
@ -664,11 +791,7 @@ fn getPTable(pd_table: *PDTable, indexed: IndexedVirtualAddress, flags: MemoryFl
return ToImplementError.page_size;
} else break :physical_address_blk PhysicalAddress.new(unpackAddress(entry_value));
} else {
const entry_allocation = try page_allocator.allocatePageTable(.{
.level = .PT,
.user = flags.user,
.virtual_address = indexed,
});
const entry_allocation = try page_allocator.allocatePageTable(.{ .level = .PT, .user = flags.user });

entry_pointer.* = PDTE{
.present = true,
@ -688,6 +811,21 @@ const half_entry_count = (@sizeOf(PML4Table) / @sizeOf(PML4TE)) / 2;

const needed_physical_memory_for_bootstrapping_cpu_driver_address_space = @sizeOf(PML4Table) + @sizeOf(PDPTable) * 256;
pub fn computeIndices(virtual_address: u64) Indices {
var indices: Indices = undefined;
var va = virtual_address;
va = va >> 12;
indices[3] = @as(u9, @truncate(va));
va = va >> 9;
indices[2] = @as(u9, @truncate(va));
va = va >> 9;
indices[1] = @as(u9, @truncate(va));
va = va >> 9;
indices[0] = @as(u9, @truncate(va));

return indices;
}
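A worked example (computed by hand) for the kernel address 0xffff_ffff_8001_f000 used as is_desired earlier; each level consumes 9 bits above the 12-bit page offset:

// indices[0] (PML4) = 0x1ff = 511
// indices[1] (PDP)  = 0x1fe = 510
// indices[2] (PD)   = 0
// indices[3] (PT)   = 0x1f  = 31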
pub inline fn newFlags(general_flags: Mapping.Flags) MemoryFlags {
return MemoryFlags{
.write = general_flags.write,
@ -718,15 +856,28 @@ pub const MemoryFlags = packed struct(u64) {

const address_mask: u64 = 0x0000_00ff_ffff_f000;

pub const Level = Level4;

pub const Level4 = enum(u2) {
PML4 = 0,
PDP = 1,
PD = 2,
PT = 3,

pub const count = lib.enumCount(@This());
};

pub const Level5 = enum(u3) {};

pub fn EntryTypeMapSize(comptime page_size: comptime_int) usize {
return switch (paging.Level) {
paging.Level4 => switch (page_size) {
return switch (Level) {
Level4 => switch (page_size) {
lib.arch.valid_page_sizes[0] => 4,
lib.arch.valid_page_sizes[1] => 3,
lib.arch.valid_page_sizes[2] => 2,
else => @compileError("Unknown page size"),
},
paging.Level5 => @compileError("TODO"),
Level5 => @compileError("TODO"),
else => @compileError("unreachable"),
};
}
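Usage sketch for the comptime helper above: a 4 KiB mapping walks all four levels, a 2 MiB mapping stops at a PDTE_2MB, and a 1 GiB mapping stops at a PDPTE_1GB, so under 4-level paging these hold:

comptime {
assert(EntryTypeMapSize(lib.arch.valid_page_sizes[0]) == 4);
assert(EntryTypeMapSize(lib.arch.valid_page_sizes[1]) == 3);
assert(EntryTypeMapSize(lib.arch.valid_page_sizes[2]) == 2);
}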
@ -735,28 +886,28 @@ pub fn EntryTypeMap(comptime page_size: comptime_int) [EntryTypeMapSize(page_siz
const map_size = EntryTypeMapSize(page_size);
const Result = [map_size]type;
var result: Result = undefined;
switch (paging.Level) {
paging.Level4, paging.Level5 => {
if (@hasField(paging.Level, "pml5")) {
switch (Level) {
Level4, Level5 => {
if (@hasField(Level, "pml5")) {
@compileError("TODO: type_map[@enumToInt(Level.PML5)] =");
}

result[@intFromEnum(paging.Level.PML4)] = PML4TE;
result[@intFromEnum(Level.PML4)] = PML4TE;

if (page_size == lib.arch.valid_page_sizes[2]) {
assert(map_size == 2 + @intFromBool(paging.Level == paging.Level5));
result[@intFromEnum(paging.Level.PDP)] = PDPTE_1GB;
assert(map_size == 2 + @intFromBool(Level == Level5));
result[@intFromEnum(Level.PDP)] = PDPTE_1GB;
} else {
result[@intFromEnum(paging.Level.PDP)] = PDPTE;
result[@intFromEnum(Level.PDP)] = PDPTE;

if (page_size == lib.arch.valid_page_sizes[1]) {
assert(map_size == @as(usize, 3) + @intFromBool(paging.Level == paging.Level5));
result[@intFromEnum(paging.Level.PD)] = PDTE_2MB;
assert(map_size == @as(usize, 3) + @intFromBool(Level == Level5));
result[@intFromEnum(Level.PD)] = PDTE_2MB;
} else {
assert(page_size == lib.arch.valid_page_sizes[0]);

result[@intFromEnum(paging.Level.PD)] = PDTE;
result[@intFromEnum(paging.Level.PT)] = PTE;
result[@intFromEnum(Level.PD)] = PDTE;
result[@intFromEnum(Level.PT)] = PTE;
}
}
},
@ -922,7 +1073,16 @@ pub const PTE = packed struct(u64) {
}
};

pub const PML4Table = [paging.page_table_entry_count]PML4TE;
pub const PDPTable = [paging.page_table_entry_count]PDPTE;
pub const PDTable = [paging.page_table_entry_count]PDTE;
pub const PTable = [paging.page_table_entry_count]PTE;
pub const PML4Table = [page_table_entry_count]PML4TE;
pub const PDPTable = [page_table_entry_count]PDPTE;
pub const PDTable = [page_table_entry_count]PDTE;
pub const PTable = [page_table_entry_count]PTE;
pub const page_table_entry_size = @sizeOf(u64);
pub const page_table_size = lib.arch.valid_page_sizes[0];
pub const page_table_entry_count = @divExact(page_table_size, page_table_entry_size);
pub const page_table_alignment = page_table_size;

comptime {
assert(page_table_alignment == page_table_size);
assert(page_table_size == lib.arch.valid_page_sizes[0]);
}
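With 8-byte entries and 4 KiB tables the constants above work out to page_table_entry_count = 0x1000 / 8 = 512 entries per table, matching the u9 index width used by computeIndices and IndexedVirtualAddress.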
118 src/user.zig
@ -4,20 +4,24 @@ const assert = lib.assert;
const ExecutionMode = lib.Syscall.ExecutionMode;

const birth = @import("birth");
pub const Command = birth.interface.Command;
pub const Interface = birth.interface.Descriptor;
pub const Scheduler = birth.Scheduler;
const capabilities = birth.capabilities;
pub const Syscall = birth.capabilities.Syscall;

pub const arch = @import("user/arch.zig");
pub const capabilities = @import("user/capabilities.zig");
const core_state = @import("user/core_state.zig");
pub const CoreState = core_state.CoreState;
pub const PinnedState = core_state.PinnedState;
pub const libc = @import("user/libc.zig");
pub const thread = @import("user/thread.zig");
pub const Thread = thread.Thread;
pub const process = @import("user/process.zig");
pub const Virtual = @import("user/virtual.zig");
const vas = @import("user/virtual_address_space.zig");
const VirtualAddress = lib.VirtualAddress;
pub const VirtualAddressSpace = vas.VirtualAddressSpace;
pub const MMUAwareVirtualAddressSpace = vas.MMUAwareVirtualAddressSpace;

pub const PhysicalMap = @import("user/physical_map.zig").PhysicalMap;
pub const PhysicalMemoryRegion = @import("user/physical_memory_region.zig").PhysicalMemoryRegion;
pub const SlotAllocator = @import("user/slot_allocator.zig").SlotAllocator;

comptime {
@export(arch._start, .{ .linkage = .Strong, .name = "_start" });
@ -25,8 +29,8 @@ comptime {

pub const writer = lib.Writer(void, Writer.Error, Writer.write){ .context = {} };
const Writer = extern struct {
const syscall = Interface(.io, .log);
const Error = Writer.syscall.Error;
const syscall = Syscall(.io, .log);
const Error = Writer.syscall.ErrorSet.Error;

fn write(_: void, bytes: []const u8) Error!usize {
const result = try Writer.syscall.blocking(bytes);
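Usage sketch (hedged: `print` is assumed here because lib.Writer mirrors the standard Zig writer interface): all user-space logging funnels through the (.io, .log) syscall via this writer, so ordinary formatted printing works:

try writer.print("hello from user space\n", .{});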
@ -48,16 +52,22 @@ pub fn zigPanic(message: []const u8, _: ?*lib.StackTrace, _: ?usize) noreturn {
}

pub fn panic(comptime format: []const u8, arguments: anytype) noreturn {
var buffer: [0x100]u8 = undefined;
const message: []const u8 = lib.bufPrint(&buffer, format, arguments) catch "Failed to get panic message!";
lib.log.scoped(.PANIC).err(format, arguments);
while (true) {
Interface(.process, .panic).blocking(.{
.message = message,
.exit_code = 1,
}) catch |err| log.err("Exit failed: {}", .{err});
Syscall(.process, .exit).blocking(false) catch |err| log.err("Exit failed: {}", .{err});
}
}

pub const Scheduler = extern struct {
time_slice: u32,
core_id: u32,
core_state: CoreState,
};

pub inline fn currentScheduler() *Scheduler {
return arch.currentScheduler();
}

fn schedulerInitDisabled(scheduler: *arch.Scheduler) void {
// Architecture-specific initialization
scheduler.generic.time_slice = 1;
@ -65,38 +75,18 @@ fn schedulerInitDisabled(scheduler: *arch.Scheduler) void {
}

pub var is_init = false;
pub var command_buffer: Command.Buffer = undefined;
const entry_count = 50;
pub var command_buffer: birth.CommandBuffer = undefined;

const CommandBufferCreateError = error{
invalid_entry_count,
};

fn createCommandBuffer(options: Command.Buffer.CreateOptions) !Command.Buffer {
// TODO: allow kernel to chop slices of memories
try capabilities.setupCommandFrame(Command.Submission, options.submission_entry_count);
try capabilities.setupCommandFrame(Command.Completion, options.completion_entry_count);
@panic("TODO: createCommandBuffer");
}

pub export fn start(scheduler: *Scheduler, arg_init: bool) callconv(.C) noreturn {
pub export fn start(scheduler: *arch.Scheduler, arg_init: bool) callconv(.C) noreturn {
assert(arg_init);
is_init = arg_init;
if (is_init) {
assert(scheduler.common.setup_stack_lock.load(.Monotonic));
assert(scheduler.common.generic.setup_stack_lock.load(.Monotonic));
}

initialize() catch |err| panic("Failed to initialize: {}", .{err});
@import("root").main() catch |err| panic("Failed to execute main: {}", .{err});

while (true) {
@panic("TODO: after main");
}
}

fn initialize() !void {
currentScheduler().initializeAllocator();
_ = try Virtual.AddressSpace.create();
assert(scheduler.common.generic.disabled);
scheduler.initDisabled();
// command_buffer = Syscall(.cpu, .get_command_buffer).blocking(&command_buffer) catch @panic("Unable to get command buffer");
Syscall(.cpu, .shutdown).blocking({}) catch unreachable;
}

// export fn birthInitializeDisabled(scheduler: *arch.Scheduler, arg_init: bool) callconv(.C) noreturn {
@ -108,7 +98,45 @@ fn initialize() !void {
// }

// Barrelfish: vregion
pub inline fn currentScheduler() *birth.Scheduler {
const result = arch.maybeCurrentScheduler().?;
return result;
}
pub const VirtualMemoryRegion = extern struct {
virtual_address_space: *VirtualAddressSpace,
physical_region: *PhysicalMemoryRegion,
offset: usize,
size: usize,
address: VirtualAddress,
flags: Flags,
next: ?*VirtualMemoryRegion = null,

pub const Flags = packed struct(u8) {
read: bool = false,
write: bool = false,
execute: bool = false,
cache_disabled: bool = false,
preferred_page_size: u2 = 0,
write_combining: bool = false,
reserved: u1 = 0,
};
};

pub const MoreCore = extern struct {
const InitializationError = error{
invalid_page_size,
};

pub fn init(page_size: usize) InitializationError!void {
blk: inline for (lib.arch.valid_page_sizes) |valid_page_size| {
if (valid_page_size == page_size) break :blk;
} else {
return InitializationError.invalid_page_size;
}

const morecore_state = process.getMoreCoreState();
morecore_state.mmu_state = try MMUAwareVirtualAddressSpace.initAligned(SlotAllocator.getDefault(), lib.arch.valid_page_sizes[1], lib.arch.valid_page_sizes[0], .{ .read = true, .write = true });

@panic("TODO: MoreCore.init");
}

pub const State = extern struct {
mmu_state: MMUAwareVirtualAddressSpace,
};
};
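The labeled inline for/else in MoreCore.init is a comptime-unrolled membership test: the loop breaks out of blk on a match and the else branch only runs when no valid page size matched. An equivalent standalone sketch:

fn isValidPageSize(page_size: usize) bool {
inline for (lib.arch.valid_page_sizes) |valid_page_size| {
if (valid_page_size == page_size) return true;
}
return false;
}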
@ -16,6 +16,28 @@ const SlotAllocator = user.SlotAllocator;
const Thread = user.Thread;
const VirtualAddressSpace = user.VirtualAddressSpace;

pub const Scheduler = extern struct {
common: birth.arch.UserScheduler,
generic: user.Scheduler,

pub fn initDisabled(scheduler: *Scheduler) void {
_ = scheduler;
// TODO:
// *set entry points?
// *set tls registers?
}

pub noinline fn restore(scheduler: *Scheduler, register_arena: *const RegisterArena) noreturn {
assert(scheduler.common.generic.disabled);
assert(scheduler.common.generic.has_work);

assert(register_arena.registers.rip > lib.arch.valid_page_sizes[0]);
assert(register_arena.registers.rflags.IF and register_arena.registers.rflags.reserved0);

register_arena.contextSwitch();
}
};
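The rflags assertions in restore encode two x86_64 facts: IF (bit 9) must be set so the resumed thread runs with interrupts enabled, and bit 1 of RFLAGS is architecturally always 1, so a cleared reserved0 indicates a corrupted register arena rather than a legitimate state. The rip check similarly rejects entry points inside the first page, where null-pointer-like values would land.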
// CRT0
pub fn _start() callconv(.Naked) noreturn {
asm volatile (
@ -24,6 +46,8 @@ pub fn _start() callconv(.Naked) noreturn {
:
: [startFunction] "r" (user.start),
);

unreachable;
}

pub inline fn setInitialState(register_arena: *RegisterArena, entry: VirtualAddress, stack_virtual_address: VirtualAddress, arguments: birth.syscall.Arguments) void {
@ -56,3 +80,37 @@ pub inline fn maybeCurrentScheduler() ?*user.Scheduler {
: "memory"
);
}

pub inline fn currentScheduler() *user.Scheduler {
const result = maybeCurrentScheduler().?;
return result;
}

/// This is an interface to user.PhysicalMap, providing the architecture-specific functionality
pub const PhysicalMapInterface = struct {
pub fn determineAddress(physical_map: *PhysicalMap, physical_memory_region: PhysicalMemoryRegion, alignment: usize) !VirtualAddress {
_ = physical_memory_region;
_ = alignment;
assert(physical_map.virtual_address_space.regions != null);
log.debug("PMap: 0x{x}", .{@intFromPtr(physical_map.virtual_address_space.regions)});
log.debug("PMap: {?}", .{physical_map.virtual_address_space.regions});
@panic("TODO: PhysicalMapInterface.determineAddress");
}

pub fn initializeCurrent(physical_map: *PhysicalMap) !void {
_ = physical_map;
log.warn("TODO: PhysicalMapInterface.initializeCurrent", .{});
}

pub fn init(virtual_address_space: *VirtualAddressSpace, page_level: u3, slot_allocator: *SlotAllocator) !PhysicalMap {
var result = PhysicalMap{
.virtual_address_space = virtual_address_space,
.slot_allocator = slot_allocator,
};
_ = page_level;

try result.initPageTableManagement();

@panic("TODO: PhysicalMap.init");
}
};
@ -6,7 +6,7 @@ PHDRS {
}

SECTIONS {
. = 0x200000;
. = 0x600000;
. = ALIGN(4K);
.text . : {
*(.text*)
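The only functional change in this linker script moves the user image's base address from 0x200000 (2 MiB) to 0x600000 (6 MiB); the ALIGN(4K) directive keeps .text page-aligned at either base.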
@ -1,85 +1,20 @@
const lib = @import("lib");
const log = lib.log;
const assert = lib.assert;
const birth = @import("birth");
const user = @import("user");
const Interface = user.Interface;

const Command = birth.interface.Command;

// TODO: ref
pub fn frameCreate(bytes: usize) !birth.capabilities.RAM {
return mappableCapabilityCreate(.cpu_memory, bytes);
pub fn frameCreate(ref: usize, bytes: usize) !usize {
return mappableCapabilityCreate(ref, .cpu_memory, bytes);
}

const CommandBufferFrameType = enum {
command_buffer_completion,
command_buffer_submission,
};

pub fn setupCommandFrame(comptime QueueType: type, entry_count: usize) !void {
assert(entry_count > 0);
comptime assert(@alignOf(QueueType) <= @sizeOf(QueueType.Header));
const total_size = lib.alignForward(usize, @sizeOf(QueueType.Header) + entry_count * @sizeOf(QueueType), lib.arch.valid_page_sizes[0]);
const capability = switch (QueueType) {
Command.Submission => .command_buffer_submission,
Command.Completion => .command_buffer_completion,
else => @compileError("Unexpected type"),
};

const allocation = try Interface(.ram, .allocate).blocking(total_size);
const dst_cap_frame = try retype(@bitCast(allocation), capability);
const flags = .{
.write = QueueType == Command.Submission,
.execute = false,
};
_ = try Interface(capability, .map).blocking(.{
.frame = dst_cap_frame,
.flags = flags,
});

@panic("TODO: setup frame");
}
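Sizing sketch for setupCommandFrame (entry and header sizes assumed for illustration): with a 16-byte header, 64-byte entries, and entry_count = 50, total_size = alignForward(16 + 50 * 64, 0x1000) = alignForward(3216, 0x1000) = 0x1000, i.e. the whole queue fits in a single 4 KiB page. Note that only submission queues are mapped writable; completion queues stay read-only for user space.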
fn mappableCapabilityCreate(capability: birth.capabilities.Type.Mappable, bytes: usize) !birth.capabilities.RAM {
fn mappableCapabilityCreate(ref: usize, mappable_capability: birth.capabilities.Type.Mappable, bytes: usize) !usize {
_ = mappable_capability;
_ = ref;
assert(bytes > 0);

return RamDescendant.create(capability, bytes);
}

const Ram = extern struct {
pub fn allocate(size: usize) !usize {
_ = size;
log.err("TODO: allocate", .{});
return error.not_implemented;
}
};

const RamDescendant = extern struct {
capability: usize,
size: usize,

pub fn create(capability: birth.capabilities.Type.Mappable, size: usize) !birth.capabilities.RAM {
const allocation = try Interface(.ram, .allocate).blocking(size);
const generic_capability = switch (capability) {
inline else => |mappable_cap| @field(birth.interface.Capability, @tagName(mappable_cap)),
};
const result = try retype(@bitCast(allocation), generic_capability);

// TODO: check if the previous capability needs to be deleted (because maybe it should be deleted at the retype operation)
// try destroy(@bitCast(allocation));
return @bitCast(result);
}
};

// TODO: make this more complex and generic to handle all cases
pub fn retype(source: birth.interface.Reference, capability: birth.interface.Capability) !birth.interface.Reference {
const new_reference = try Interface(.ram, .retype).blocking(.{ .source = @bitCast(source), .destination = capability });
return new_reference;
}

pub fn destroy(capability: birth.capabilities.Reference) !void {
_ = capability;
log.err("TODO: destroy", .{});
return error.not_implemented;
fn ramDescendantCreate(
ref: usize,
) !usize {
_ = ref;
}
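A hedged sketch of the allocate-then-retype pattern that RamDescendant.create implements: a fresh RAM capability is narrowed into a more specific capability type, here cpu_memory:

const allocation = try Interface(.ram, .allocate).blocking(0x1000);
const frame = try retype(@bitCast(allocation), .cpu_memory);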
@ -1,6 +1,27 @@
const user = @import("user");
const MoreCore = user.MoreCore;
const PhysicalMap = user.PhysicalMap;
const PhysicalMemoryRegion = user.PhysicalMemoryRegion;
const SlotAllocator = user.SlotAllocator;
const VirtualAddressSpace = user.VirtualAddressSpace;
const VirtualMemoryRegion = user.VirtualMemoryRegion;

pub const PagingState = extern struct {
virtual_address_space: VirtualAddressSpace,
physical_map: PhysicalMap,
};

pub const PinnedState = extern struct {
physical_memory_region: PhysicalMemoryRegion.Pinned,
virtual_memory_region: VirtualMemoryRegion,
offset: usize,
// TODO: lists
};

pub const CoreState = extern struct {
paging: PagingState,
slot_allocator: SlotAllocator.State,
virtual_address_space: VirtualAddressSpace.State,
pinned: PinnedState,
more_core: MoreCore.State,
};
@ -1,6 +1,7 @@
const user = @import("user");

pub export fn malloc(size: usize) ?*anyopaque {
_ = size;
@panic("TODO: malloc");
const morecore_state = user.process.getMoreCoreState();
const result = morecore_state.mmu_state.map(size) catch return null;
return result.ptr;
}
62 src/user/mmu_aware_virtual_address_space.zig Normal file
@ -0,0 +1,62 @@
const lib = @import("lib");
const assert = lib.assert;
const log = lib.log.scoped(.MMUAwareVirtualAddressSpace);

const user = @import("user");
const PhysicalMemoryRegion = user.PhysicalMemoryRegion;
const SlotAllocator = user.SlotAllocator;
const VirtualMemoryRegion = user.VirtualMemoryRegion;

pub const MMUAwareVirtualAddressSpace = extern struct {
size: usize,
alignment: usize,
consumed: usize = 0,
/// This is an index into the architecture-specific page sizes
page_size: u8,
slot_allocator: *SlotAllocator,
physical_memory_region: PhysicalMemoryRegion.Anonymous,
virtual_memory_region: VirtualMemoryRegion,
// struct vregion vregion; ///< Needs just one vregion
// struct memobj_anon memobj; ///< Needs just one memobj
// lvaddr_t offset; ///< Offset of free space in anon
// lvaddr_t mapoffset; ///< Offset into the anon that has been mapped in

pub fn init(size: usize) !MMUAwareVirtualAddressSpace {
const slot_allocator = SlotAllocator.getDefault();
const alignment = lib.arch.valid_page_sizes[0];
return initAligned(slot_allocator, size, alignment, .{ .write = true });
}

pub fn initAligned(slot_allocator: *SlotAllocator, size: usize, alignment: usize, flags: VirtualMemoryRegion.Flags) !MMUAwareVirtualAddressSpace {
assert(flags.preferred_page_size < lib.arch.valid_page_sizes.len);
var result = MMUAwareVirtualAddressSpace{
.size = size,
.alignment = alignment,
.page_size = flags.preferred_page_size,
.slot_allocator = slot_allocator,
.physical_memory_region = try PhysicalMemoryRegion.Anonymous.new(size),
.virtual_memory_region = undefined,
};
// TODO: fix this API
result.virtual_memory_region = try user.process.getVirtualAddressSpace().mapAligned(result.physical_memory_region.getGeneric().*, 0, size, alignment, flags);

// TODO: create memobj
// TODO: map memobj into vregion

@panic("TODO: MMUAwareVirtualAddressSpace.initAligned");
}

const Error = error{
alignment,
};

pub fn map(virtual_address_space: *MMUAwareVirtualAddressSpace, size: usize) ![]u8 {
if (!lib.isAligned(size, lib.arch.valid_page_sizes[0])) {
return error.alignment;
}
_ = virtual_address_space;
log.warn("[map] TODO: slot allocation", .{});
//virtual_address_space.slot_allocator.allocate();
@panic("TODO: MMUAwareVirtualAddressSpace.map");
}
};
25 src/user/physical_map.zig Normal file
@ -0,0 +1,25 @@
const lib = @import("lib");
const log = lib.log.scoped(.PhysicalMap);

const user = @import("user");
const SlotAllocator = user.SlotAllocator;
const VirtualAddressSpace = user.VirtualAddressSpace;

pub const PhysicalMap = extern struct {
virtual_address_space: *VirtualAddressSpace,
slot_allocator: *SlotAllocator,

pub usingnamespace user.arch.PhysicalMapInterface;

pub fn initPageTableManagement(physical_map: *PhysicalMap) !void {
const current_physical_map = user.process.getPhysicalMap();
log.debug("CURR: 0x{x}. PHYS: 0x{x}", .{ @intFromPtr(current_physical_map), @intFromPtr(physical_map) });
if (current_physical_map == physical_map) {
@panic("TODO: if");
} else {
log.warn("TODO: slab_init", .{});
_ = user.libc.malloc(lib.arch.valid_page_sizes[0]);
@panic("TODO: else");
}
}
};
81 src/user/physical_memory_region.zig Normal file
@ -0,0 +1,81 @@
const lib = @import("lib");
const assert = lib.assert;
const log = lib.log.scoped(.PhysicalMemoryRegion);

// Barrelfish: memobj
pub const PhysicalMemoryRegion = extern struct {
size: usize,
type: Type,

pub const Type = enum(u8) {
anonymous = 0,
one_frame = 1,
pinned = 3,
//one_frame_lazy,
//one_frame_one_map,
// vfs,
// fixed,
// numa,
// append,

fn map(t: Type) type {
return switch (t) {
.anonymous => Anonymous,
.one_frame => OneFrame,
.pinned => Pinned,
};
}
};

pub const Anonymous = extern struct {
region: PhysicalMemoryRegion,

pub usingnamespace Interface(@This());

pub fn new(size: usize) !Anonymous {
const result = Anonymous{
.region = .{
.size = size,
.type = .anonymous,
},
};

log.warn("[Anonymous.new] TODO: initialize memory", .{});

return result;
}
};

pub const OneFrame = extern struct {
pub usingnamespace Interface(@This());
};

pub const Pinned = extern struct {
region: PhysicalMemoryRegion,
pub usingnamespace Interface(@This());

pub fn new(size: usize) !Pinned {
const result = Pinned{
.region = .{
.size = size,
.type = .pinned,
},
};

log.warn("[Pinned.new] TODO: initialize memory", .{});

return result;
}
};

fn Interface(comptime PhysicalMemoryRegionType: type) type {
assert(@hasField(PhysicalMemoryRegionType, "region"));
assert(@TypeOf(@field(@as(PhysicalMemoryRegionType, undefined), "region")) == PhysicalMemoryRegion);

return extern struct {
pub inline fn getGeneric(r: *PhysicalMemoryRegionType) *PhysicalMemoryRegion {
return &r.region;
}
};
}
};
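The Interface mixin gives every concrete region type a getGeneric accessor via usingnamespace, so callers can reach the common header without caring about the variant. Usage sketch:

var pinned = try PhysicalMemoryRegion.Pinned.new(0x1000);
const generic = pinned.getGeneric();
assert(generic.type == .pinned and generic.size == 0x1000);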
@ -9,12 +9,10 @@ pub const std_options = user.std_options;
export var core_id: u32 = 0;

pub fn main() !noreturn {
@panic("TODO: main");

// core_id = try Syscall(.cpu, .get_core_id).blocking({});
// user.currentScheduler().core_id = core_id;
// log.debug("Hello world! User space initialization from core #{}", .{core_id});
// const allocation = try Syscall(.cpu_memory, .allocate).blocking(0x1000);
// log.debug("Look allocation successful at 0x{x}", .{allocation.value()});
// try Syscall(.cpu, .shutdown).blocking({});
core_id = try Syscall(.cpu, .get_core_id).blocking({});
user.currentScheduler().core_id = core_id;
log.debug("Hello world! User space initialization from core #{}", .{core_id});
const allocation = try Syscall(.cpu_memory, .allocate).blocking(0x1000);
log.debug("Look allocation successful at 0x{x}", .{allocation.value()});
try Syscall(.cpu, .shutdown).blocking({});
}

@ -9,7 +9,7 @@ pub const std_options = user.std_options;

export var core_id: u32 = 0;

pub fn main() !void {
pub fn main() !noreturn {
// core_id = try syscall(.cpu, .get_core_id).blocking({});
// user.currentScheduler().core_id = core_id;
// log.debug("Hello world! User space initialization from core #{}", .{core_id});
@ -21,5 +21,5 @@ pub fn main() !void {
// const aligned_bundle_size = lib.alignForward(usize, bundle_size, lib.arch.valid_page_sizes[0]);
// const bundle_allocation = try syscall(.cpu_memory, .allocate).blocking(aligned_bundle_size);
// log.debug("Look allocation successful at 0x{x}", .{bundle_allocation.value()});
// try syscall(.cpu, .shutdown).blocking({});
try syscall(.cpu, .shutdown).blocking({});
}
28 src/user/slot_allocator.zig Normal file
@ -0,0 +1,28 @@
const log = @import("lib").log;
const user = @import("user");

pub const SlotAllocator = extern struct {
foo: u32 = 0,

/// This function is inlined because it's only called once
pub inline fn init() !void {
log.warn("TODO: implement the whole SlotAllocator.init", .{});
const state = user.process.getSlotAllocatorState();
const default_allocator = state.default_allocator;
_ = default_allocator;
}

pub fn getDefault() *SlotAllocator {
const process_slot_allocator_state = user.process.getSlotAllocatorState();
return &process_slot_allocator_state.default_allocator.allocator;
}

pub const State = extern struct {
default_allocator: MultiSlotAllocator,
};
};

pub const MultiSlotAllocator = extern struct {
allocator: SlotAllocator,
// TODO:
};
@ -9,21 +9,48 @@ const SlotAllocator = user.SlotAllocator;
const VirtualAddress = lib.VirtualAddress;
const VirtualAddressSpace = user.VirtualAddressSpace;

const Thread = birth.Thread;

const max_thread_count = 256;

pub fn initBootstrap(scheduler: *user.arch.Scheduler) noreturn {
const thread = &scheduler.generic.bootstrap_thread;
thread.stack = &scheduler.common.generic.setup_stack;
thread.stack_top = @ptrFromInt(@intFromPtr(&scheduler.common.generic.setup_stack) + scheduler.common.generic.setup_stack.len);
pub const Thread = extern struct {
self: *Thread,
previous: ?*Thread,
next: ?*Thread,
stack: [*]u8,
stack_top: [*]align(lib.arch.stack_alignment) u8,
register_arena: birth.arch.RegisterArena align(lib.arch.stack_alignment),
core_id: u32,

pub fn init(thread: *Thread, scheduler: *user.arch.Scheduler) void {
thread.self = thread;
thread.previous = null;
thread.next = null;
thread.core_id = scheduler.generic.core_id;
}
};

pub const Mutex = extern struct {
locked: bool = false,

pub inline fn internalLock(mutex: *volatile Mutex) void {
mutex.locked = true;
}
};

var static_stack: [0x10000]u8 align(lib.arch.stack_alignment) = undefined;
var static_thread: Thread = undefined;
var static_thread_lock = Mutex{};

pub fn initDisabled(scheduler: *user.arch.Scheduler) noreturn {
const thread = &static_thread;
static_thread_lock.internalLock();
thread.stack = &static_stack;
thread.stack_top = static_stack[static_stack.len..];
thread.init(scheduler);

// TODO: use RAX as parameter?

user.arch.setInitialState(&thread.register_arena, VirtualAddress.new(bootstrapThread), VirtualAddress.new(thread.stack_top), .{0} ** 6);
scheduler.generic.enqueueThread(thread);
scheduler.generic.current_thread = thread;

scheduler.common.generic.has_work = true;

scheduler.restore(&thread.register_arena);
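// Descriptive summary of initDisabled above: the first thread is built
// entirely from static storage (a 0x10000-byte stack plus static_thread),
// its register arena is pointed at bootstrapThread via setInitialState,
// the scheduler is flagged as having work, and restore() then performs
// the first context switch and never returns.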
@ -1,100 +0,0 @@
const Virtual = @This();

const birth = @import("birth");
const lib = @import("lib");
const user = @import("user");

const assert = lib.assert;
const log = lib.log;
const SparseArray = lib.data_structures.SparseArray;
const VirtualAddress = lib.VirtualAddress;

const paging = lib.arch.paging;

const Leaf = birth.interface.Leaf;

pub const AddressSpace = extern struct {
// page_table: PageTable,
region: Virtual.AddressSpace.Region = .{},
minimum: VirtualAddress = VirtualAddress.new(paging.user_address_space_start),
maximum: VirtualAddress = VirtualAddress.new(paging.user_address_space_end),
root_page_table: PageTable = .{},
page_table_buffer: SparseArray(PageTable) = .{
.ptr = undefined,
.len = 0,
.capacity = 0,
},
leaf_buffer: SparseArray(Leaf) = .{
.ptr = undefined,
.len = 0,
.capacity = 0,
},

const Region = extern struct {
list: Virtual.Region.List = .{},
block_count: usize = 0,
};

pub fn create() !*AddressSpace {
const scheduler = user.currentScheduler();
const virtual_address_space = try scheduler.fast_allocator.create(AddressSpace);
virtual_address_space.* = .{};

try virtual_address_space.collectPageTables(&virtual_address_space.root_page_table, .{});

@panic("TODO: create");
}

fn collectPageTables(virtual_address_space: *Virtual.AddressSpace, page_table: *PageTable, descriptor: birth.interface.PageTable) !void {
try user.Interface(.page_table, .get).blocking(.{
.descriptor = descriptor,
.buffer = &page_table.children_handles,
});

const allocator = &user.currentScheduler().fast_allocator;

for (page_table.children_handles, &page_table.indices) |child, *index| {
if (child.present) {
switch (child.entry_type) {
.page_table => {
const page_table_index = virtual_address_space.page_table_buffer.len;
const new_page_table = try virtual_address_space.page_table_buffer.allocate(allocator);
//user.currentScheduler().fast_allocator.create(PageTable);
index.* = @intCast(page_table_index);

try virtual_address_space.collectPageTables(new_page_table, child);
},
.leaf => {
const new_leaf = try virtual_address_space.leaf_buffer.allocate(allocator);
index.* = @intCast(virtual_address_space.leaf_buffer.indexOf(new_leaf));
try getLeaf(child, new_leaf);
log.debug("New leaf: {}", .{new_leaf});
},
}
}
}
}

fn getLeaf(leaf_descriptor: birth.interface.PageTable, leaf: *Leaf) !void {
try user.Interface(.page_table, .get_leaf).blocking(.{
.descriptor = leaf_descriptor,
.buffer = leaf,
});
}
};

pub const Region = extern struct {
foo: u32 = 0,

pub const List = extern struct {
regions: [region_count]Region = .{.{}} ** region_count,
next: ?*List = null,

const region_count = 20;
};
};

pub const PageTable = extern struct {
children_handles: [512]birth.interface.PageTable = .{.{}} ** 512,
indices: [512]u32 = .{0} ** 512,
};
60 src/user/virtual_address_space.zig Normal file
@ -0,0 +1,60 @@
const lib = @import("lib");
const log = lib.log;

const user = @import("user");
const PhysicalMap = user.PhysicalMap;
const PhysicalMemoryRegion = user.PhysicalMemoryRegion;
const VirtualMemoryRegion = user.VirtualMemoryRegion;

pub const MMUAwareVirtualAddressSpace = @import("mmu_aware_virtual_address_space.zig").MMUAwareVirtualAddressSpace;

pub const VirtualAddressSpace = extern struct {
physical_map: *PhysicalMap,
// TODO: layout
regions: ?*VirtualMemoryRegion = null,

/// The function is inlined because it's only called once
pub inline fn initializeCurrent() !void {
log.debug("VirtualAddressSpace.initializeCurrent", .{});
const virtual_address_space = user.process.getVirtualAddressSpace();
const physical_map = user.process.getPhysicalMap();
virtual_address_space.physical_map = physical_map;

const root_page_level = 0;
physical_map.* = try PhysicalMap.init(virtual_address_space, root_page_level, user.process.getSlotAllocator());
// This should be an inline call, as this is the only time this function is called
try physical_map.initializeCurrent();

try virtual_address_space.pinnedInit();

log.warn("TODO: VirtualAddressSpace.initializeCurrent is incomplete!", .{});
}

pub inline fn pinnedInit(virtual_address_space: *VirtualAddressSpace) !void {
const pinned_state = user.process.getPinnedState();
const pinned_size = 128 * lib.mb;
pinned_state.physical_memory_region = try PhysicalMemoryRegion.Pinned.new(pinned_size);

pinned_state.virtual_memory_region = try virtual_address_space.map(pinned_state.physical_memory_region.getGeneric().*, 0, pinned_size, .{ .write = true });
log.warn("TODO: VirtualAddressSpace.pinnedInit", .{});
}

pub inline fn map(virtual_address_space: *VirtualAddressSpace, physical_memory_region: PhysicalMemoryRegion, offset: usize, size: usize, flags: VirtualMemoryRegion.Flags) !VirtualMemoryRegion {
const alignment = lib.arch.valid_page_sizes[0];
return virtual_address_space.mapAligned(physical_memory_region, offset, size, alignment, flags);
}

pub fn mapAligned(virtual_address_space: *VirtualAddressSpace, physical_memory_region: PhysicalMemoryRegion, offset: usize, size: usize, alignment: usize, flags: VirtualMemoryRegion.Flags) !VirtualMemoryRegion {
const virtual_address = try virtual_address_space.physical_map.determineAddress(physical_memory_region, alignment);
_ = virtual_address;
_ = offset;
_ = size;
_ = flags;
@panic("TODO: VirtualAddressSpace.mapAligned");
}

pub const State = extern struct {
virtual_address_space: VirtualAddressSpace,
physical_map: PhysicalMap,
};
};
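Usage sketch tying the new pieces together (field names as defined above; this mirrors what pinnedInit does with its 128 MiB pinned region, though the underlying mapAligned path still ends in a TODO panic):

var anonymous = try PhysicalMemoryRegion.Anonymous.new(lib.arch.valid_page_sizes[0]);
const region = try user.process.getVirtualAddressSpace().map(anonymous.getGeneric().*, 0, anonymous.region.size, .{ .read = true, .write = true });
_ = region;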