Compare commits
1 commit: main...syscall-re (beda088388)
30  .github/workflows/ci.yml (vendored)
@@ -42,18 +42,18 @@ jobs:
        run: zig build all_tests -Dci --verbose
      - name: Run host tests
        run: zig build test_host
-  # build_and_test:
-  #   runs-on: [self-hosted, Linux, X64]
-  #   steps:
-  #     - name: Checkout
-  #       uses: actions/checkout@v3
-  #     - name: Set up Zig
-  #       uses: goto-bus-stop/setup-zig@v2
-  #       with:
-  #         version: master
-  #     - name: Zig environment variables
-  #       run: zig env
-  #     - name: Build test executables
-  #       run: zig build all_tests -Dci -Dci_native --verbose
-  #     - name: Test with QEMU
-  #       run: zig build test_all -Dci -Dci_native --verbose
+  build_and_test:
+    runs-on: [self-hosted, Linux, X64]
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Set up Zig
+        uses: goto-bus-stop/setup-zig@v2
+        with:
+          version: master
+      - name: Zig environment variables
+        run: zig env
+      - name: Build test executables
+        run: zig build all_tests -Dci -Dci_native --verbose
+      - name: Test with QEMU
+        run: zig build test_all -Dci -Dci_native --verbose
45  build.zig
@@ -5,7 +5,7 @@ const os = common.os;
// Build types
const Build = std.Build;
const CompileStep = std.Build.CompileStep;
-const FileSource = std.Build.FileSource;
+const LazyPath = std.Build.LazyPath;
const Module = std.Build.Module;
const ModuleDependency = std.Build.ModuleDependency;
const OptionsStep = std.Build.OptionsStep;
@@ -58,7 +58,7 @@ pub fn build(b_arg: *Build) !void {
var mods = Modules{};
inline for (comptime common.enumValues(ModuleID)) |module_id| {
mods.modules.set(module_id, b.createModule(.{
-.source_file = FileSource.relative(switch (module_id) {
+.source_file = LazyPath.relative(switch (module_id) {
.limine_installer => "src/bootloader/limine/installer.zig",
else => switch (module_id) {
.bios, .uefi, .limine => "src/bootloader",
@@ -162,13 +162,13 @@ pub fn build(b_arg: *Build) !void {
run_native: bool = true,

const C = struct {
-include_paths: []const []const u8,
+include_paths: []const LazyPath,
source_files: []const SourceFile,
link_libc: bool,
link_libcpp: bool,

const SourceFile = struct {
-path: []const u8,
+path: LazyPath,
flags: []const []const u8,
};
};
@@ -183,10 +183,10 @@ pub fn build(b_arg: *Build) !void {
.root_project_path = disk_image_root_path,
.modules = disk_image_builder_modules,
.c = .{
-.include_paths = &.{"src/bootloader/limine/installables"},
+.include_paths = &.{LazyPath.relative("src/bootloader/limine/installables")},
.source_files = &.{
.{
-.path = "src/bootloader/limine/installables/limine-deploy.c",
+.path = LazyPath.relative("src/bootloader/limine/installables/limine-deploy.c"),
.flags = &.{},
},
},
@@ -215,7 +215,7 @@ pub fn build(b_arg: *Build) !void {
}

for (c.source_files) |source_file| {
-test_exe.addCSourceFile(source_file.path, source_file.flags);
+test_exe.addCSourceFile(.{ .file = source_file.path, .flags = source_file.flags });
}

if (c.link_libc) {
@@ -301,7 +301,7 @@ pub fn build(b_arg: *Build) !void {
else => return Error.architecture_not_supported,
};

-const cpu_driver_linker_script_path = FileSource.relative(try std.mem.concat(b.allocator, u8, &.{ cpu_driver_path, "/arch/", switch (architecture) {
+const cpu_driver_linker_script_path = LazyPath.relative(try std.mem.concat(b.allocator, u8, &.{ cpu_driver_path, "/arch/", switch (architecture) {
.x86_64 => "x86/64",
.x86 => "x86/32",
else => @tagName(architecture),
@@ -311,7 +311,7 @@ pub fn build(b_arg: *Build) !void {

var user_module_list = try std.ArrayList(*CompileStep).initCapacity(b.allocator, user_modules.len);
const user_architecture_source_path = try std.mem.concat(b.allocator, u8, &.{ "src/user/arch/", @tagName(architecture), "/" });
-const user_linker_script_path = FileSource.relative(try std.mem.concat(b.allocator, u8, &.{ user_architecture_source_path, "linker_script.ld" }));
+const user_linker_script_path = LazyPath.relative(try std.mem.concat(b.allocator, u8, &.{ user_architecture_source_path, "linker_script.ld" }));
for (user_modules) |module| {
const user_module = try addCompileStep(.{
.kind = executable_kind,
@@ -353,9 +353,9 @@ pub fn build(b_arg: *Build) !void {

executable.strip = true;

-executable.addAssemblyFile("src/bootloader/arch/x86/64/smp_trampoline.S");
-executable.addAssemblyFile(bootloader_path ++ "/unreal_mode.S");
-executable.setLinkerScriptPath(FileSource.relative(bootloader_path ++ "/linker_script.ld"));
+executable.addAssemblyFile(LazyPath.relative("src/bootloader/arch/x86/64/smp_trampoline.S"));
+executable.addAssemblyFile(LazyPath.relative(bootloader_path ++ "/unreal_mode.S"));
+executable.setLinkerScriptPath(LazyPath.relative(bootloader_path ++ "/linker_script.ld"));
executable.code_model = .small;

break :blk executable;
@@ -380,7 +380,7 @@ pub fn build(b_arg: *Build) !void {
executable.strip = true;

switch (architecture) {
-.x86_64 => executable.addAssemblyFile("src/bootloader/arch/x86/64/smp_trampoline.S"),
+.x86_64 => executable.addAssemblyFile(LazyPath.relative("src/bootloader/arch/x86/64/smp_trampoline.S")),
else => {},
}

@@ -405,7 +405,7 @@ pub fn build(b_arg: *Build) !void {

executable.code_model = cpu_driver.code_model;

-executable.setLinkerScriptPath(FileSource.relative(try common.concat(b.allocator, u8, &.{ limine_loader_path ++ "arch/", @tagName(architecture), "/linker_script.ld" })));
+executable.setLinkerScriptPath(LazyPath.relative(try common.concat(b.allocator, u8, &.{ limine_loader_path ++ "arch/", @tagName(architecture), "/linker_script.ld" })));

break :blk executable;
},
@@ -591,13 +591,13 @@ fn addFileSize(artifact: *CompileStep, comptime name: []const u8) void {

fn newRunnerRunArtifact(arguments: struct {
configuration: Configuration,
-disk_image_path: FileSource,
+disk_image_path: LazyPath,
loader: *CompileStep,
runner: *CompileStep,
cpu_driver: *CompileStep,
user_init: *CompileStep,
qemu_options: QEMUOptions,
-ovmf_path: FileSource,
+ovmf_path: LazyPath,
is_default: bool,
}) !*RunStep {
const runner = b.addRunArtifact(arguments.runner);
@@ -610,12 +610,12 @@ fn newRunnerRunArtifact(arguments: struct {
.cpu_driver => runner.addArtifactArg(arguments.cpu_driver),
.loader_path => runner.addArtifactArg(arguments.loader),
.init => runner.addArtifactArg(arguments.user_init),
-.disk_image_path => runner.addFileSourceArg(arguments.disk_image_path),
+.disk_image_path => runner.addFileArg(arguments.disk_image_path),
.qemu_options => inline for (common.fields(QEMUOptions)) |field| runner.addArg(if (@field(arguments.qemu_options, field.name)) "true" else "false"),
.ci => runner.addArg(if (ci) "true" else "false"),
.debug_user => runner.addArg(if (debug_user) "true" else "false"),
.debug_loader => runner.addArg(if (debug_loader) "true" else "false"),
-.ovmf_path => runner.addFileSourceArg(arguments.ovmf_path),
+.ovmf_path => runner.addFileArg(arguments.ovmf_path),
.is_default => runner.addArg(if (arguments.is_default) "true" else "false"),
};

@@ -631,15 +631,17 @@ const ExecutableDescriptor = struct {
modules: []const ModuleID,
};

+const main_package_path = LazyPath.relative(source_root_dir);
fn addCompileStep(executable_descriptor: ExecutableDescriptor) !*CompileStep {
const main_file = try std.mem.concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/main.zig" });
const compile_step = switch (executable_descriptor.kind) {
.exe => blk: {
const executable = b.addExecutable(.{
.name = executable_descriptor.name,
-.root_source_file = FileSource.relative(main_file),
+.root_source_file = LazyPath.relative(main_file),
.target = executable_descriptor.target,
.optimize = executable_descriptor.optimize_mode,
+.main_pkg_path = main_package_path,
});

build_steps.build_all.dependOn(&executable.step);
@@ -647,13 +649,14 @@ fn addCompileStep(executable_descriptor: ExecutableDescriptor) !*CompileStep {
break :blk executable;
},
.@"test" => blk: {
-const test_file = FileSource.relative(try std.mem.concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/test.zig" }));
+const test_file = LazyPath.relative(try std.mem.concat(b.allocator, u8, &.{ executable_descriptor.root_project_path, "/test.zig" }));
const test_exe = b.addTest(.{
.name = executable_descriptor.name,
.root_source_file = test_file,
.target = executable_descriptor.target,
.optimize = executable_descriptor.optimize_mode,
.test_runner = if (executable_descriptor.target.os_tag) |_| main_file else null,
+.main_pkg_path = main_package_path,
});

build_steps.build_all_tests.dependOn(&test_exe.step);
@@ -669,8 +672,6 @@ fn addCompileStep(executable_descriptor: ExecutableDescriptor) !*CompileStep {
compile_step.entry_symbol_name = "_start";
}

-compile_step.setMainPkgPath(source_root_dir);
-
for (executable_descriptor.modules) |module| {
modules.addModule(compile_step, module);
}
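
The bulk of this build.zig churn is a mechanical migration from the removed std.Build.FileSource alias to std.Build.LazyPath, plus the new options-struct form of addCSourceFile. A minimal standalone sketch of the resulting pattern, assuming a Zig 0.11-era std.Build API; the artifact and file names are hypothetical, not this repository's:

// Minimal sketch of the API this diff migrates to (Zig 0.11-era std.Build).
const std = @import("std");

pub fn build(b: *std.Build) void {
    const exe = b.addExecutable(.{
        .name = "demo", // hypothetical artifact name
        // FileSource was renamed to LazyPath; .relative() still wraps a
        // project-relative path string.
        .root_source_file = std.Build.LazyPath.relative("src/main.zig"),
        .target = .{},
        .optimize = .Debug,
    });
    // addCSourceFile now takes a single options struct instead of
    // (path, flags) positional arguments.
    exe.addCSourceFile(.{
        .file = std.Build.LazyPath.relative("src/stub.c"), // hypothetical file
        .flags = &.{},
    });
    b.installArtifact(exe);
}
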
@@ -1,7 +1,7 @@
 {
 "architecture": "x86_64",
-"bootloader": "limine",
-"boot_protocol": "uefi",
+"bootloader": "birth",
+"boot_protocol": "bios",
 "execution_environment": "qemu",
 "optimize_mode": "Debug",
 "execution_type": "emulated",
@@ -10,7 +10,7 @@ pub const UserScheduler = extern struct {
disabled: bool,
has_work: bool,
core_id: u32,
-setup_stack: [lib.arch.valid_page_sizes[0]]u8 align(lib.arch.stack_alignment),
+setup_stack: [lib.arch.valid_page_sizes[0] * 4]u8 align(lib.arch.stack_alignment),
setup_stack_lock: lib.Atomic(bool),

pub inline fn architectureSpecific(user_scheduler: *UserScheduler) *arch.UserScheduler {
@@ -7,6 +7,15 @@ const syscall = birth.syscall;

const Capabilities = @This();

+pub const Reference = packed struct(usize) {
+integer: usize,
+};
+
+pub const RAM = packed struct(u64) {
+block: u32,
+region: u32,
+};
+
pub const Type = enum(u8) {
io, // primitive
cpu, // primitive
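
Both new types are declared as packed structs over exactly one machine word, so a RAM capability reference can travel through a single raw syscall register and be recovered with @bitCast on the other side. A standalone sketch of that round trip; main() and the field values are illustrative only:

// Why packed struct(u64) matters here: the reference round-trips through
// one register-sized integer via @bitCast, with no serialization code.
const std = @import("std");

const RAM = packed struct(u64) {
    block: u32,
    region: u32,
};

pub fn main() void {
    const ref = RAM{ .block = 3, .region = 17 };
    // Pack into the raw integer a syscall would carry...
    const raw: u64 = @bitCast(ref);
    // ...and recover the typed reference on the other side.
    const back: RAM = @bitCast(raw);
    std.debug.assert(back.block == 3 and back.region == 17);
    std.debug.print("raw=0x{x}\n", .{raw});
}
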
@@ -21,7 +30,7 @@ pub const Type = enum(u8) {

// _,

-pub const Type = u8;
+pub const BackingType = @typeInfo(Type).Enum.tag_type;

pub const Mappable = enum {
cpu_memory,
@@ -47,6 +56,7 @@ pub fn CommandBuilder(comptime list: []const []const u8) type {
"revoke",
"create",
} ++ list;

+const enum_fields = lib.enumAddNames(&.{}, capability_base_command_list);

// TODO: make this non-exhaustive enums
@@ -75,16 +85,17 @@ pub fn Command(comptime capability: Type) type {
"shutdown",
"get_command_buffer",
},
-.ram => [_][]const u8{},
-.cpu_memory => .{
+.ram => .{
"allocate",
},
+.cpu_memory => [_][]const u8{},
.boot => .{
"get_bundle_size",
"get_bundle_file_list_size",
},
.process => .{
"exit",
"panic",
},
.page_table => [_][]const u8{},
};
@@ -94,248 +105,352 @@ pub fn Command(comptime capability: Type) type {

const success = 0;
const first_valid_error = success + 1;

pub fn ErrorSet(comptime error_names: []const []const u8) type {
-return lib.ErrorSet(error_names, &.{
-.{
-.name = "forbidden",
-.value = first_valid_error + 0,
-},
-.{
-.name = "corrupted_input",
-.value = first_valid_error + 1,
-},
-.{
-.name = "invalid_input",
-.value = first_valid_error + 2,
-},
-});
+const predefined_error_names = &.{ "forbidden", "corrupted_input", "invalid_input" };
+comptime var current_error = first_valid_error;
+comptime var predefined_errors: []const lib.Type.EnumField = &.{};
+
+inline for (predefined_error_names) |predefined_error_name| {
+defer current_error += 1;
+
+predefined_errors = predefined_errors ++ .{
+.{
+.name = predefined_error_name,
+.value = current_error,
+},
+};
+}
+
+return lib.ErrorSet(error_names, predefined_errors);
}
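
The rewritten ErrorSet builds its predefined-error list with a comptime loop over names instead of a hand-numbered literal, so new predefined errors keep consecutive values automatically. A standalone sketch of the same comptime pattern using only std.builtin.Type.EnumField and @Type; the names, values, and ErrorEnum helper are illustrative, not the kernel's:

// Sketch of the comptime pattern the new ErrorSet uses: grow a
// []const EnumField at comptime, then realize an enum from it with @Type.
const std = @import("std");
const EnumField = std.builtin.Type.EnumField;

fn ErrorEnum(comptime names: []const []const u8, comptime first_value: u8) type {
    comptime var value = first_value;
    comptime var fields: []const EnumField = &.{};
    inline for (names) |name| {
        defer value += 1;
        fields = fields ++ .{EnumField{ .name = name, .value = value }};
    }
    return @Type(.{ .Enum = .{
        .tag_type = u8,
        .fields = fields,
        .decls = &.{},
        .is_exhaustive = true,
    } });
}

test "enum built at comptime" {
    const E = ErrorEnum(&.{ "forbidden", "invalid_input" }, 1);
    try std.testing.expectEqual(@as(u8, 1), @intFromEnum(E.forbidden));
    try std.testing.expectEqual(@as(u8, 2), @intFromEnum(E.invalid_input));
}
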

const raw_argument_count = @typeInfo(syscall.Arguments).Array.len;

pub fn Syscall(comptime capability_type: Type, comptime command_type: Command(capability_type)) type {
const Types = switch (capability_type) {
.io => switch (command_type) {
.copy, .mint, .retype, .delete, .revoke, .create => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{});
pub const Result = void;
pub const Arguments = void;
const DefaultErrorSet = Capabilities.ErrorSet(&.{});
const Types = struct {
Result: type = void,
Arguments: type = void,
ErrorSet: type = DefaultErrorSet,
};

fn Functions(comptime T: Types) type {
const ToArguments = fn (syscall.Arguments) callconv(.Inline) T.ErrorSet.Error!T.Arguments;
const FromArguments = fn (T.Arguments) callconv(.Inline) syscall.Arguments;

return switch (T.Result) {
else => blk: {
const ToResult = fn (syscall.Result.Birth) callconv(.Inline) T.Result;
const FromResult = fn (T.Result) callconv(.Inline) syscall.Result;

// return if (T.Result == void and T.Arguments == void and T.ErrorSet == DefaultErrorSet) struct {
break :blk if (T.ErrorSet == DefaultErrorSet and T.Result == void and T.Arguments == void) struct {
toResult: ToResult = voidToResult,
fromResult: FromResult = voidFromResult,
toArguments: ToArguments = voidToArguments,
fromArguments: FromArguments = voidFromArguments,
} else if (T.ErrorSet == DefaultErrorSet and T.Result == void) struct {
toResult: ToResult = voidToResult,
fromResult: FromResult = voidFromResult,
toArguments: ToArguments,
fromArguments: FromArguments,
} else if (T.ErrorSet == DefaultErrorSet and T.Arguments == void) struct {
toResult: ToResult,
fromResult: FromResult,
toArguments: ToArguments = voidToArguments,
fromArguments: FromArguments = voidFromArguments,
} else struct {
toResult: ToResult,
fromResult: FromResult,
toArguments: ToArguments,
fromArguments: FromArguments,
};
},
noreturn => if (T.ErrorSet == DefaultErrorSet and T.Arguments == void) struct {
toArguments: ToArguments = voidToArguments,
fromArguments: FromArguments = voidFromArguments,
} else struct {
toArguments: ToArguments,
fromArguments: FromArguments,
},
};
}

fn Descriptor(comptime T: Types) type {
return struct {
types: Types = T,
functions: Functions(T),
};
}

fn CommandDescriptor(comptime capability: Type, comptime command: Command(capability)) type {
return Descriptor(switch (capability) {
.io => switch (command) {
.log => .{
.Result = usize,
.Arguments = []const u8,
},
.log => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{});
pub const Result = usize;
pub const Arguments = []const u8;
else => .{},
},
.ram => switch (command) {
.allocate => .{
.Result = Reference,
.Arguments = usize,
.ErrorSet = ErrorSet(&.{"OutOfMemory"}),
},
else => .{},
},
.process => switch (command) {
.exit => .{
.Result = noreturn,
.Arguments = bool,
},
.panic => .{
.Result = noreturn,
.Arguments = struct {
message: []const u8,
exit_code: u64,
},
},
else => .{},
},
.cpu => switch (command) {
.get_core_id => .{
.Result = u32,
},
.shutdown => .{
.Result = noreturn,
},
// .get_command_buffer = .{
// },
else => .{},
},
.boot => switch (command) {
.get_bundle_file_list_size, .get_bundle_size => .{
.Result = usize,
},
else => .{},
},
else => .{},
});
}
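
CommandDescriptor returns a struct whose first field carries the per-command Types value as its default, and the new Syscall below recovers that comptime value through @typeInfo(...).fields[0].default_value, which @typeInfo exposes as a type-erased ?*const anyopaque. A standalone sketch of that trick under a Zig 0.11-era @typeInfo layout; Config and defaultLimit are hypothetical names:

// Sketch of recovering a comptime value from a struct field's default.
const std = @import("std");

const Config = struct {
    limit: u32 = 42,
};

fn defaultLimit(comptime T: type) u32 {
    const field = @typeInfo(T).Struct.fields[0];
    // default_value is ?*const anyopaque; cast it back to the field's type.
    const ptr = @as(?*const u32, @ptrCast(@alignCast(field.default_value))).?;
    return ptr.*;
}

test "read a field default via @typeInfo" {
    try std.testing.expectEqual(@as(u32, 42), comptime defaultLimit(Config));
}
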
inline fn toResult(raw_result: syscall.Result.Birth) Result {
return raw_result.second;
}
pub fn Syscall(comptime cap: Type, comptime com: Command(cap)) type {
const D = CommandDescriptor(cap, com);
const T = @as(?*const Types, @ptrCast(@typeInfo(D).Struct.fields[0].default_value)).?.*;
const d = D{
.functions = switch (cap) {
.ram => switch (com) {
.allocate => blk: {
const F = struct {
inline fn toResult(raw_result: syscall.Result.Birth) Reference {
return @bitCast(raw_result.second);
}

inline fn resultToRaw(result: Result) syscall.Result {
return syscall.Result{
.birth = .{
.first = .{},
.second = result,
},
inline fn fromResult(result: Reference) syscall.Result {
return .{
.birth = .{
.first = .{},
.second = @bitCast(result),
},
};
}

inline fn toArguments(raw_arguments: syscall.Arguments) T.ErrorSet.Error!usize {
const size = raw_arguments[0];
return size;
}

inline fn fromArguments(arguments: usize) syscall.Arguments {
const result = [1]usize{arguments};
return result ++ .{0} ** (raw_argument_count - result.len);
}
};
}

inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments {
const result = [2]usize{ @intFromPtr(arguments.ptr), arguments.len };
return result ++ .{0} ** (raw_argument_count - result.len);
}

inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments {
const message_ptr = @as(?[*]const u8, @ptrFromInt(raw_arguments[0])) orelse return error.invalid_input;
const message_len = raw_arguments[1];
if (message_len == 0) return error.invalid_input;
const message = message_ptr[0..message_len];
return message;
}
},
},
.cpu => switch (command_type) {
.copy, .mint, .retype, .delete, .revoke, .create => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{});
pub const Result = void;
pub const Arguments = void;
},
.get_core_id => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{});
pub const Result = u32;
pub const Arguments = void;

inline fn toResult(raw_result: syscall.Result.birth) Result {
return @as(Result, @intCast(raw_result.second));
}

inline fn resultToRaw(result: Result) syscall.Result {
return syscall.Result{
.birth = .{
.first = .{},
.second = result,
},
break :blk .{
.toResult = F.toResult,
.fromResult = F.fromResult,
.toArguments = F.toArguments,
.fromArguments = F.fromArguments,
};
}
},
else => .{},
},
.shutdown => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{});
pub const Result = noreturn;
pub const Arguments = void;
.process => switch (com) {
.exit => blk: {
const F = struct {
inline fn toArguments(raw_arguments: syscall.Arguments) T.ErrorSet.Error!T.Arguments {
const result = raw_arguments[0] != 0;
return result;
}

pub const toResult = @compileError("noreturn unexpectedly returned");
},
.get_command_buffer => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{});
pub const Result = noreturn;
pub const Arguments = *birth.CommandBuffer;

pub const toResult = @compileError("noreturn unexpectedly returned");

inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments {
const ptr = @as(?*birth.CommandBuffer, @ptrFromInt(raw_arguments[0])) orelse return error.invalid_input;
return ptr;
}

inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments {
const result = [1]usize{@intFromPtr(arguments)};
return result ++ .{0} ** (raw_argument_count - result.len);
}
},
},
.ram => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{});
pub const Result = void;
pub const Arguments = void;
},
.cpu_memory => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{
"OutOfMemory",
});
pub const Result = PhysicalAddress;
pub const Arguments = usize;

inline fn toResult(raw_result: syscall.Result.birth) Result {
return PhysicalAddress.new(raw_result.second);
}

inline fn resultToRaw(result: Result) syscall.Result {
return syscall.Result{
.birth = .{
.first = .{},
.second = result.value(),
},
};
}

inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments {
const size = raw_arguments[0];
return size;
}

inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments {
const result = [1]usize{arguments};
return result ++ .{0} ** (raw_argument_count - result.len);
}
},
.boot => switch (command_type) {
.get_bundle_file_list_size, .get_bundle_size => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{
"buffer_too_small",
});
pub const Result = usize;
pub const Arguments = void;

inline fn resultToRaw(result: Result) syscall.Result {
return syscall.Result{
.birth = .{
.first = .{},
.second = result,
},
inline fn fromArguments(arguments: T.Arguments) syscall.Arguments {
const result = [1]usize{@intFromBool(arguments)};
return result ++ .{0} ** (raw_argument_count - result.len);
}
};
}
break :blk .{
// .toResult = F.toResult,
// .fromResult = F.fromResult,
.toArguments = F.toArguments,
.fromArguments = F.fromArguments,
};
},
.panic => blk: {
const F = struct {
inline fn toArguments(raw_arguments: syscall.Arguments) T.ErrorSet.Error!T.Arguments {
if (@as(?[*]const u8, @ptrFromInt(raw_arguments[0]))) |message_ptr| {
const message_len = raw_arguments[1];

inline fn toResult(raw_result: syscall.Result.birth) Result {
return raw_result.second;
}
},
else => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{
"buffer_too_small",
});
pub const Result = void;
pub const Arguments = void;
},
},
.process => switch (command_type) {
.exit => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{});
pub const Result = noreturn;
pub const Arguments = bool;
if (message_len != 0) {
const message = message_ptr[0..message_len];
const exit_code = raw_arguments[2];

inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments {
const result = raw_arguments[0] != 0;
return result;
}
inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments {
const result = [1]usize{@intFromBool(arguments)};
return result ++ .{0} ** (raw_argument_count - result.len);
}
return .{
.message = message,
.exit_code = exit_code,
};
}
}

return error.invalid_input;
}

inline fn fromArguments(arguments: T.Arguments) syscall.Arguments {
const result: [3]usize = .{ @intFromPtr(arguments.message.ptr), arguments.message.len, arguments.exit_code };
return result ++ .{0} ** (raw_argument_count - result.len);
}
};
break :blk .{
.toArguments = F.toArguments,
.fromArguments = F.fromArguments,
};
},
else => .{},
},
else => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{});
pub const Result = void;
pub const Arguments = void;
.io => switch (com) {
.log => blk: {
const F = struct {
inline fn toResult(raw_result: syscall.Result.Birth) T.Result {
return raw_result.second;
}

inline fn fromResult(result: T.Result) syscall.Result {
return syscall.Result{
.birth = .{
.first = .{},
.second = result,
},
};
}

inline fn toArguments(raw_arguments: syscall.Arguments) T.ErrorSet.Error!T.Arguments {
const message_ptr = @as(?[*]const u8, @ptrFromInt(raw_arguments[0])) orelse return error.invalid_input;
const message_len = raw_arguments[1];
if (message_len == 0) return error.invalid_input;
const message = message_ptr[0..message_len];
return message;
}

inline fn fromArguments(arguments: T.Arguments) syscall.Arguments {
const result = [2]usize{ @intFromPtr(arguments.ptr), arguments.len };
return result ++ .{0} ** (raw_argument_count - result.len);
}
};
break :blk .{
.toResult = F.toResult,
.fromResult = F.fromResult,
.toArguments = F.toArguments,
.fromArguments = F.fromArguments,
};
},
else => .{},
},
.cpu => switch (com) {
.get_core_id => blk: {
const F = struct {
inline fn toResult(raw_result: syscall.Result.Birth) T.Result {
return @as(T.Result, @intCast(raw_result.second));
}

inline fn fromResult(result: T.Result) syscall.Result {
return syscall.Result{
.birth = .{
.first = .{},
.second = result,
},
};
}
};
break :blk .{
.toResult = F.toResult,
.fromResult = F.fromResult,
};
},
.shutdown => .{
.toArguments = voidToArguments,
.fromArguments = voidFromArguments,
},
.get_command_buffer => .{
// .get_command_buffer => struct {
// pub const ErrorSet = Capabilities.ErrorSet(&.{});
// pub const Result = noreturn;
// pub const Arguments = *birth.CommandBuffer;
//
// pub const toResult = @compileError("noreturn unexpectedly returned");
//
// inline fn toArguments(raw_arguments: syscall.Arguments) !Arguments {
// const ptr = @as(?*birth.CommandBuffer, @ptrFromInt(raw_arguments[0])) orelse return error.invalid_input;
// return ptr;
// }
//
// inline fn argumentsToRaw(arguments: Arguments) syscall.Arguments {
// const result = [1]usize{@intFromPtr(arguments)};
// return result ++ .{0} ** (raw_argument_count - result.len);
// }
// },
},
else => .{},
},
.boot => switch (com) {
.get_bundle_file_list_size, .get_bundle_size => blk: {
const F = struct {
inline fn toResult(raw_result: syscall.Result.Birth) T.Result {
return raw_result.second;
}
inline fn fromResult(result: T.Result) syscall.Result {
return syscall.Result{
.birth = .{
.first = .{},
.second = result,
},
};
}
};

break :blk .{
.toResult = F.toResult,
.fromResult = F.fromResult,
};
},
else => .{},
},
else => .{},
},
.page_table => switch (command_type) {
else => struct {
pub const ErrorSet = Capabilities.ErrorSet(&.{});
pub const Result = void;
pub const Arguments = void;
},
},
// else => @compileError("TODO: " ++ @tagName(capability)),
};
return struct {
pub const ErrorSet = Types.ErrorSet;
pub const Result = Types.Result;
pub const Arguments = Types.Arguments;
pub const toResult = Types.toResult;
pub const toArguments = if (Arguments != void)
Types.toArguments
else
struct {
fn lambda(raw_arguments: syscall.Arguments) error{}!void {
_ = raw_arguments;
return {};
}
}.lambda;
pub const capability = capability_type;
pub const command = command_type;
pub const capability = cap;
pub const command = com;
pub const Error = T.ErrorSet.Error;
pub const Result = T.Result;

pub inline fn resultToRaw(result: Result) syscall.Result {
return if (@hasDecl(Types, "resultToRaw")) blk: {
comptime assert(Result != void and Result != noreturn);
break :blk Types.resultToRaw(result);
} else blk: {
if (Result != void) {
@compileError("expected void type, got " ++ @typeName(Result) ++ ". You forgot to implement a resultToRaw function" ++ " for (" ++ @tagName(capability) ++ ", " ++ @tagName(command) ++ ").");
}
pub const toResult = d.functions.toResult;
pub const fromResult = d.functions.fromResult;
pub const toArguments = d.functions.toArguments;
pub const fromArguments = d.functions.fromArguments;

break :blk syscall.Result{
.birth = .{
.first = .{},
.second = 0,
},
};
};
}

pub inline fn errorToRaw(err: @This().ErrorSet.Error) syscall.Result {
pub inline fn fromError(err: Error) syscall.Result {
const error_enum = switch (err) {
inline else => |comptime_error| @field(@This().ErrorSet.Enum, @errorName(comptime_error)),
inline else => |comptime_error| @field(T.ErrorSet.Enum, @errorName(comptime_error)),
};
return syscall.Result{
.birth = .{
@@ -347,9 +462,8 @@ pub fn Syscall(comptime capability_type: Type, comptime command_type: Command(ca
};
}

/// This is not meant to be called in the CPU driver
pub fn blocking(arguments: Arguments) @This().ErrorSet.Error!Result {
const raw_arguments = if (Arguments != void) Types.argumentsToRaw(arguments) else [1]usize{0} ** raw_argument_count;
pub fn blocking(arguments: T.Arguments) Error!Result {
const raw_arguments = d.functions.fromArguments(arguments);
// TODO: make this more reliable and robust?
const options = birth.syscall.Options{
.birth = .{
@@ -362,20 +476,46 @@ pub fn Syscall(comptime capability_type: Type, comptime command_type: Command(ca

const raw_error_value = raw_result.birth.first.@"error";
comptime {
assert(!@hasField(@This().ErrorSet.Enum, "ok"));
assert(!@hasField(@This().ErrorSet.Enum, "success"));
assert(lib.enumFields(@This().ErrorSet.Enum)[0].value == first_valid_error);
assert(lib.enumFields(T.ErrorSet.Enum)[0].value == first_valid_error);
}

return switch (raw_error_value) {
success => switch (Result) {
success => switch (T.Result) {
noreturn => unreachable,
else => toResult(raw_result.birth),
else => d.functions.toResult(raw_result.birth),
},
else => switch (@as(@This().ErrorSet.Enum, @enumFromInt(raw_error_value))) {
inline else => |comptime_error_enum| @field(@This().ErrorSet.Error, @tagName(comptime_error_enum)),
else => switch (@as(T.ErrorSet.Enum, @enumFromInt(raw_error_value))) {
inline else => |comptime_error_enum| @field(Error, @tagName(comptime_error_enum)),
},
};
}

pub fn buffer(command_buffer: *birth.CommandBuffer, arguments: T.Arguments) void {
_ = arguments;
_ = command_buffer;

@panic("TODO: buffer");
}
};
}

inline fn voidToResult(raw_result: syscall.Result.Birth) void {
_ = raw_result;

@panic("TODO: voidToResult");
}

inline fn voidFromResult(result: void) syscall.Result {
_ = result;

@panic("TODO: voidFromResult");
}

inline fn voidToArguments(raw_arguments: syscall.Arguments) DefaultErrorSet.Error!void {
_ = raw_arguments;
}

inline fn voidFromArguments(arguments: void) syscall.Arguments {
_ = arguments;
return [6]usize{0};
}
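
A recurring idiom in this hunk is zero-padding a short, comptime-length argument prefix up to the fixed six-register array with result ++ .{0} ** (raw_argument_count - result.len). A standalone sketch of the same packing for a byte slice; fromSlice is a hypothetical helper, not one of the kernel's marshaling functions:

// Sketch of the argument-packing pattern: a meaningful prefix is padded
// with zeros at comptime to the full fixed-size register array.
const std = @import("std");

const raw_argument_count = 6;
const Arguments = [raw_argument_count]usize;

inline fn fromSlice(slice: []const u8) Arguments {
    // Two meaningful registers (pointer and length)...
    const result = [2]usize{ @intFromPtr(slice.ptr), slice.len };
    // ...then zero padding up to the register count.
    return result ++ .{0} ** (raw_argument_count - result.len);
}

test "pack a slice into six registers" {
    const args = fromSlice("hello");
    try std.testing.expectEqual(@as(usize, 5), args[1]);
    try std.testing.expectEqual(@as(usize, 0), args[5]);
}
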
@@ -5,8 +5,7 @@ const log = lib.log.scoped(.Syscall);
const birth = @import("birth");
const capabilities = birth.capabilities;

-pub const argument_count = 6;
-pub const Arguments = [argument_count]usize;
+pub const Arguments = [6]usize;

pub const Convention = enum(u1) {
linux = 0,
@@ -44,18 +43,20 @@ pub const Options = extern union {
pub const Birth = packed struct(u64) {
type: capabilities.Type,
command: capabilities.Subtype,
-reserved: lib.IntType(.unsigned, @bitSizeOf(u64) - @bitSizeOf(capabilities.Type) - @bitSizeOf(capabilities.Subtype) - @bitSizeOf(Convention)) = 0,
+reserved: ReservedInt = 0,
convention: Convention = .birth,

+const ReservedInt = lib.IntType(.unsigned, @bitSizeOf(u64) - @bitSizeOf(capabilities.Type) - @bitSizeOf(capabilities.Subtype) - @bitSizeOf(Convention));
+
comptime {
Options.assertSize(@This());
}

-const IDInteger = u16;
-pub const ID = enum(IDInteger) {
-qemu_exit = 0,
-print = 1,
-};
+// const IDInteger = u16;
+// pub const ID = enum(IDInteger) {
+// qemu_exit = 0,
+// print = 1,
+// };
};

pub const Linux = enum(u64) {
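
Splitting the reserved-width expression out into ReservedInt keeps the packed struct readable while preserving the invariant that type + command + reserved + convention always total 64 bits. A standalone sketch of the same computation with stand-in field widths, since the real capabilities.Type and capabilities.Subtype widths are not shown in this diff:

// Sketch of the reserved-width computation in Options.Birth: the padding
// integer is sized so the packed struct always fills 64 bits exactly.
const std = @import("std");

const Type = u8; // stand-in for capabilities.Type
const Subtype = u16; // stand-in for capabilities.Subtype
const Convention = enum(u1) { linux = 0, birth = 1 };

const ReservedInt = std.meta.Int(
    .unsigned,
    @bitSizeOf(u64) - @bitSizeOf(Type) - @bitSizeOf(Subtype) - @bitSizeOf(Convention),
);

const Birth = packed struct(u64) {
    type: Type,
    command: Subtype,
    reserved: ReservedInt = 0,
    convention: Convention = .birth,
};

comptime {
    // packed struct(u64) already enforces this; the assert documents intent.
    std.debug.assert(@bitSizeOf(Birth) == 64);
}
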
@@ -68,10 +68,8 @@ pub const Disk = extern struct {
.segment = 0,
.lba = lba,
};
-lib.log.debug("DAP: {}", .{dap});

const dap_address = @intFromPtr(&dap);
-lib.log.debug("DAP address: 0x{x}", .{dap_address});
const dap_offset = offset(dap_address);
const dap_segment = segment(dap_address);
var registers = Registers{
@@ -81,9 +79,7 @@ pub const Disk = extern struct {
.ds = dap_segment,
};

-lib.log.debug("Start int", .{});
interrupt(0x13, &registers, &registers);
-lib.log.debug("End int", .{});

if (registers.eflags.flags.carry_flag) return error.read_error;

@@ -92,7 +88,6 @@ pub const Disk = extern struct {
const src_slice = buffer[0..bytes_to_copy];

if (maybe_provided_buffer) |provided_buffer| {
-lib.log.debug("A", .{});
const dst_slice = provided_buffer[@as(usize, @intCast(provided_buffer_offset))..][0..bytes_to_copy];

// TODO: report Zig that this codegen is so bad that we have to use rep movsb instead to make it go fast
@@ -114,7 +109,7 @@ pub const Disk = extern struct {
@memcpy(dst_slice, src_slice);
}
} else {
-lib.log.debug("B", .{});
+//lib.log.debug("B", .{});
}
}
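
The BIOS disk code builds a Disk Address Packet (dap), then splits its linear address into the segment:offset pair that real-mode INT 13h expects via the segment() and offset() helpers. Their bodies are not part of this diff; the sketch below shows the conventional split for an address below 1 MiB and is an assumption, not necessarily this codebase's exact implementation:

// Sketch of a conventional real-mode segment:offset split (assumed, since
// the helpers' bodies are not shown in this diff).
const std = @import("std");

inline fn segment(linear: usize) u16 {
    return @intCast((linear & 0xf_fff0) >> 4);
}

inline fn offset(linear: usize) u16 {
    return @intCast(linear & 0xf);
}

test "segment:offset recombines to the linear address" {
    const linear: usize = 0x7c3a;
    const recombined = (@as(usize, segment(linear)) << 4) + offset(linear);
    try std.testing.expectEqual(linear, recombined);
}
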
@@ -5,8 +5,7 @@ const log = lib.log;
const privileged = @import("privileged");
const ACPI = privileged.ACPI;
-const MemoryManager = privileged.MemoryManager;
-const PhysicalHeap = privileged.PhyicalHeap;
-const writer = privileged.writer;
+pub const writer = privileged.writer;

const stopCPU = privileged.arch.stopCPU;
const GDT = privileged.arch.x86_64.GDT;
@@ -54,10 +53,7 @@ pub const std_options = struct {
_ = format;
_ = scope;
_ = level;
// _ = level;
// writer.writeByte('[') catch stopCPU();
// writer.writeAll(@tagName(scope)) catch stopCPU();
// writer.writeAll("] ") catch stopCPU();
_ = level;
// lib.format(writer, format, args) catch stopCPU();
// writer.writeByte('\n') catch stopCPU();
}
@@ -83,17 +79,13 @@ const Filesystem = extern struct {
}

pub fn readFile(filesystem: *Filesystem, file_path: []const u8, file_buffer: []u8) ![]const u8 {
-log.debug("File {s} read started", .{file_path});
assert(filesystem.fat_allocator.allocated <= filesystem.fat_allocator.buffer.len);
const file = try filesystem.fat_cache.readFileToBuffer(file_path, file_buffer);
-log.debug("File read succeeded", .{});
return file;
}

pub fn sneakFile(filesystem: *Filesystem, file_path: []const u8, size: usize) ![]const u8 {
-log.debug("File {s} read started", .{file_path});
const file = try filesystem.fat_cache.readFileToCache(file_path, size);
-log.debug("File read succeeded", .{});
return file;
}

@@ -25,9 +25,6 @@ pub const page_size = 0x1000;
pub const page_shifter = lib.arch.page_shifter(page_size);

const privileged = @import("privileged");
-const PhysicalAddress = privileged.PhysicalAddress;
-const VirtualAddress = privileged.VirtualAddress;
-const VirtualMemoryRegion = privileged.VirtualMemoryRegion;
const stopCPU = privileged.arch.stopCPU;

pub fn panic(comptime format: []const u8, arguments: anytype) noreturn {
401  src/cpu.zig
@@ -13,34 +13,25 @@ const PhysicalAddress = lib.PhysicalAddress;
const PhysicalAddressSpace = lib.PhysicalAddressSpace;
const PhysicalMemoryRegion = lib.PhysicalMemoryRegion;
const stopCPU = privileged.arch.stopCPU;
-const VirtualAddress = privileged.VirtualAddress;
-const VirtualMemoryRegion = privileged.VirtualMemoryRegion;
+const VirtualAddress = lib.VirtualAddress;
+const VirtualMemoryRegion = lib.VirtualMemoryRegion;

const birth = @import("birth");

-pub const test_runner = @import("cpu/test_runner.zig");
pub const arch = @import("cpu/arch.zig");
pub const capabilities = @import("cpu/capabilities.zig");
+pub const syscall = @import("cpu/syscall.zig");

pub export var stack: [0x8000]u8 align(0x1000) = undefined;
-pub export var page_allocator = PageAllocator{
-.head = null,
-.list_allocator = .{
-.u = .{
-.primitive = .{
-.backing_4k_page = undefined,
-.allocated = 0,
-},
-},
-.primitive = true,
-},
-};

pub var bundle: []const u8 = &.{};
pub var bundle_files: []const u8 = &.{};

pub export var user_scheduler: *UserScheduler = undefined;
pub export var driver: *align(lib.arch.valid_page_sizes[0]) Driver = undefined;
+pub export var heap = Heap{};
pub var debug_info: lib.ModuleDebugInfo = undefined;
pub export var page_tables: CPUPageTables = undefined;
pub var file: []align(lib.default_sector_size) const u8 = undefined;
pub export var core_id: u32 = 0;
@@ -66,11 +57,15 @@ pub const Driver = extern struct {

/// This data structure holds the information needed to run a program in a core (cpu side)
pub const UserScheduler = extern struct {
-capability_root_node: capabilities.Root,
-common: *birth.UserScheduler,
+s: S,
padding: [padding_byte_count]u8 = .{0} ** padding_byte_count,

-const total_size = @sizeOf(capabilities.Root) + @sizeOf(*birth.UserScheduler);
+const S = extern struct {
+capability_root_node: capabilities.Root,
+common: *birth.UserScheduler,
+};
+
+const total_size = @sizeOf(S);
const aligned_size = lib.alignForward(usize, total_size, lib.arch.valid_page_sizes[0]);
const padding_byte_count = aligned_size - total_size;
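
UserScheduler now wraps its payload in S and pads the whole struct to a page boundary: aligned_size rounds total_size up, and padding_byte_count absorbs the remainder. A standalone sketch of the same arithmetic with std.mem.alignForward and a stand-in payload:

// Sketch of the page-padding arithmetic: round the payload size up to a
// page and fill the difference with padding bytes. Sizes are stand-ins.
const std = @import("std");

const page_size = 0x1000;

const Payload = extern struct {
    a: u64,
    b: u64,
};

const total_size = @sizeOf(Payload);
const aligned_size = std.mem.alignForward(usize, total_size, page_size);
const padding_byte_count = aligned_size - total_size;

const PageSized = extern struct {
    payload: Payload,
    padding: [padding_byte_count]u8 = .{0} ** padding_byte_count,
};

comptime {
    std.debug.assert(@sizeOf(PageSized) == page_size);
}
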
@@ -92,9 +87,9 @@ inline fn panicPrologue(comptime format: []const u8, arguments: anytype) !void {
try writer.writeAll(lib.Color.get(.bold));
try writer.writeAll(lib.Color.get(.red));
try writer.writeAll("[CPU DRIVER] [PANIC] ");
-try writer.writeAll(lib.Color.get(.reset));
try writer.print(format, arguments);
try writer.writeByte('\n');
+try writer.writeAll(lib.Color.get(.reset));
}

inline fn panicEpilogue() noreturn {
@@ -168,6 +163,7 @@ pub fn panic(comptime format: []const u8, arguments: anytype) noreturn {
@call(.always_inline, panicFromInstructionPointerAndFramePointer, .{ @returnAddress(), @frameAddress(), format, arguments });
}

+pub var command_count: usize = 0;
pub var syscall_count: usize = 0;

pub inline fn shutdown(exit_code: lib.QEMU.ExitCode) noreturn {
@@ -177,250 +173,147 @@ pub inline fn shutdown(exit_code: lib.QEMU.ExitCode) noreturn {
privileged.shutdown(exit_code);
}

pub const PageAllocator = extern struct {
head: ?*Entry,
list_allocator: ListAllocator,
total_allocated_size: u32 = 0,

fn getPageAllocatorInterface(pa: *PageAllocator) PageAllocatorInterface {
return .{
/// This is only meant to be used by the CPU driver
const Heap = extern struct {
allocator: lib.Allocator = .{
.callbacks = .{
.allocate = callbackAllocate,
.context = pa,
.context_type = .cpu,
};
}

fn callbackAllocate(context: ?*anyopaque, size: u64, alignment: u64, options: PageAllocatorInterface.AllocateOptions) Allocator.Allocate.Error!PhysicalMemoryRegion {
_ = options;
const pa = @as(?*PageAllocator, @ptrCast(@alignCast(context))) orelse return Allocator.Allocate.Error.OutOfMemory;
const result = try pa.allocate(size, alignment);
return result;
}

pub fn allocate(pa: *PageAllocator, size: u64, alignment: u64) Allocator.Allocate.Error!PhysicalMemoryRegion {
if (pa.head == null) {
@panic("head null");
}

const allocation = blk: {
var ptr = pa.head;
while (ptr) |entry| : (ptr = entry.next) {
if (lib.isAligned(entry.region.address.value(), alignment) and entry.region.size > size) {
const result = PhysicalMemoryRegion{
.address = entry.region.address,
.size = size,
};
entry.region.address = entry.region.address.offset(size);
entry.region.size -= size;

pa.total_allocated_size += @as(u32, @intCast(size));
// log.debug("Allocated 0x{x}", .{size});

break :blk result;
}
}

ptr = pa.head;

while (ptr) |entry| : (ptr = entry.next) {
const aligned_address = lib.alignForward(entry.region.address.value(), alignment);
const top = entry.region.top().value();
if (aligned_address < top and top - aligned_address > size) {
// log.debug("Found region which we should be splitting: (0x{x}, 0x{x})", .{ entry.region.address.value(), entry.region.size });
// log.debug("User asked for 0x{x} bytes with alignment 0x{x}", .{ size, alignment });
// Split the addresses to obtain the desired result
const first_region_size = aligned_address - entry.region.address.value();
const first_region_address = entry.region.address;
const first_region_next = entry.next;

const second_region_address = aligned_address + size;
const second_region_size = top - aligned_address + size;

const result = PhysicalMemoryRegion{
.address = PhysicalAddress.new(aligned_address),
.size = size,
};

// log.debug("\nFirst region: (Address: 0x{x}. Size: 0x{x}).\nRegion in the middle (allocated): (Address: 0x{x}. Size: 0x{x}).\nSecond region: (Address: 0x{x}. Size: 0x{x})", .{ first_region_address, first_region_size, result.address.value(), result.size, second_region_address, second_region_size });

const new_entry = pa.list_allocator.get();
entry.* = .{
.region = .{
.address = first_region_address,
.size = first_region_size,
},
.next = new_entry,
};

new_entry.* = .{
.region = .{
.address = PhysicalAddress.new(second_region_address),
.size = second_region_size,
},
.next = first_region_next,
};
// log.debug("First entry: (Address: 0x{x}. Size: 0x{x})", .{ entry.region.address.value(), entry.region.size });
// log.debug("Second entry: (Address: 0x{x}. Size: 0x{x})", .{ new_entry.region.address.value(), new_entry.region.size });

// pa.total_allocated_size += @intCast(u32, size);
// log.debug("Allocated 0x{x}", .{size});

break :blk result;
}
}

log.err("Allocate error. Size: 0x{x}. Alignment: 0x{x}. Total allocated size: 0x{x}", .{ size, alignment, pa.total_allocated_size });
return Allocator.Allocate.Error.OutOfMemory;
};

//log.debug("Physical allocation: 0x{x}, 0x{x}", .{ allocation.address.value(), allocation.size });

@memset(allocation.toHigherHalfVirtualAddress().access(u8), 0);

return allocation;
}

pub inline fn fromBSP(bootloader_information: *bootloader.Information) InitializationError!PageAllocator {
const memory_map_entries = bootloader_information.getMemoryMapEntries();
const page_counters = bootloader_information.getPageCounters();

var total_size: usize = 0;
const page_shifter = lib.arch.page_shifter(lib.arch.valid_page_sizes[0]);

for (memory_map_entries, page_counters) |entry, page_counter| {
if (entry.type != .usable or !lib.isAligned(entry.region.size, lib.arch.valid_page_sizes[0]) or entry.region.address.value() < lib.mb) {
continue;
}

total_size += entry.region.size - (page_counter << page_shifter);
}

const cpu_count = bootloader_information.smp.cpu_count;
const total_memory_to_take = total_size / cpu_count;

// Look for a 4K page to host the memory map
const backing_4k_page = for (memory_map_entries, page_counters) |entry, *page_counter| {
const occupied_size = page_counter.* << page_shifter;
const entry_size_left = entry.region.size - occupied_size;
if (entry_size_left != 0) {
if (entry.type != .usable or !lib.isAligned(entry.region.size, lib.arch.valid_page_sizes[0]) or entry.region.address.value() < lib.mb) continue;

assert(lib.isAligned(entry_size_left, lib.arch.valid_page_sizes[0]));
page_counter.* += 1;
break entry.region.address.offset(occupied_size);
}
} else return InitializationError.bootstrap_region_not_found;

var memory_taken: usize = 0;
var backing_4k_page_memory_allocated: usize = 0;

var last_entry: ?*Entry = null;
var first_entry: ?*Entry = null;

for (memory_map_entries, page_counters) |entry, *page_counter| {
if (entry.type != .usable or !lib.isAligned(entry.region.size, lib.arch.valid_page_sizes[0]) or entry.region.address.value() < lib.mb) continue;

const occupied_size = page_counter.* << page_shifter;

if (occupied_size < entry.region.size) {
const entry_size_left = entry.region.size - occupied_size;

var memory_taken_from_region: usize = 0;
while (memory_taken + memory_taken_from_region < total_memory_to_take) {
if (memory_taken_from_region == entry_size_left) break;

const size_to_take = @min(2 * lib.mb, entry_size_left);
memory_taken_from_region += size_to_take;
}

memory_taken += memory_taken_from_region;

page_counter.* += @as(u32, @intCast(memory_taken_from_region >> page_shifter));
const region_descriptor = .{
.address = entry.region.offset(occupied_size).address,
.size = memory_taken_from_region,
};

if (backing_4k_page_memory_allocated >= lib.arch.valid_page_sizes[0]) return InitializationError.memory_exceeded;
const entry_address = backing_4k_page.offset(backing_4k_page_memory_allocated);
const new_entry = entry_address.toHigherHalfVirtualAddress().access(*Entry);
backing_4k_page_memory_allocated += @sizeOf(Entry);

new_entry.* = .{
.region = .{
.address = region_descriptor.address,
.size = region_descriptor.size,
},
.next = null,
};

if (last_entry) |e| {
e.next = new_entry;
} else {
first_entry = new_entry;
}

last_entry = new_entry;

if (memory_taken >= total_memory_to_take) break;
}
}

const result = .{
.head = first_entry,
.list_allocator = .{
.u = .{
.primitive = .{
.backing_4k_page = backing_4k_page,
.allocated = backing_4k_page_memory_allocated,
},
},
.primitive = true,
},
};

return result;
}
const ListAllocator = extern struct {
u: extern union {
primitive: extern struct {
backing_4k_page: PhysicalAddress,
allocated: u64,
},
normal: extern struct {
foo: u64,
},
},
primitive: bool,
},
regions: ?*Region = null,
region_heap: ?*Region = null,
liberated_regions: ?*Region = null,

pub fn get(list_allocator: *ListAllocator) *Entry {
switch (list_allocator.primitive) {
true => {
if (list_allocator.u.primitive.allocated < 0x1000) {
const result = list_allocator.u.primitive.backing_4k_page.offset(list_allocator.u.primitive.allocated).toHigherHalfVirtualAddress().access(*Entry);
list_allocator.u.primitive.backing_4k_page = list_allocator.u.primitive.backing_4k_page.offset(@sizeOf(Entry));
return result;
const Region = extern struct {
region: VirtualMemoryRegion,
previous: ?*Region = null,
next: ?*Region = null,
};

pub fn create(heap_allocator: *Heap, comptime T: type) lib.Allocator.Allocate.Error!*T {
const result = try heap_allocator.allocate(@sizeOf(T), @alignOf(T));
return @ptrFromInt(result.address);
}

// TODO: turn the other way around: make the callback call this function
pub fn allocate(heap_allocator: *Heap, size: u64, alignment: u64) lib.Allocator.Allocate.Error!lib.Allocator.Allocate.Result {
return heap_allocator.allocator.callbacks.allocate(&heap_allocator.allocator, size, alignment);
}

fn callbackAllocate(allocator: *Allocator, size: u64, alignment: u64) lib.Allocator.Allocate.Error!lib.Allocator.Allocate.Result {
// This assert is triggered by the Zig std library
//assert(lib.isAligned(size, alignment));
const heap_allocator = @fieldParentPtr(Heap, "allocator", allocator);
var iterator = heap_allocator.regions;

while (iterator) |region| : (iterator = region.next) {
if (lib.isAligned(region.region.address.value(), alignment)) {
if (region.region.size > size) {
const virtual_region = region.region.takeSlice(size) catch return error.OutOfMemory;
return .{
.address = virtual_region.address.value(),
.size = virtual_region.size,
};
} else if (region.region.size == size) {
const result = .{
.address = region.region.address.value(),
.size = region.region.size,
};
region.previous.?.next = region.next;

region.* = lib.zeroes(Region);

if (heap_allocator.liberated_regions) |_| {
var inner_iterator = heap_allocator.liberated_regions;
while (inner_iterator) |inner_region| : (inner_iterator = inner_region.next) {
if (inner_region.next == null) {
inner_region.next = region;
region.previous = inner_region;
break;
}
}
} else {
@panic("reached limit");
heap_allocator.liberated_regions = region;
}
},
false => {
@panic("not primitive allocator not implemented");
},

return result;
} else {
continue;
}
}
// TODO: else
// Contemplate options to split the region to satisfy alignment
} else {
const total_size = lib.alignForward(u64, size, @max(alignment, lib.arch.valid_page_sizes[0]));
const physical_region = try driver.getRootCapability().allocateRAMPrivileged(total_size);
const virtual_region = physical_region.toHigherHalfVirtualAddress();

if (virtual_region.size > size) {
const new_region = nr: {
var region_heap_iterator: ?*Region = heap_allocator.region_heap;
while (region_heap_iterator) |region| : (region_heap_iterator = region.next) {
if (region.region.size > @sizeOf(Region)) {
@panic("TODO: fits");
} else if (region.region.size == @sizeOf(Region)) {
@panic("TODO: fits exactly");
} else {
continue;
}

break :nr undefined;
} else {
const physical_heap_region = try driver.getRootCapability().allocateRAMPrivileged(lib.arch.valid_page_sizes[0]);
var virtual_heap_region = physical_heap_region.toHigherHalfVirtualAddress();
const virtual_region_for_this_region = virtual_heap_region.takeSlice(@sizeOf(Region)) catch return error.OutOfMemory;
const this_region = virtual_region_for_this_region.address.access(*Region);
const virtual_region_for_new_region = virtual_heap_region.takeSlice(@sizeOf(Region)) catch return error.OutOfMemory;
const new_region = virtual_region_for_new_region.address.access(*Region);
new_region.* = .{
.region = undefined,
.previous = this_region,
};
this_region.* = .{
.region = virtual_heap_region,
.next = new_region,
};

var region_iterator = heap.regions;
if (region_iterator) |_| {
while (region_iterator) |region| : (region_iterator = region.next) {
if (region.next == null) {
region.next = this_region;
this_region.previous = region;
break;
}
}
} else {
heap.regions = this_region;
}

break :nr new_region;
}
};

var region_slicer = virtual_region;
const real_virtual_region = region_slicer.takeSlice(size) catch return error.OutOfMemory;
const result = .{
.address = real_virtual_region.address.value(),
.size = real_virtual_region.size,
};
new_region.region = region_slicer;

return result;
} else {
// TODO: register this allocation somehow
return .{
.address = virtual_region.address.value(),
.size = virtual_region.size,
};
}
}
};

pub const Entry = extern struct {
region: PhysicalMemoryRegion,
next: ?*Entry,
};

const InitializationError = error{
bootstrap_region_not_found,
memory_exceeded,
};
@panic("TODO: callbackAllocate");
}
};

// fn getDebugInformation() !lib.ModuleDebugInfo {
@ -46,8 +46,6 @@ pub fn entryPoint() callconv(.Naked) noreturn {
|
||||
[main] "{rax}" (&main),
|
||||
: "rsp", "rbp"
|
||||
);
|
||||
|
||||
unreachable;
|
||||
}
|
||||
|
||||
const InitializationError = error{
|
||||
@ -297,46 +295,123 @@ fn initialize(bootloader_information: *bootloader.Information) !noreturn {
}
} else @panic("Total physical region not found");

var offset: usize = 0;
// Quick and dirty allocator to use only in this function
//

cpu.driver = total_physical.region.offset(offset).address.toHigherHalfVirtualAddress().access(*align(lib.arch.valid_page_sizes[0]) cpu.Driver);
offset += @sizeOf(cpu.Driver);
const allocations = blk: {
const RegionAllocator = extern struct {
region: PhysicalMemoryRegion,
offset: usize = 0,

const root_capability = total_physical.region.offset(offset).address.toHigherHalfVirtualAddress().access(*cpu.capabilities.Root);
offset += @sizeOf(cpu.capabilities.Root);
const Error = error{
OutOfMemory,
not_empty,
misalignment,
};

var heap_offset: usize = 0;
const heap_region = total_physical.region.offset(offset);
assert(heap_region.size == lib.arch.valid_page_sizes[0]);
const host_free_ram = heap_region.offset(heap_offset).address.toHigherHalfVirtualAddress().access(*cpu.capabilities.RAM.Region);
host_free_ram.* = .{
.region = PhysicalMemoryRegion.new(.{
.address = total_physical.region.offset(total_to_allocate).address,
.size = total_physical.free_size,
}),
};
heap_offset += @sizeOf(cpu.capabilities.RAM.Region);
const privileged_cpu_memory = heap_region.offset(heap_offset).address.toHigherHalfVirtualAddress().access(*cpu.capabilities.RAM.Region);
privileged_cpu_memory.* = .{
.region = total_physical.region,
fn allocate(allocator: *@This(), comptime T: type) Error!*T {
return allocator.allocateAligned(T, null);
}

fn allocateAligned(allocator: *@This(), comptime T: type, comptime maybe_alignment: ?u29) Error!if (maybe_alignment) |alignment| *align(alignment) T else *T {
const alignment = maybe_alignment orelse @alignOf(T);
if (!lib.isAligned(allocator.offset, alignment)) {
log.err("Allocator offset not aligned to demanded alignment: 0x{x}", .{allocator.offset});
return Error.misalignment;
}

const physical_allocation = try allocator.allocatePhysical(@sizeOf(T));
return physical_allocation.address.toHigherHalfVirtualAddress().access(*align(alignment) T);
}

fn allocatePhysical(allocator: *@This(), size: usize) Error!PhysicalMemoryRegion {
if (!lib.isAligned(allocator.offset, lib.arch.valid_page_sizes[0])) {
log.err("Allocator offset not page-aligned: 0x{x}", .{allocator.offset});
return Error.misalignment;
}
if (!lib.isAligned(size, lib.arch.valid_page_sizes[0])) {
log.err("Size not page-aligned: 0x{x}", .{size});
return Error.misalignment;
}

var left_region = allocator.region.offset(allocator.offset);
allocator.offset += size;
return left_region.takeSlice(size) catch {
log.err("takeSlice", .{});
return Error.OutOfMemory;
};
}
};
var region_allocator = RegionAllocator{
.region = total_physical.region,
};
cpu.driver = try region_allocator.allocateAligned(cpu.Driver, lib.arch.valid_page_sizes[0]);
const root_capability = try region_allocator.allocate(cpu.capabilities.Root);
const heap_region = try region_allocator.allocatePhysical(lib.arch.valid_page_sizes[0]);

break :blk .{
.heap_region = heap_region,
.root_capability = root_capability,
};
};
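The RegionAllocator above is a plain bump allocator over one physical region. A self-contained sketch of the same idea, using std.mem.alignForward for the alignment step (names are illustrative, not the kernel's):

const std = @import("std");

const Bump = struct {
    buffer: []u8,
    offset: usize = 0,

    // Advance the cursor to the next aligned offset, then claim `size` bytes.
    fn alloc(self: *Bump, size: usize, alignment: usize) ?[]u8 {
        const aligned = std.mem.alignForward(usize, self.offset, alignment);
        if (aligned + size > self.buffer.len) return null;
        self.offset = aligned + size;
        return self.buffer[aligned..][0..size];
    }
};

test "bump allocation respects alignment" {
    // Aligned backing storage so offset alignment implies pointer alignment.
    var storage: [256]u8 align(16) = undefined;
    var bump = Bump{ .buffer = &storage };
    _ = bump.alloc(3, 1).?;
    const slice = bump.alloc(8, 16).?;
    try std.testing.expect(@intFromPtr(slice.ptr) % 16 == 0);
}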
heap_offset += @sizeOf(cpu.capabilities.RAM);
const root_capability = allocations.root_capability;
const heap_region = allocations.heap_region;

const HeapAllocator = extern struct {
region: VirtualMemoryRegion,
offset: usize = 0,

const Error = error{
misalignment,
no_space,
};

fn allocate(allocator: *@This(), comptime T: type) Error!*T {
const alignment = @alignOf(T);
const size = @sizeOf(T);

if (!lib.isAligned(allocator.offset, alignment)) {
return Error.misalignment;
}

if (allocator.region.size - allocator.offset < size) {
log.err("Region size: 0x{x}. Allocator offset: 0x{x}. Size: 0x{x}", .{ allocator.region.size, allocator.offset, size });
return Error.no_space;
}

const result = allocator.region.offset(allocator.offset).address.access(*T);
return result;
}
};

var heap_allocator = HeapAllocator{
.region = heap_region.toHigherHalfVirtualAddress(),
};
const host_free_region_list = try heap_allocator.allocate(cpu.capabilities.RegionList);

log.err("PHYSICAL MAP START", .{});

_ = try host_free_region_list.append(PhysicalMemoryRegion.new(.{
.address = total_physical.region.offset(total_to_allocate).address,
.size = total_physical.free_size,
}));

var region_list_iterator = host_free_region_list;

var previous_free_ram = host_free_ram;
for (memory_map_entries, page_counters, 0..) |memory_map_entry, page_counter, index| {
if (index == total_physical.index) continue;

if (memory_map_entry.type == .usable) {
const region = memory_map_entry.getFreeRegion(page_counter);
if (region.size > 0) {
const new_free_ram = heap_region.offset(heap_offset).address.toHigherHalfVirtualAddress().access(*cpu.capabilities.RAM.Region);
heap_offset += @sizeOf(cpu.capabilities.RAM.Region);
new_free_ram.* = .{
.region = region,
_ = region_list_iterator.append(region) catch {
const new_region_list = try heap_allocator.allocate(cpu.capabilities.RegionList);
region_list_iterator.metadata.next = new_region_list;
new_region_list.metadata.previous = region_list_iterator;
region_list_iterator = new_region_list;
_ = try region_list_iterator.append(region);
};
previous_free_ram.next = new_free_ram;
previous_free_ram = new_free_ram;
}
}
}
@ -353,29 +428,26 @@ fn initialize(bootloader_information: *bootloader.Information) !noreturn {
},
.ram = .{
.lists = blk: {
var lists = [1]?*cpu.capabilities.RAM.Region{null} ** lib.arch.reverse_valid_page_sizes.len;
var free_ram_iterator: ?*cpu.capabilities.RAM.Region = host_free_ram;
while (free_ram_iterator) |free_ram| {
comptime assert(lib.arch.reverse_valid_page_sizes.len == 3);
const next = free_ram.next;
var lists = [1]cpu.capabilities.RegionList{.{}} ** lib.arch.reverse_valid_page_sizes.len;
var list_iterator: ?*cpu.capabilities.RegionList = host_free_region_list;

if (free_ram.region.size >= lib.arch.reverse_valid_page_sizes[0]) {
const previous_first = lists[0];
lists[0] = free_ram;
free_ram.next = previous_first;
} else if (free_ram.region.size >= lib.arch.reverse_valid_page_sizes[1]) {
const previous_first = lists[1];
lists[1] = free_ram;
free_ram.next = previous_first;
} else if (free_ram.region.size >= lib.arch.reverse_valid_page_sizes[2]) {
const previous_first = lists[2];
lists[2] = free_ram;
free_ram.next = previous_first;
} else unreachable;

free_ram_iterator = next;
while (list_iterator) |list| : (list_iterator = list.metadata.next) {
for (list.getRegions()) |region| {
// TODO: make this with inline for if possible
comptime assert(lib.arch.reverse_valid_page_sizes.len == 3);
const index: usize = if (region.size >= lib.arch.reverse_valid_page_sizes[0]) 0 else if (region.size >= lib.arch.reverse_valid_page_sizes[1]) 1 else if (region.size >= lib.arch.reverse_valid_page_sizes[2]) 2 else unreachable;
_ = try lists[index].append(region);
}
}

var total_region_count: usize = 0;

for (&lists) |*list| {
total_region_count += list.metadata.count;
}

assert(total_region_count > 0);

break :blk lists;
},
},
@ -389,9 +461,22 @@ fn initialize(bootloader_information: *bootloader.Information) !noreturn {
.scheduler = .{
.memory = undefined,
},
.heap = cpu.capabilities.Root.Heap.new(heap_region, heap_offset),
.heap = cpu.capabilities.Root.Heap.new(heap_region, heap_allocator.offset),
};

if (@intFromPtr(root_capability) == @intFromPtr(cpu.driver)) {
@panic("WTF: addresses match");
}

{
var assertion_count: usize = 0;
for (root_capability.dynamic.ram.lists) |list| {
assertion_count += list.metadata.count;
}

assert(assertion_count > 0);
}

cpu.driver.* = .{
.valid = true,
.init_root_capability = .{
@ -399,9 +484,25 @@ fn initialize(bootloader_information: *bootloader.Information) !noreturn {
},
};

{
var assertion_count: usize = 0;
for (root_capability.dynamic.ram.lists) |list| {
assertion_count += list.metadata.count;
}

assert(assertion_count > 0);
}

log.err("PHYSICAL MAP END", .{});

switch (cpu.bsp) {
true => {
const init_module_descriptor = try bootloader_information.getFileDescriptor("init");
const cpu_driver_executable_descriptor = try bootloader_information.getFileDescriptor("cpu_driver");
const elf_file_allocation = try cpu.heap.allocate(lib.alignForward(usize, cpu_driver_executable_descriptor.content.len, lib.arch.valid_page_sizes[0]), lib.arch.valid_page_sizes[0]);
const elf_file = @as([*]align(lib.arch.valid_page_sizes[0]) u8, @ptrFromInt(elf_file_allocation.address))[0..elf_file_allocation.size];
@memcpy(elf_file[0..cpu_driver_executable_descriptor.content.len], cpu_driver_executable_descriptor.content);
cpu.debug_info = try lib.getDebugInformation(cpu.heap.allocator.zigUnwrap(), elf_file);
try spawnInitBSP(init_module_descriptor.content, bootloader_information.cpu_page_tables);
},
false => @panic("Implement APP"),
@ -490,8 +591,6 @@ pub fn InterruptHandler(comptime interrupt_number: u64, comptime has_error_code:
\\iretq
\\int3
::: "memory");

unreachable;
}
}.handler;
}
@ -779,105 +878,6 @@ const interrupt_handlers = [256]*const fn () callconv(.Naked) noreturn{
InterruptHandler(0xff, false),
};

const BSPEarlyAllocator = extern struct {
base: PhysicalAddress,
size: usize,
offset: usize,
allocator: Allocator = .{
.callbacks = .{
.allocate = callbackAllocate,
},
},
heap_first: ?*BSPHeapEntry = null,

const BSPHeapEntry = extern struct {
virtual_memory_region: VirtualMemoryRegion,
offset: usize = 0,
next: ?*BSPHeapEntry = null,

// pub fn create(heap: *BSPHeapEntry, comptime T: type) !*T {
// _ = heap;
// @panic("TODO: create");
// }

pub fn allocateBytes(heap: *BSPHeapEntry, size: u64, alignment: u64) ![]u8 {
assert(alignment < lib.arch.valid_page_sizes[0]);
assert(heap.virtual_memory_region.size > size);
if (!lib.isAligned(heap.virtual_memory_region.address.value(), alignment)) {
const misalignment = lib.alignForward(usize, heap.virtual_memory_region.address.value(), alignment) - heap.virtual_memory_region.address.value();
_ = heap.virtual_memory_region.takeSlice(misalignment);
}

return heap.virtual_memory_region.takeByteSlice(size);
}
};

pub fn createPageAligned(allocator: *BSPEarlyAllocator, comptime T: type) AllocatorError!*align(lib.arch.valid_page_sizes[0]) T {
return @as(*align(lib.arch.valid_page_sizes[0]) T, @ptrCast(try allocator.allocateBytes(@sizeOf(T), lib.arch.valid_page_sizes[0])));
}

pub fn allocateBytes(allocator: *BSPEarlyAllocator, size: u64, alignment: u64) AllocatorError![]align(lib.arch.valid_page_sizes[0]) u8 {
if (!lib.isAligned(size, lib.arch.valid_page_sizes[0])) return AllocatorError.bad_alignment;
if (allocator.offset + size > allocator.size) return AllocatorError.out_of_memory;

// TODO: don't trash memory
if (!lib.isAligned(allocator.base.offset(allocator.offset).value(), alignment)) {
const aligned = lib.alignForward(usize, allocator.base.offset(allocator.offset).value(), alignment);
allocator.offset += aligned - allocator.base.offset(allocator.offset).value();
}

const physical_address = allocator.base.offset(allocator.offset);
allocator.offset += size;
const slice = physical_address.toHigherHalfVirtualAddress().access([*]align(lib.arch.valid_page_sizes[0]) u8)[0..size];
@memset(slice, 0);

return slice;
}

pub fn callbackAllocate(allocator: *Allocator, size: u64, alignment: u64) Allocator.Allocate.Error!Allocator.Allocate.Result {
const early_allocator = @fieldParentPtr(BSPEarlyAllocator, "allocator", allocator);
if (alignment == lib.arch.valid_page_sizes[0] or size % lib.arch.valid_page_sizes[0] == 0) {
const result = early_allocator.allocateBytes(size, alignment) catch return Allocator.Allocate.Error.OutOfMemory;
return .{
.address = @intFromPtr(result.ptr),
.size = result.len,
};
} else if (alignment > lib.arch.valid_page_sizes[0]) {
@panic("WTF");
} else {
assert(size < lib.arch.valid_page_sizes[0]);
const heap_entry_allocation = early_allocator.allocateBytes(lib.arch.valid_page_sizes[0], lib.arch.valid_page_sizes[0]) catch return Allocator.Allocate.Error.OutOfMemory;
const heap_entry_region = VirtualMemoryRegion.fromByteSlice(.{
.slice = heap_entry_allocation,
});
const heap_entry = try early_allocator.addHeapRegion(heap_entry_region);
const result = try heap_entry.allocateBytes(size, alignment);
return .{
.address = @intFromPtr(result.ptr),
.size = result.len,
};
}
}

inline fn addHeapRegion(early_allocator: *BSPEarlyAllocator, region: VirtualMemoryRegion) !*BSPHeapEntry {
const heap_entry = region.address.access(*BSPHeapEntry);
const offset = @sizeOf(BSPHeapEntry);
heap_entry.* = .{
.offset = offset,
.virtual_memory_region = region.offset(offset),
.next = early_allocator.heap_first,
};

early_allocator.heap_first = heap_entry;

return heap_entry;
}
const AllocatorError = error{
out_of_memory,
bad_alignment,
};
};
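callbackAllocate recovers the BSPEarlyAllocator from its embedded Allocator with @fieldParentPtr. A minimal sketch of that intrusive pattern, with illustrative types and the 0.11-era builtin signature used above:

const std = @import("std");

const Inner = struct { x: u32 = 0 };

const Outer = struct {
    before: u64 = 0,
    inner: Inner = .{},
};

fn fromInner(inner: *Inner) *Outer {
    // Walks back from the field pointer to the struct that embeds it.
    return @fieldParentPtr(Outer, "inner", inner);
}

test "field parent pointer round trip" {
    var outer = Outer{};
    try std.testing.expectEqual(&outer, fromInner(&outer.inner));
}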
const half_page_table_entry_count = @divExact(paging.page_table_entry_count, 2);

fn spawnInitBSP(init_file: []const u8, cpu_page_tables: paging.CPUPageTables) !noreturn {
@ -890,7 +890,7 @@ fn spawnInitBSP(init_file: []const u8, cpu_page_tables: paging.CPUPageTables) !n
const init_elf = try ELF.Parser.init(init_file);
const entry_point = init_elf.getEntryPoint();
const program_headers = init_elf.getProgramHeaders();
const scheduler_common = init_scheduler.common;
const scheduler_common = init_scheduler.s.common;

for (program_headers) |program_header| {
if (program_header.type == .load) {
@ -904,7 +904,7 @@ fn spawnInitBSP(init_file: []const u8, cpu_page_tables: paging.CPUPageTables) !n
.user = true,
};

const segment_physical_region = try cpu.driver.getRootCapability().allocatePages(aligned_size);
const segment_physical_region = try cpu.driver.getRootCapability().allocateRAMPrivileged(aligned_size);
try page_table_regions.map(segment_virtual_address, segment_physical_region.address, segment_physical_region.size, segment_flags);

const src = init_file[program_header.offset..][0..program_header.size_in_file];
@ -1129,6 +1129,14 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult
const cpu_page_table_size = (paging.Level.count - 1) * paging.page_table_size;
const allocation_size = page_table_regions_total_size + cpu_page_table_size;
const allocation_alignment = 2 * paging.page_table_alignment;
{
var assertion_count: usize = 0;
for (cpu.driver.getRootCapability().dynamic.ram.lists) |list| {
assertion_count += list.metadata.count;
}

assert(assertion_count > 0);
}
const total_region = try cpu.driver.getRootCapability().allocatePageCustomAlignment(allocation_size, allocation_alignment);
//log.debug("Total region: (0x{x}, 0x{x})", .{ total_region.address.value(), total_region.top().value() });
var region_slicer = total_region;
@ -1139,7 +1147,7 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult
};

inline for (&page_table_regions.regions, 0..) |*region, index| {
region.* = region_slicer.takeSlice(PageTableRegions.sizes[index]);
region.* = try region_slicer.takeSlice(PageTableRegions.sizes[index]);
}

assert(lib.isAligned(page_table_regions.regions[0].address.value(), 2 * paging.page_table_alignment));
@ -1195,7 +1203,7 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult
};
}

const scheduler_memory_physical_region = try cpu.driver.getRootCapability().allocatePages(scheduler_memory_size);
const scheduler_memory_physical_region = try cpu.driver.getRootCapability().allocateRAMPrivileged(scheduler_memory_size);
const scheduler_memory_map_flags = .{
.present = true,
.write = true,
@ -1236,9 +1244,9 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult
const src_half = (try current_address_space.getPML4TableUnchecked())[half_page_table_entry_count..][0..half_page_table_entry_count];
@memcpy(root_page_tables[0].toHigherHalfVirtualAddress().access(paging.PML4TE)[half_page_table_entry_count..][0..half_page_table_entry_count], src_half);

const pdp = cpu_page_table_physical_region_iterator.takeSlice(paging.page_table_size);
const pd = cpu_page_table_physical_region_iterator.takeSlice(paging.page_table_size);
const pt = cpu_page_table_physical_region_iterator.takeSlice(paging.page_table_size);
const pdp = try cpu_page_table_physical_region_iterator.takeSlice(paging.page_table_size);
const pd = try cpu_page_table_physical_region_iterator.takeSlice(paging.page_table_size);
const pt = try cpu_page_table_physical_region_iterator.takeSlice(paging.page_table_size);
assert(cpu_page_table_physical_region_iterator.size == 0);

const pdp_table = pdp.toHigherHalfVirtualAddress().access(paging.PDPTE);
@ -1298,7 +1306,7 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult
// log.debug("cpu indexed base: 0x{x}. top: 0x{x}", .{ @bitCast(u64, cpu_indexed_base), @bitCast(u64, cpu_indexed_top) });

const support_page_table_count = @as(usize, support_pdp_table_count + support_pd_table_count + support_p_table_count);
const support_page_table_physical_region = try cpu.driver.getRootCapability().allocatePages(support_page_table_count * paging.page_table_size);
const support_page_table_physical_region = try cpu.driver.getRootCapability().allocateRAMPrivileged(support_page_table_count * paging.page_table_size);
// log.debug("Support page tables: 0x{x} - 0x{x}", .{ support_page_table_physical_region.address.value(), support_page_table_physical_region.top().value() });
// log.debug("PD table count: {}. P table count: {}", .{ support_pd_table_count, support_p_table_count });
@ -1360,7 +1368,7 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult
}

{
const privileged_stack_physical_region = try cpu.driver.getRootCapability().allocatePages(x86_64.capability_address_space_stack_size);
const privileged_stack_physical_region = try cpu.driver.getRootCapability().allocateRAMPrivileged(x86_64.capability_address_space_stack_size);
const indexed_privileged_stack = @as(paging.IndexedVirtualAddress, @bitCast(x86_64.capability_address_space_stack_address.value()));
const stack_last_page = x86_64.capability_address_space_stack_address.offset(x86_64.capability_address_space_stack_size - lib.arch.valid_page_sizes[0]);
const indexed_privileged_stack_last_page = @as(paging.IndexedVirtualAddress, @bitCast(stack_last_page.value()));
@ -1372,7 +1380,7 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult

const pdpte = &(try paging.accessPageTable(PhysicalAddress.new(paging.unpackAddress(pml4te)), *paging.PDPTable))[indexed_privileged_stack.PDP];
assert(!pdpte.present);
const pd_table_physical_region = try cpu.driver.getRootCapability().allocatePages(paging.page_table_size);
const pd_table_physical_region = try cpu.driver.getRootCapability().allocateRAMPrivileged(paging.page_table_size);
pdpte.* = paging.PDPTE{
.present = true,
.write = true,
@ -1381,7 +1389,7 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult

const pdte = &(try paging.accessPageTable(PhysicalAddress.new(paging.unpackAddress(pdpte)), *paging.PDTable))[indexed_privileged_stack.PD];
assert(!pdte.present);
const p_table_physical_region = try cpu.driver.getRootCapability().allocatePages(paging.page_table_size);
const p_table_physical_region = try cpu.driver.getRootCapability().allocateRAMPrivileged(paging.page_table_size);
pdte.* = paging.PDTE{
.present = true,
.write = true,
@ -1398,7 +1406,7 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult
}
}

const init_cpu_scheduler_physical_region = try cpu.driver.getRootCapability().allocatePages(@sizeOf(cpu.UserScheduler));
const init_cpu_scheduler_physical_region = try cpu.driver.getRootCapability().allocateRAMPrivileged(@sizeOf(cpu.UserScheduler));
const init_cpu_scheduler_virtual_region = init_cpu_scheduler_physical_region.toHigherHalfVirtualAddress();
const init_cpu_scheduler = init_cpu_scheduler_virtual_region.address.access(*cpu.UserScheduler);
// log.debug("Init scheduler: 0x{x}", .{init_cpu_scheduler_virtual_region.address.value()});
@ -1420,7 +1428,7 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult
assert(scheduler_pdpte.present == pdp_is_inside);

if (!scheduler_pdpte.present) {
const pdte_allocation = try cpu.driver.getRootCapability().allocatePages(paging.page_table_size);
const pdte_allocation = try cpu.driver.getRootCapability().allocateRAMPrivileged(paging.page_table_size);
scheduler_pdpte.* = .{
.present = true,
.write = true,
@ -1435,7 +1443,7 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult
const is_inside_cpu_page_table_limits = cpu_scheduler_indexed.PD >= cpu_indexed_base.PD and cpu_scheduler_indexed.PD <= cpu_indexed_top.PD;
assert(is_inside_cpu_page_table_limits == scheduler_pdte.present);
if (!scheduler_pdte.present) {
const pte_allocation = try cpu.driver.getRootCapability().allocatePages(paging.page_table_size);
const pte_allocation = try cpu.driver.getRootCapability().allocateRAMPrivileged(paging.page_table_size);
scheduler_pdte.* = .{
.present = true,
.write = true,
@ -1452,28 +1460,30 @@ fn spawnInitCommon(cpu_page_tables: paging.CPUPageTables) !SpawnInitCommonResult
});

init_cpu_scheduler.* = cpu.UserScheduler{
.common = user_scheduler_virtual_address.access(*birth.UserScheduler),
.capability_root_node = cpu.capabilities.Root{
.static = .{
.cpu = true,
.boot = true,
.process = true,
},
.dynamic = .{
.io = .{
.debug = true,
.s = .{
.common = user_scheduler_virtual_address.access(*birth.UserScheduler),
.capability_root_node = cpu.capabilities.Root{
.static = .{
.cpu = true,
.boot = true,
.process = true,
},
.ram = cpu.driver.getRootCapability().dynamic.ram,
.cpu_memory = .{
.flags = .{
.allocate = true,
.dynamic = .{
.io = .{
.debug = true,
},
.ram = cpu.driver.getRootCapability().dynamic.ram,
.cpu_memory = .{
.flags = .{
.allocate = true,
},
},
.page_table = .{},
},
.scheduler = .{
.handle = init_cpu_scheduler,
.memory = scheduler_memory_physical_region,
},
.page_table = .{},
},
.scheduler = .{
.handle = init_cpu_scheduler,
.memory = scheduler_memory_physical_region,
},
},
};
@ -24,76 +24,10 @@ const pcid_mask = 1 << pcid_bit;
/// - R10: argument 3
/// - R8: argument 4
/// - R9: argument 5
fn birthSyscall(comptime Syscall: type, raw_arguments: birth.syscall.Arguments) Syscall.ErrorSet.Error!Syscall.Result {
cpu.syscall_count += 1;
comptime assert(Syscall == birth.capabilities.Syscall(Syscall.capability, Syscall.command));
const capability: birth.capabilities.Type = Syscall.capability;
const command: birth.capabilities.Command(capability) = Syscall.command;
const arguments = try Syscall.toArguments(raw_arguments);

return if (cpu.user_scheduler.capability_root_node.hasPermissions(capability, command)) switch (capability) {
.io => switch (command) {
.copy, .mint, .retype, .delete, .revoke, .create => unreachable,
.log => blk: {
const message = arguments;
cpu.writer.writeAll(message) catch unreachable;
comptime assert(Syscall.Result == usize);
break :blk message.len;
},
},
.cpu => switch (command) {
.copy, .mint, .retype, .delete, .revoke, .create => unreachable,
.get_core_id => cpu.core_id,
.shutdown => cpu.shutdown(.success),
.get_command_buffer => {
const command_buffer = arguments;
_ = command_buffer;
@panic("TODO: get_command_buffer");
},
},
.cpu_memory => switch (command) {
.allocate => blk: {
comptime assert(@TypeOf(arguments) == usize);
const size = arguments;
const physical_region = try cpu.user_scheduler.capability_root_node.allocatePages(size);
try cpu.user_scheduler.capability_root_node.allocateCPUMemory(physical_region, .{ .privileged = false });
break :blk physical_region.address;
},
else => @panic(@tagName(command)),
},
.ram => unreachable,
.boot => switch (command) {
.get_bundle_size => cpu.bundle.len,
.get_bundle_file_list_size => cpu.bundle_files.len,
else => @panic(@tagName(command)),
},
.process => switch (command) {
.exit => switch (arguments) {
true => cpu.shutdown(.success),
false => cpu.panic("User process panicked", .{}),
},
else => @panic(@tagName(command)),
},
.page_table => @panic("TODO: page_table"),
} else error.forbidden;
}

export fn syscall(registers: *const Registers) callconv(.C) birth.syscall.Result {
const options = @as(birth.syscall.Options, @bitCast(registers.syscall_number));
const arguments = birth.syscall.Arguments{ registers.rdi, registers.rsi, registers.rdx, registers.r10, registers.r8, registers.r9 };

return switch (options.general.convention) {
.birth => switch (options.birth.type) {
inline else => |capability| switch (@as(birth.capabilities.Command(capability), @enumFromInt(options.birth.command))) {
inline else => |command| blk: {
const Syscall = birth.capabilities.Syscall(capability, command);
const result: Syscall.Result = birthSyscall(Syscall, arguments) catch |err| break :blk Syscall.errorToRaw(err);
break :blk Syscall.resultToRaw(result);
},
},
},
.linux => @panic("linux syscall"),
};
return cpu.syscall.process(options, arguments);
}

/// SYSCALL documentation
@ -107,7 +41,7 @@ export fn syscall(registers: *const Registers) callconv(.C) birth.syscall.Result
/// - R10: argument 3
/// - R8: argument 4
/// - R9: argument 5
pub fn entryPoint() callconv(.Naked) void {
pub fn entryPoint() callconv(.Naked) noreturn {
asm volatile (
\\endbr64
\\swapgs
@ -252,8 +186,6 @@ pub fn entryPoint() callconv(.Naked) void {
asm volatile (
\\int3
::: "memory");

unreachable;
}

pub const Registers = extern struct {
@ -10,6 +10,69 @@ const PhysicalMemoryRegion = lib.PhysicalMemoryRegion;
const birth = @import("birth");
const cpu = @import("cpu");

pub fn processCommand(comptime Syscall: type, raw_arguments: birth.syscall.Arguments) Syscall.Error!Syscall.Result {
defer cpu.command_count += 1;
comptime assert(Syscall == birth.capabilities.Syscall(Syscall.capability, Syscall.command));
const capability: birth.capabilities.Type = Syscall.capability;
const command: birth.capabilities.Command(capability) = Syscall.command;
const arguments = try Syscall.toArguments(raw_arguments);

return if (cpu.user_scheduler.s.capability_root_node.hasPermissions(capability, command)) switch (capability) {
.io => switch (command) {
.copy, .mint, .retype, .delete, .revoke, .create => unreachable,
.log => blk: {
const message = arguments;
cpu.writer.writeAll(message) catch unreachable;
comptime assert(Syscall.Result == usize);
break :blk message.len;
},
},
.cpu => switch (command) {
.copy, .mint, .retype, .delete, .revoke, .create => unreachable,
.get_core_id => cpu.core_id,
.shutdown => cpu.shutdown(.success),
.get_command_buffer => {
const command_buffer = arguments;
_ = command_buffer;
@panic("TODO: get_command_buffer");
},
},
.cpu_memory => switch (command) {
// .allocate => blk: {
// comptime assert(@TypeOf(arguments) == usize);
// const size = arguments;
// const physical_region = try cpu.user_scheduler.s.capability_root_node.allocatePages(size);
// try cpu.user_scheduler.s.capability_root_node.allocateCPUMemory(physical_region, .{ .privileged = false });
// break :blk physical_region.address;
// },
else => @panic(@tagName(command)),
},
.ram => switch (command) {
.allocate => blk: {
comptime assert(@TypeOf(arguments) == usize);
const size = arguments;
const ref = try cpu.driver.getRootCapability().allocateRAM(size);
break :blk @bitCast(ref);
},
else => @panic(@tagName(command)),
},
.boot => switch (command) {
.get_bundle_size => cpu.bundle.len,
.get_bundle_file_list_size => cpu.bundle_files.len,
else => @panic(@tagName(command)),
},
.process => switch (command) {
.exit => cpu.shutdown(switch (arguments) {
true => .success,
false => .failure,
}),
.panic => cpu.panic("User process panicked with exit code 0x{x}: {s}", .{ arguments.exit_code, arguments.message }),
else => @panic(@tagName(command)),
},
.page_table => @panic("TODO: page_table"),
} else error.forbidden;
}

pub const RootDescriptor = extern struct {
value: *Root,
};
@ -83,8 +146,118 @@ pub const Dynamic = enum {
};
};

pub const RegionList = extern struct {
regions: [list_region_count]PhysicalMemoryRegion = [1]PhysicalMemoryRegion{PhysicalMemoryRegion.invalid()} ** list_region_count,
metadata: Metadata = .{},

pub const Metadata = extern struct {
count: usize = 0,
reserved: usize = 0,
previous: ?*RegionList = null,
next: ?*RegionList = null,
};

const Error = error{
OutOfMemory,
no_space,
misalignment_page_size,
};

pub fn getRegions(list: *const RegionList) []const PhysicalMemoryRegion {
return list.regions[0..list.metadata.count];
}

pub fn allocateAligned(list: *RegionList, size: usize, alignment: usize) Error!PhysicalMemoryRegion {
assert(alignment % lib.arch.valid_page_sizes[0] == 0);
const regions = list.regions[0..list.metadata.count];

for (regions, 0..) |*region, index| {
assert(region.size % lib.arch.valid_page_sizes[0] == 0);
assert(region.address.value() % lib.arch.valid_page_sizes[0] == 0);

if (lib.isAligned(region.address.value(), alignment)) {
if (region.size >= size) {
const result = region.takeSlice(size) catch unreachable;
if (region.size == 0) {
if (index != regions.len - 1) {
regions[index] = regions[regions.len - 1];
}

list.metadata.count -= 1;
}

return result;
}
}
}

return Error.OutOfMemory;
}

pub const UnalignedAllocationResult = extern struct {
wasted: PhysicalMemoryRegion,
allocated: PhysicalMemoryRegion,
};

/// Slow path
pub fn allocateAlignedSplitting(list: *RegionList, size: usize, alignment: usize) !UnalignedAllocationResult {
const regions = list.regions[0..list.metadata.count];

for (regions) |*region| {
const aligned_region_address = lib.alignForward(usize, region.address.value(), alignment);
const wasted_space = aligned_region_address - region.address.value();

if (region.size >= wasted_space + size) {
const wasted_region = try region.takeSlice(wasted_space);
const allocated_region = try region.takeSlice(size);

return UnalignedAllocationResult{
.wasted = wasted_region,
.allocated = allocated_region,
};
}
}

log.err("allocateAlignedSplitting", .{});
return error.OutOfMemory;
}

pub fn allocate(list: *RegionList, size: usize) Error!PhysicalMemoryRegion {
return list.allocateAligned(size, lib.arch.valid_page_sizes[0]);
}

pub fn append(list: *RegionList, region: PhysicalMemoryRegion) Error!birth.capabilities.RAM {
var block_count: usize = 0;
while (true) : (block_count += 1) {
if (list.metadata.count < list.regions.len) {
const block_id = block_count;
const region_id = list.metadata.count;
list.regions[list.metadata.count] = region;
list.metadata.count += 1;

return .{
.block = @intCast(block_id),
.region = @intCast(region_id),
};
} else {
return Error.no_space;
}
}
}

const cache_line_count = 5;
const list_region_count = @divExact((cache_line_count * lib.cache_line_size) - @sizeOf(Metadata), @sizeOf(PhysicalMemoryRegion));

comptime {
assert(@sizeOf(RegionList) % lib.cache_line_size == 0);
}
};
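How RegionList is meant to be used: append free physical regions, then carve page-aligned allocations back out, shrinking or swap-removing the source region. A rough standalone sketch of that shape, with plain integers standing in for PhysicalMemoryRegion (field names simplified, not the repository's):

const std = @import("std");

const Region = struct { address: usize, size: usize };

const SmallList = struct {
    regions: [4]Region = undefined,
    count: usize = 0,

    fn append(list: *SmallList, region: Region) !usize {
        if (list.count == list.regions.len) return error.no_space;
        list.regions[list.count] = region;
        list.count += 1;
        return list.count - 1;
    }

    // First fit: shrink a region from the front; swap-remove it when empty.
    fn allocate(list: *SmallList, size: usize) !Region {
        for (list.regions[0..list.count], 0..) |*region, index| {
            if (region.size >= size) {
                const result = Region{ .address = region.address, .size = size };
                region.address += size;
                region.size -= size;
                if (region.size == 0) {
                    list.regions[index] = list.regions[list.count - 1];
                    list.count -= 1;
                }
                return result;
            }
        }
        return error.OutOfMemory;
    }
};

test "append then allocate" {
    var list = SmallList{};
    _ = try list.append(.{ .address = 0x1000, .size = 0x4000 });
    const allocation = try list.allocate(0x1000);
    try std.testing.expectEqual(@as(usize, 0x1000), allocation.address);
}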

pub const RAM = extern struct {
lists: [lib.arch.reverse_valid_page_sizes.len]?*Region = .{null} ** lib.arch.valid_page_sizes.len,
lists: [lib.arch.reverse_valid_page_sizes.len]RegionList = .{.{}} ** lib.arch.valid_page_sizes.len,
allocated: RegionList = .{},
privileged: RegionList = .{},
allocate: bool = true,

const AllocateError = error{
OutOfMemory,
@ -98,39 +271,20 @@ pub const RAM = extern struct {
unreachable;
}

pub const Region = extern struct {
region: PhysicalMemoryRegion,
next: ?*@This() = null,

const UnalignedAllocationResult = extern struct {
wasted: PhysicalMemoryRegion,
allocated: PhysicalMemoryRegion,
};

inline fn allocateUnaligned(free_ram: *Region, size: usize, alignment: usize) ?UnalignedAllocationResult {
const aligned_region_address = lib.alignForward(usize, free_ram.region.address.value(), alignment);
const wasted_space = aligned_region_address - free_ram.region.address.value();
if (free_ram.region.size >= wasted_space + size) {
const wasted_region = free_ram.region.takeSlice(wasted_space);
const allocated_region = free_ram.region.takeSlice(size);
return UnalignedAllocationResult{
.wasted = wasted_region,
.allocated = allocated_region,
};
}

return null;
}
};
pub fn appendRegion(ram: *RAM, region: PhysicalMemoryRegion) !void {
_ = region;
_ = ram;
@panic("TODO: appendRegion");
}
};

pub const CPUMemory = extern struct {
privileged: RAM = .{},
user: RAM = .{},
flags: Flags,
flags: Flags = .{},

const Flags = packed struct(u64) {
allocate: bool,
allocate: bool = true,
reserved: u63 = 0,
};
};
@ -168,11 +322,11 @@ pub const Root = extern struct {
assert(@sizeOf(Root) % lib.arch.valid_page_sizes[0] == 0);
}

pub fn copy(root: *Root, other: *Root) void {
other.static = root.static;
// TODO:
other.dynamic = root.dynamic;
}
// pub fn copy(root: *Root, other: *Root) void {
// other.static = root.static;
// // TODO:
// other.dynamic = root.dynamic;
// }

pub inline fn hasPermissions(root: *const Root, comptime capability_type: birth.capabilities.Type, command: birth.capabilities.Command(capability_type)) bool {
return switch (capability_type) {
@ -187,7 +341,10 @@ pub const Root = extern struct {
.log => root.dynamic.io.debug,
},
.cpu_memory => root.dynamic.cpu_memory.flags.allocate,
.ram => unreachable,
.ram => switch (command) {
.allocate => root.dynamic.ram.allocate,
else => @panic("TODO: else => ram"),
},
.page_table => unreachable,
};
}
@ -197,30 +354,42 @@ pub const Root = extern struct {
};

// Fast path
pub fn allocatePages(root: *Root, size: usize) AllocateError!PhysicalMemoryRegion {
fn allocateRAMRaw(root: *Root, size: usize) AllocateError!PhysicalMemoryRegion {
lib.log.err("New allocation demanded: 0x{x} bytes", .{size});
assert(size != 0);
assert(lib.isAligned(size, lib.arch.valid_page_sizes[0]));
var index = RAM.getListIndex(size);

const result = blk: {
while (true) : (index -= 1) {
const list = root.dynamic.ram.lists[index];
var iterator = list;
const list = &root.dynamic.ram.lists[index];
var iterator: ?*cpu.capabilities.RegionList = list;

while (iterator) |free_ram| : (iterator = free_ram.next) {
if (free_ram.region.size >= size) {
if (free_ram.region.size >= size) {
const result = free_ram.region.takeSlice(size);
break :blk result;
} else {
@panic("TODO: consume all region");
}
}
const page_size = @as(u64, switch (index) {
0 => lib.arch.reverse_valid_page_sizes[0],
1 => lib.arch.reverse_valid_page_sizes[1],
2 => lib.arch.reverse_valid_page_sizes[2],
else => unreachable,
});

var list_count: usize = 0;
while (iterator) |free_ram_list| : ({
iterator = free_ram_list.metadata.next;
list_count += 1;
}) {
const allocation = free_ram_list.allocate(size) catch continue;
list_count += 1;
log.err("Found 0x{x}-page-size region for 0x{x} bytes after ({}/{}) lists", .{ page_size, size, list_count, list.metadata.count });
log.err("======", .{});
break :blk allocation;
}

log.err("Could not find any 0x{x}-page-size region for 0x{x} bytes after {} lists", .{ page_size, size, list.metadata.count });

if (index == 0) break;
}

log.err("allocateRAMRaw", .{});
return error.OutOfMemory;
};
@ -229,6 +398,40 @@ pub const Root = extern struct {
return result;
}

pub fn allocateRAM(root: *Root, size: usize) AllocateError!birth.capabilities.RAM {
const result = try allocateRAMRaw(root, size);
const reference = root.dynamic.ram.allocated.append(result) catch |err| {
log.err("err(user): {}", .{err});
return AllocateError.OutOfMemory;
};
return reference;
}

pub fn allocateRAMPrivileged(root: *Root, size: usize) AllocateError!PhysicalMemoryRegion {
const result = try allocateRAMRaw(root, size);
const reference = root.dynamic.ram.privileged.append(result) catch blk: {
const region_list = try cpu.heap.create(RegionList);
region_list.* = .{};
const ref = region_list.append(result) catch |err| {
log.err("Err(priv): {}", .{err});
return AllocateError.OutOfMemory;
};

var iterator: ?*RegionList = &root.dynamic.ram.privileged;
while (iterator) |rl| : (iterator = rl.metadata.next) {
if (rl.metadata.next == null) {
rl.metadata.next = region_list;
region_list.metadata.previous = rl;
break;
}
}

break :blk ref;
};
_ = reference;
return result;
}

// Slow uncommon path. Use cases:
// 1. CR3 switch. This is assumed to be privileged, so this function assumes privileged use of the memory
pub fn allocatePageCustomAlignment(root: *Root, size: usize, alignment: usize) AllocateError!PhysicalMemoryRegion {
@ -238,33 +441,35 @@ pub const Root = extern struct {
var index = RAM.getListIndex(size);

while (true) : (index -= 1) {
if (root.dynamic.ram.lists[index]) |smallest_region_list| {
var iterator: ?*cpu.capabilities.RAM.Region = smallest_region_list;
while (iterator) |free_ram| : (iterator = free_ram.next) {
if (lib.isAligned(free_ram.region.address.value(), alignment)) {
if (free_ram.region.size >= size) {
const allocated_region = free_ram.region.takeSlice(size);
return allocated_region;
}
} else if (free_ram.allocateUnaligned(size, alignment)) |unaligned_allocation| {
try root.addRegion(&root.dynamic.ram, unaligned_allocation.wasted);
return unaligned_allocation.allocated;
}
const smallest_region_list = &root.dynamic.ram.lists[index];
var iterator: ?*cpu.capabilities.RegionList = smallest_region_list;
while (iterator) |free_region_list| : (iterator = free_region_list.metadata.next) {
if (free_region_list.metadata.count > 0) {
const physical_allocation = free_region_list.allocateAligned(size, alignment) catch blk: {
const splitted_allocation = free_region_list.allocateAlignedSplitting(size, alignment) catch continue;
_ = try root.appendRegion(&root.dynamic.ram, splitted_allocation.wasted);
break :blk splitted_allocation.allocated;
};

return physical_allocation;
}
}

if (index == 0) break;
}

log.err("allocatePageCustomAlignment", .{});
return AllocateError.OutOfMemory;
}

fn allocateSingle(root: *Root, comptime T: type) AllocateError!*T {
const size = @sizeOf(T);
const alignment = @alignOf(T);
var iterator = root.heap.first;
while (iterator) |heap_region| : (iterator = heap_region.next) {
if (heap_region.alignmentFits(@alignOf(T))) {
if (heap_region.sizeFits(@sizeOf(T))) {
const allocated_region = heap_region.takeRegion(@sizeOf(T));
if (heap_region.alignmentFits(alignment)) {
if (heap_region.sizeFits(size)) {
const allocated_region = heap_region.takeRegion(size);
const result = &allocated_region.toHigherHalfVirtualAddress().access(T)[0];
return result;
}
@ -273,7 +478,7 @@ pub const Root = extern struct {
}
}

const physical_region = try root.allocatePages(lib.arch.valid_page_sizes[0]);
const physical_region = try root.allocateRAM(lib.arch.valid_page_sizes[0]);
const heap_region = physical_region.toHigherHalfVirtualAddress().address.access(*Heap.Region);
const first = root.heap.first;
heap_region.* = .{
@ -294,15 +499,11 @@ pub const Root = extern struct {
@panic("TODO many");
}

fn addRegion(root: *Root, ram: *RAM, physical_region: PhysicalMemoryRegion) !void {
const index = RAM.getListIndex(physical_region.size);
const new_region = try root.allocateSingle(RAM.Region);
new_region.* = RAM.Region{
.region = physical_region,
.next = root.dynamic.ram.lists[index],
};

ram.lists[index] = new_region;
fn appendRegion(root: *Root, ram: *RAM, region: PhysicalMemoryRegion) !birth.capabilities.RAM {
_ = root;
const index = RAM.getListIndex(region.size);
const ref = ram.lists[index].append(region) catch @panic("TODO: allocate in appendRegion");
return ref;
}

pub const AllocateCPUMemoryOptions = packed struct {
@ -315,7 +516,7 @@ pub const Root = extern struct {
false => &root.dynamic.cpu_memory.user,
};

try root.addRegion(ram_region, physical_region);
try root.appendRegion(ram_region, physical_region);
}

pub const Heap = extern struct {
@ -349,7 +550,7 @@ pub const Root = extern struct {
const allocation = heap_region.allocate(T, count) catch continue;
return allocation;
}
@panic("TODO: allocate");
@panic("TODO: heap allocate");
}

const Region = extern struct {
@ -387,20 +588,20 @@ pub const Root = extern struct {
return &result[0];
}

inline fn canAllocateDirectly(region: Region, size: usize, alignment: usize) bool {
const alignment_fits = region.alignmentFits(alignment);
const size_fits = region.sizeFits(size);
return alignment_fits and size_fits;
}
// inline fn canAllocateDirectly(region: Region, size: usize, alignment: usize) bool {
// const alignment_fits = region.alignmentFits(alignment);
// const size_fits = region.sizeFits(size);
// return alignment_fits and size_fits;
// }

inline fn canAllocateSplitting(region: Region, size: usize, alignment: usize) bool {
const free_region = region.getFreeRegion();
const aligned_region_address = lib.alignForward(usize, free_region.address.value(), alignment);
const wasted_space = aligned_region_address - free_region.address.value();
log.warn("Wasted space: {} bytes", .{wasted_space});
_ = size;
@panic("TODO: canAllocateSplitting");
}
// inline fn canAllocateSplitting(region: Region, size: usize, alignment: usize) bool {
// const free_region = region.getFreeRegion();
// const aligned_region_address = lib.alignForward(usize, free_region.address.value(), alignment);
// const wasted_space = aligned_region_address - free_region.address.value();
// log.warn("Wasted space: {} bytes", .{wasted_space});
// _ = size;
// @panic("TODO: canAllocateSplitting");
// }

inline fn sizeFits(region: Region, size: usize) bool {
return region.descriptor.size - region.allocated_size >= size;
21
src/cpu/syscall.zig
Normal file
@ -0,0 +1,21 @@
const lib = @import("lib");
const cpu = @import("cpu");
const birth = @import("birth");

pub fn process(options: birth.syscall.Options, arguments: birth.syscall.Arguments) birth.syscall.Result {
return switch (options.general.convention) {
.birth => switch (options.birth.type) {
inline else => |capability| switch (@as(birth.capabilities.Command(capability), @enumFromInt(options.birth.command))) {
inline else => |command| blk: {
const Syscall = birth.capabilities.Syscall(capability, command);
const result = cpu.capabilities.processCommand(Syscall, arguments) catch |err| {
lib.log.err("Syscall ended up in error: {}", .{err});
break :blk Syscall.fromError(err);
};
break :blk Syscall.fromResult(result);
},
},
},
.linux => @panic("linux syscall"),
};
}
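The double inline else in process is what turns a runtime (type, command) pair into a comptime-instantiated handler per case. A reduced sketch of the same dispatch shape, with a toy enum in place of the birth capability types:

const std = @import("std");

const Capability = enum { io, cpu };

fn handler(comptime capability: Capability) u32 {
    // One instantiation is generated per enum value at compile time.
    return switch (capability) {
        .io => 1,
        .cpu => 2,
    };
}

fn dispatch(capability: Capability) u32 {
    return switch (capability) {
        // `inline else` materializes a comptime-known case per variant,
        // so the capture can be passed to a comptime parameter.
        inline else => |comptime_capability| handler(comptime_capability),
    };
}

test "runtime value reaches comptime parameter" {
    try std.testing.expectEqual(@as(u32, 2), dispatch(.cpu));
}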
@ -284,7 +284,7 @@ pub fn main() anyerror!void {
// GF2, when not found in the PATH, can give problems
const use_gf = switch (lib.os) {
.macos => false,
.linux => false,
.linux => true,
else => false,
};

@ -292,7 +292,8 @@ pub fn main() anyerror!void {
if (use_gf) {
try command_line_gdb.append("gf2");
} else {
try command_line_gdb.append("kitty");
const terminal_emulator = "foot";
try command_line_gdb.append(terminal_emulator);
try command_line_gdb.append(switch (lib.os) {
.linux => "gdb",
.macos => "x86_64-elf-gdb",
@ -331,12 +332,12 @@ pub fn main() anyerror!void {
try debugger_process.spawn();
}

var process = host.ChildProcess.init(argument_list.items, wrapped_allocator.zigUnwrap());
var emulator_process = host.ChildProcess.init(argument_list.items, wrapped_allocator.zigUnwrap());
//process.stdout_behavior = .I;
const result = try process.spawnAndWait();
const emulator_process_result = try emulator_process.spawnAndWait();

if (result == .Exited) {
const exit_code = result.Exited;
if (emulator_process_result == .Exited) {
const exit_code = emulator_process_result.Exited;
if (exit_code & 1 != 0) {
const mask = lib.maxInt(@TypeOf(exit_code)) - 1;
const masked_exit_code = exit_code & mask;
@ -354,7 +355,7 @@ pub fn main() anyerror!void {
} else log.err("QEMU exited with unexpected code: {}. Masked: {}", .{ exit_code, masked_exit_code });
} else log.err("QEMU exited with unexpected code: {}", .{exit_code});
} else {
log.err("QEMU was {s}", .{@tagName(result)});
log.err("QEMU was {s}", .{@tagName(emulator_process_result)});
}
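The odd/even check above follows QEMU's isa-debug-exit convention: the device makes QEMU exit with (value << 1) | 1, so the low bit marks a device-triggered exit and the remaining bits carry the payload. A small sketch of that decoding, assuming this convention:

const std = @import("std");

fn decodeQemuExitCode(exit_code: u8) ?u8 {
    // isa-debug-exit always reports an odd code: (value << 1) | 1.
    if (exit_code & 1 == 0) return null;
    return exit_code >> 1;
}

test "decode isa-debug-exit codes" {
    try std.testing.expectEqual(@as(?u8, 0), decodeQemuExitCode(1));
    try std.testing.expectEqual(@as(?u8, null), decodeQemuExitCode(2));
}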

if (debugcon_file_used) {
173
src/lib.zig
@ -1,6 +1,8 @@
const common = @import("common.zig");
pub usingnamespace common;

pub const cache_line_size = 64;

pub const arch = @import("lib/arch.zig");
/// This is done so the allocator can respect allocating from different address spaces
pub const config = @import("lib/config.zig");
@ -293,11 +295,13 @@ pub const Allocator = extern struct {
};

pub fn zigAllocate(context: *anyopaque, size: usize, ptr_align: u8, return_address: usize) ?[*]u8 {
_ = context;
_ = size;
_ = ptr_align;
_ = return_address;
return null;
const allocator: *Allocator = @ptrCast(@alignCast(context));
// Not understanding why Zig API is like this:
const alignment = @as(u64, 1) << @as(u6, @intCast(ptr_align));
const result = allocator.allocateBytes(size, alignment) catch return null;
common.assert(result.size >= size);
return @ptrFromInt(result.address);
}
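The shift in zigAllocate exists because std.mem.Allocator's vtable passes alignment as log2 of the byte alignment, so `1 << ptr_align` recovers the actual value. A minimal check of that relationship:

const std = @import("std");

test "ptr_align is log2 of the byte alignment" {
    const ptr_align: u8 = 4; // what the Allocator vtable would pass
    const alignment = @as(u64, 1) << @as(u6, @intCast(ptr_align));
    try std.testing.expectEqual(@as(u64, 16), alignment);
    try std.testing.expectEqual(ptr_align, std.math.log2_int(u64, alignment));
}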

pub fn zigResize(context: *anyopaque, buffer: []u8, buffer_alignment: u8, new_length: usize, return_address: usize) bool {
@ -660,11 +664,9 @@ pub fn ErrorSet(comptime error_names: []const []const u8, comptime predefined_fi
};
}

pub fn getDebugInformation(allocator: common.ZigAllocator, elf_file: []align(common.default_sector_size) const u8) !common.ModuleDebugInfo {
pub fn getDebugInformation(allocator: common.ZigAllocator, elf_file: []align(arch.valid_page_sizes[0]) const u8) !common.ModuleDebugInfo {
const elf = common.elf;
var module_debug_info: common.ModuleDebugInfo = undefined;
_ = module_debug_info;
const hdr = @as(*const elf.Ehdr, @ptrCast(&elf_file[0]));
const hdr = @as(*align(1) const elf.Ehdr, @ptrCast(&elf_file[0]));
if (!common.equal(u8, hdr.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
@ -673,76 +675,92 @@ pub fn getDebugInformation(allocator: common.ZigAllocator, elf_file: []align(com
const shoff = hdr.e_shoff;
const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx);
const str_shdr = @as(
*const elf.Shdr,
@ptrCast(@alignCast(&elf_file[common.cast(usize, str_section_off) orelse return error.Overflow])),
*align(1) const elf.Shdr,
@ptrCast(&elf_file[common.cast(usize, str_section_off) orelse return error.Overflow]),
);
const header_strings = elf_file[str_shdr.sh_offset .. str_shdr.sh_offset + str_shdr.sh_size];
const shdrs = @as(
[*]const elf.Shdr,
@ptrCast(@alignCast(&elf_file[shoff])),
[*]align(1) const elf.Shdr,
@ptrCast(&elf_file[shoff]),
)[0..hdr.e_shnum];

var opt_debug_info: ?[]const u8 = null;
var opt_debug_abbrev: ?[]const u8 = null;
var opt_debug_str: ?[]const u8 = null;
var opt_debug_str_offsets: ?[]const u8 = null;
var opt_debug_line: ?[]const u8 = null;
var opt_debug_line_str: ?[]const u8 = null;
var opt_debug_ranges: ?[]const u8 = null;
var opt_debug_loclists: ?[]const u8 = null;
var opt_debug_rnglists: ?[]const u8 = null;
var opt_debug_addr: ?[]const u8 = null;
var opt_debug_names: ?[]const u8 = null;
var opt_debug_frame: ?[]const u8 = null;
var sections: common.dwarf.DwarfInfo.SectionArray = common.dwarf.DwarfInfo.null_section_array;

// Combine section list. This takes ownership over any owned sections from the parent scope.
errdefer for (sections) |section| if (section) |s| if (s.owned) allocator.free(s.data);

var separate_debug_filename: ?[]const u8 = null;
_ = separate_debug_filename;
var separate_debug_crc: ?u32 = null;
_ = separate_debug_crc;

for (shdrs) |*shdr| {
if (shdr.sh_type == elf.SHT_NULL) continue;

if (shdr.sh_type == elf.SHT_NULL or shdr.sh_type == elf.SHT_NOBITS) continue;
const name = common.sliceTo(header_strings[shdr.sh_name..], 0);
if (common.equal(u8, name, ".debug_info")) {
opt_debug_info = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_abbrev")) {
opt_debug_abbrev = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_str")) {
opt_debug_str = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_str_offsets")) {
opt_debug_str_offsets = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_line")) {
opt_debug_line = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_line_str")) {
opt_debug_line_str = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_ranges")) {
opt_debug_ranges = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_loclists")) {
opt_debug_loclists = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_rnglists")) {
opt_debug_rnglists = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_addr")) {
opt_debug_addr = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_names")) {
opt_debug_names = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
} else if (common.equal(u8, name, ".debug_frame")) {
opt_debug_frame = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);

if (common.equal(u8, name, ".gnu_debuglink")) {
@panic("WTF");
// const gnu_debuglink = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
// const debug_filename = mem.sliceTo(@as([*:0]const u8, @ptrCast(gnu_debuglink.ptr)), 0);
// const crc_offset = mem.alignForward(usize, @intFromPtr(&debug_filename[debug_filename.len]) + 1, 4) - @intFromPtr(gnu_debuglink.ptr);
// const crc_bytes = gnu_debuglink[crc_offset .. crc_offset + 4];
// separate_debug_crc = mem.readIntSliceNative(u32, crc_bytes);
// separate_debug_filename = debug_filename;
// continue;
}

var section_index: ?usize = null;
inline for (@typeInfo(common.dwarf.DwarfSection).Enum.fields, 0..) |section, i| {
if (common.equal(u8, "." ++ section.name, name)) section_index = i;
}
if (section_index == null) continue;
if (sections[section_index.?] != null) continue;

const section_bytes = try chopSlice(elf_file, shdr.sh_offset, shdr.sh_size);
sections[section_index.?] = if ((shdr.sh_flags & elf.SHF_COMPRESSED) > 0) blk: {
var section_stream = common.fixedBufferStream(section_bytes);
var section_reader = section_stream.reader();
const chdr = section_reader.readStruct(elf.Chdr) catch continue;
if (chdr.ch_type != .ZLIB) continue;

if (true) @panic("ZLIB");
break :blk undefined;
// var zlib_stream = std.compress.zlib.decompressStream(allocator, section_stream.reader()) catch continue;
// defer zlib_stream.deinit();
//
// var decompressed_section = try allocator.alloc(u8, chdr.ch_size);
// errdefer allocator.free(decompressed_section);
//
// const read = zlib_stream.reader().readAll(decompressed_section) catch continue;
// assert(read == decompressed_section.len);
//
// break :blk .{
// .data = decompressed_section,
// .virtual_address = shdr.sh_addr,
// .owned = true,
// };
} else .{
.data = section_bytes,
.virtual_address = shdr.sh_addr,
.owned = false,
};
}

const missing_debug_info =
sections[@intFromEnum(common.dwarf.DwarfSection.debug_info)] == null or
sections[@intFromEnum(common.dwarf.DwarfSection.debug_abbrev)] == null or
sections[@intFromEnum(common.dwarf.DwarfSection.debug_str)] == null or
sections[@intFromEnum(common.dwarf.DwarfSection.debug_line)] == null;
common.assert(!missing_debug_info);

var di = common.dwarf.DwarfInfo{
.endian = endian,
.debug_info = opt_debug_info orelse return error.MissingDebugInfo,
.debug_abbrev = opt_debug_abbrev orelse return error.MissingDebugInfo,
.debug_str = opt_debug_str orelse return error.MissingDebugInfo,
.debug_str_offsets = opt_debug_str_offsets,
.debug_line = opt_debug_line orelse return error.MissingDebugInfo,
|
||||
.debug_line_str = opt_debug_line_str,
|
||||
.debug_ranges = opt_debug_ranges,
|
||||
.debug_loclists = opt_debug_loclists,
|
||||
.debug_rnglists = opt_debug_rnglists,
|
||||
.debug_addr = opt_debug_addr,
|
||||
.debug_names = opt_debug_names,
|
||||
.debug_frame = opt_debug_frame,
|
||||
.sections = sections,
|
||||
.is_macho = false,
|
||||
};
|
||||
|
||||
try common.dwarf.openDwarfDebugInfo(&di, allocator);
|
||||
|
||||
return di;
|
||||
}
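The *align(1) casts in this hunk matter because the headers are read straight out of a byte buffer with no alignment guarantee. A minimal sketch of the same pattern using only Zig's std.elf types (std.mem and std.math stand in for this repo's common wrappers, which are assumptions here):

const std = @import("std");
const elf = std.elf;

// Sketch: locate the section headers of an in-memory ELF image without
// assuming natural alignment, mirroring the *align(1) casts in the hunk above.
fn sectionHeaders(elf_file: []const u8) ![]align(1) const elf.Shdr {
    const hdr = @as(*align(1) const elf.Ehdr, @ptrCast(&elf_file[0]));
    if (!std.mem.eql(u8, hdr.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
    const shoff = std.math.cast(usize, hdr.e_shoff) orelse return error.Overflow;
    return @as([*]align(1) const elf.Shdr, @ptrCast(&elf_file[shoff]))[0..hdr.e_shnum];
}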

@ -773,6 +791,14 @@ pub fn RegionInterface(comptime Region: type) type {
            .size = info.size,
        };
    }

    pub inline fn invalid() Region {
        return Region{
            .address = Addr.invalid(),
            .size = 0,
        };
    }

    pub inline fn fromRaw(info: struct {
        raw_address: AddrT,
        size: AddrT,
@ -842,15 +868,22 @@ pub fn RegionInterface(comptime Region: type) type {
        return result;
    }

    pub inline fn takeSlice(region: *Region, size: AddrT) Region {
        common.assert(size <= region.size);
        const result = Region{
            .address = region.address,
            .size = size,
        };
        region.* = region.offset(size);
    const TakeSliceError = error{
        not_enough_space,
    };

        return result;
    pub inline fn takeSlice(region: *Region, size: AddrT) !Region {
        if (size <= region.size) {
            const result = Region{
                .address = region.address,
                .size = size,
            };
            region.* = region.offset(size);

            return result;
        }

        return TakeSliceError.not_enough_space;
    }
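With the fallible signature, callers trade the old assert for explicit handling. A hypothetical call site (Header, payload_size, and the error names are stand-ins, not from this repo):

// Hypothetical caller of the new takeSlice: carve a fixed-size header off the
// front of a mutable region, then take the payload, surfacing exhaustion as an
// error instead of asserting.
fn splitMessage(region: *Region, payload_size: AddrT) !struct { Region, Region } {
    const header = try region.takeSlice(@sizeOf(Header));
    const payload = region.takeSlice(payload_size) catch |err| switch (err) {
        error.not_enough_space => return error.MessageTooShort,
    };
    return .{ header, payload };
}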

    pub inline fn split(region: Region, comptime count: comptime_int) [count]Region {

@ -353,10 +353,7 @@ pub const Cache = extern struct {
        const aligned_file_size = lib.alignForward(usize, file_size, cache.disk.sector_size);
        const lba = cache.clusterToSector(first_cluster);

        log.debug("Start disk callback", .{});

        const result = try cache.disk.callbacks.read(cache.disk, @divExact(aligned_file_size, cache.disk.sector_size), lba, file_buffer);
        log.debug("End disk callback", .{});
        return result.buffer[0..file_size];
    }
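For reference, the read path rounds the byte count up to whole sectors before dividing, so the @divExact cannot trip. A worked example, assuming a 512-byte sector size:

// file_size = 1300, sector_size = 512:
// alignForward(usize, 1300, 512) == 1536, and 1536 / 512 == 3 sectors read,
// after which the returned slice is trimmed back to the original 1300 bytes.
const aligned_file_size = lib.alignForward(usize, file_size, cache.disk.sector_size);
const sector_count = @divExact(aligned_file_size, cache.disk.sector_size);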
43
src/user.zig
43
src/user.zig
@ -4,15 +4,16 @@ const assert = lib.assert;
const ExecutionMode = lib.Syscall.ExecutionMode;

const birth = @import("birth");
const capabilities = birth.capabilities;
pub const Syscall = birth.capabilities.Syscall;

pub const arch = @import("user/arch.zig");
pub const capabilities = @import("user/capabilities.zig");
const core_state = @import("user/core_state.zig");
pub const CoreState = core_state.CoreState;
pub const PinnedState = core_state.PinnedState;
pub const libc = @import("user/libc.zig");
pub const thread = @import("user/thread.zig");
pub const Thread = thread.Thread;
pub const process = @import("user/process.zig");
const vas = @import("user/virtual_address_space.zig");
const VirtualAddress = lib.VirtualAddress;
@ -30,7 +31,7 @@ comptime {
pub const writer = lib.Writer(void, Writer.Error, Writer.write){ .context = {} };
const Writer = extern struct {
    const syscall = Syscall(.io, .log);
    const Error = Writer.syscall.ErrorSet.Error;
    const Error = Writer.syscall.Error;

    fn write(_: void, bytes: []const u8) Error!usize {
        const result = try Writer.syscall.blocking(bytes);
@ -52,16 +53,36 @@ pub fn zigPanic(message: []const u8, _: ?*lib.StackTrace, _: ?usize) noreturn {
}

pub fn panic(comptime format: []const u8, arguments: anytype) noreturn {
    lib.log.scoped(.PANIC).err(format, arguments);
    var buffer: [0x100]u8 = undefined;
    const message: []const u8 = lib.bufPrint(&buffer, format, arguments) catch "Failed to get panic message!";
    while (true) {
        Syscall(.process, .exit).blocking(false) catch |err| log.err("Exit failed: {}", .{err});
        Syscall(.process, .panic).blocking(.{
            .message = message,
            .exit_code = 1,
        }) catch |err| log.err("Exit failed: {}", .{err});
    }
}
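The panic path now formats its message into a fixed 0x100-byte buffer and ships it through the .panic capability call instead of a bare exit. A hypothetical call site (the helper and its check are illustrative, not from this commit):

// Hypothetical use of the new panic path: the formatted message is what the
// .panic syscall carries along, truncated if it exceeds the 0x100-byte buffer.
fn assertPageAligned(address: usize) void {
    if (address % lib.arch.valid_page_sizes[0] != 0) {
        panic("address 0x{x} is not page-aligned", .{address});
    }
}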

pub const Scheduler = extern struct {
    current_thread: *Thread,
    thread_queue: ?*Thread = null,
    time_slice: u32,
    core_id: u32,
    core_state: CoreState,
    bootstrap_thread: Thread,

    pub fn enqueueThread(scheduler: *Scheduler, thread_to_queue: *Thread) void {
        // TODO: check queue
        // TODO: defer check queue
        if (scheduler.thread_queue) |thread_queue| {
            _ = thread_queue;
            @panic("TODO: enqueueThread");
        } else {
            scheduler.thread_queue = thread_to_queue;
            thread_to_queue.previous = thread_to_queue;
            thread_to_queue.next = thread_to_queue;
        }
    }
};
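enqueueThread currently only handles the empty queue, where the single thread links to itself. A sketch of the missing non-empty branch, assuming previous/next maintain a circular doubly linked list (which the self-linking in the else branch implies):

// Sketch: insert thread_to_queue at the tail of the circular list headed by
// head, preserving previous/next invariants in both directions.
fn enqueueNonEmpty(head: *Thread, thread_to_queue: *Thread) void {
    const tail = head.previous.?;
    tail.next = thread_to_queue;
    thread_to_queue.previous = tail;
    thread_to_queue.next = head;
    head.previous = thread_to_queue;
}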

pub inline fn currentScheduler() *Scheduler {
@ -83,10 +104,18 @@ pub export fn start(scheduler: *arch.Scheduler, arg_init: bool) callconv(.C) nor
    if (is_init) {
        assert(scheduler.common.generic.setup_stack_lock.load(.Monotonic));
    }
    assert(scheduler.common.generic.disabled);
    scheduler.initDisabled();

    const frame_id = capabilities.frameCreate(lib.arch.valid_page_sizes[0]) catch |err| {
        panic("Unable to create frame: {}", .{err});
    };
    _ = frame_id;

    @panic("TODO: start");
    // assert(scheduler.common.generic.disabled);
    // scheduler.initDisabled();
    // thread.initBootstrap(scheduler);
    // command_buffer = Syscall(.cpu, .get_command_buffer).blocking(&command_buffer) catch @panic("Unable to get command buffer");
    Syscall(.cpu, .shutdown).blocking({}) catch unreachable;
    // Syscall(.cpu, .shutdown).blocking({}) catch unreachable;
}

// export fn birthInitializeDisabled(scheduler: *arch.Scheduler, arg_init: bool) callconv(.C) noreturn {

@ -46,8 +46,6 @@ pub fn _start() callconv(.Naked) noreturn {
        :
        : [startFunction] "r" (user.start),
    );

    unreachable;
}

pub inline fn setInitialState(register_arena: *RegisterArena, entry: VirtualAddress, stack_virtual_address: VirtualAddress, arguments: birth.syscall.Arguments) void {

@ -1,20 +1,53 @@
const lib = @import("lib");
const log = lib.log;
const assert = lib.assert;
const birth = @import("birth");
const user = @import("user");
const Syscall = user.Syscall;

// TODO: ref
pub fn frameCreate(ref: usize, bytes: usize) !usize {
    return mappableCapabilityCreate(ref, .cpu_memory, bytes);
pub fn frameCreate(bytes: usize) !birth.capabilities.Reference {
    return mappableCapabilityCreate(.cpu_memory, bytes);
}

fn mappableCapabilityCreate(ref: usize, mappable_capability: birth.capabilities.Type.Mappable, bytes: usize) !usize {
    _ = mappable_capability;
    _ = ref;
fn mappableCapabilityCreate(capability: birth.capabilities.Type.Mappable, bytes: usize) !birth.capabilities.Reference {
    assert(bytes > 0);

    return RamDescendant.create(capability, bytes);
}

fn ramDescendantCreate(
    ref: usize,
) !usize {
    _ = ref;
const Ram = extern struct {
    pub fn allocate(size: usize) !usize {
        _ = size;
        log.err("TODO: allocate", .{});
        return error.not_implemented;
    }
};

const RamDescendant = extern struct {
    capability: usize,
    size: usize,

    pub fn create(capability: birth.capabilities.Type.Mappable, size: usize) !birth.capabilities.Reference {
        const allocation = try Syscall(.ram, .allocate).blocking(size);
        const result = try retype(allocation, 0, capability.toCapability(), size, 1);
        try destroy(allocation);
        return result;
    }
};

pub fn retype(source: birth.capabilities.Reference, offset: usize, capability: birth.capabilities.Type, object_size: usize, object_count: usize) !birth.capabilities.Reference {
    _ = object_count;
    _ = object_size;
    _ = capability;
    _ = offset;
    _ = source;
    log.err("TODO: retype", .{});
    return error.not_implemented;
}

pub fn destroy(capability: birth.capabilities.Reference) !void {
    _ = capability;
    log.err("TODO: destroy", .{});
    return error.not_implemented;
}
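Once retype and destroy stop returning not_implemented, the intended flow reads as: allocate a RAM capability, retype it into the mappable type, destroy the now-retyped parent. A hypothetical end-to-end use of this module (the page-size constant is borrowed from the start() hunk above):

// Hypothetical consumer of frameCreate once the TODOs are filled in: create a
// page-sized frame capability and release it when done.
fn exampleFrameRoundTrip() !void {
    const frame = try frameCreate(lib.arch.valid_page_sizes[0]);
    // ... map `frame` into a virtual address space here ...
    try destroy(frame);
}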

@ -13,44 +13,35 @@ const max_thread_count = 256;

pub const Thread = extern struct {
    self: *Thread,
    previous: ?*Thread,
    next: ?*Thread,
    previous: ?*Thread = null,
    next: ?*Thread = null,
    stack: [*]u8,
    stack_top: [*]align(lib.arch.stack_alignment) u8,
    register_arena: birth.arch.RegisterArena align(lib.arch.stack_alignment),
    core_id: u32,

    pub fn init(thread: *Thread, scheduler: *user.arch.Scheduler) void {
        thread.self = thread;
        thread.previous = null;
        thread.next = null;
        thread.core_id = scheduler.generic.core_id;
        thread.* = Thread{
            .self = thread,
            .core_id = scheduler.generic.core_id,
            .stack = thread.stack,
            .stack_top = thread.stack_top,
            .register_arena = thread.register_arena,
        };
    }
};

pub const Mutex = extern struct {
    locked: bool = false,

    pub inline fn internalLock(mutex: *volatile Mutex) void {
        mutex.locked = true;
    }
};
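internalLock is a plain store, which is only safe while a single bootstrap thread exists. A sketch of what a real acquire would need, using atomic builtins with the capitalized ordering names this codebase already uses (.Monotonic appears in the start() hunk; .Acquire/.Release are assumed here):

// Sketch: spin on an atomic exchange so two threads cannot both observe the
// lock as free; the plain store in internalLock cannot provide this guarantee.
pub fn lock(mutex: *Mutex) void {
    while (@atomicRmw(bool, &mutex.locked, .Xchg, true, .Acquire)) {}
}

pub fn unlock(mutex: *Mutex) void {
    @atomicStore(bool, &mutex.locked, false, .Release);
}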

var static_stack: [0x10000]u8 align(lib.arch.stack_alignment) = undefined;
var static_thread: Thread = undefined;
var static_thread_lock = Mutex{};

pub fn initDisabled(scheduler: *user.arch.Scheduler) noreturn {
    const thread = &static_thread;
    static_thread_lock.internalLock();
    thread.stack = &static_stack;
    thread.stack_top = static_stack[static_stack.len..];
pub fn initBootstrap(scheduler: *user.arch.Scheduler) noreturn {
    const thread = &scheduler.generic.bootstrap_thread;
    thread.stack = &scheduler.common.generic.setup_stack;
    thread.stack_top = @ptrFromInt(@intFromPtr(&scheduler.common.generic.setup_stack) + scheduler.common.generic.setup_stack.len);
    thread.init(scheduler);

    // TODO: use RAX as parameter?

    user.arch.setInitialState(&thread.register_arena, VirtualAddress.new(bootstrapThread), VirtualAddress.new(thread.stack_top), .{0} ** 6);

    scheduler.generic.enqueueThread(thread);
    scheduler.generic.current_thread = thread;
    scheduler.common.generic.has_work = true;

    scheduler.restore(&thread.register_arena);