commit 4212c4c674
parent 403822310e
@@ -1,4 +1,5 @@
 const lib = @import("lib");
+const Allocator = lib.Allocator;
 const assert = lib.assert;
 
 pub const arch = @import("birth/arch.zig");
@@ -13,6 +14,22 @@ pub const Scheduler = extern struct {
 core_id: u32,
 core_state: CoreState,
 bootstrap_thread: Thread,
+fast_allocator: Allocator,
+
+pub fn initializeAllocator(scheduler: *Scheduler) void {
+scheduler.fast_allocator = Allocator{
+.callbacks = .{
+.allocate = callbackAllocate,
+},
+};
+}
+
+fn callbackAllocate(allocator: *Allocator, size: u64, alignment: u64) Allocator.Allocate.Error!Allocator.Allocate.Result {
+const scheduler = @fieldParentPtr(Scheduler, "fast_allocator", allocator);
+assert(scheduler.common.heap.address.isAligned(alignment));
+const result = scheduler.common.heap.takeSlice(size) catch return error.OutOfMemory;
+return @bitCast(result);
+}
+
 pub const Common = extern struct {
 self: *Common,
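Note: the new `fast_allocator` field and `callbackAllocate` above use an intrusive-callback pattern: the callback only receives a pointer to the embedded `Allocator` and recovers the owning `Scheduler` with `@fieldParentPtr`. A minimal standalone sketch of that pattern (the `Counter`/`Inner` types here are illustrative, not part of this repository):

const std = @import("std");

const Inner = struct {
    // vtable-like field embedded in the parent
    allocate: *const fn (inner: *Inner, size: u64) u64,
};

const Counter = struct {
    total: u64 = 0,
    inner: Inner = .{ .allocate = callback },

    fn callback(inner: *Inner, size: u64) u64 {
        // Recover the parent from the embedded field, as callbackAllocate does.
        const counter = @fieldParentPtr(Counter, "inner", inner);
        counter.total += size;
        return counter.total;
    }
};

pub fn main() void {
    var counter = Counter{};
    _ = counter.inner.allocate(&counter.inner, 64);
    std.debug.print("bytes requested so far: {}\n", .{counter.total});
}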
@@ -24,16 +41,16 @@ pub const Scheduler = extern struct {
 setup_stack_lock: lib.Atomic(bool),
 disabled_save_area: arch.RegisterArena,
 
-pub fn heapAllocateFast(common: *Common, comptime T: type) !*T {
-const size = @sizeOf(T);
-const alignment = @alignOf(T);
-lib.log.debug("Heap: {}. Size: {}. Alignment: {}", .{ common.heap, size, alignment });
-const result = try common.heap.takeSlice(size);
-const ptr = &result.access(T)[0];
-assert(lib.isAligned(@intFromPtr(ptr), alignment));
-
-return ptr;
-}
+// pub fn heapAllocateFast(common: *Common, comptime T: type) !*T {
+// const size = @sizeOf(T);
+// const alignment = @alignOf(T);
+// lib.log.debug("Heap: {}. Size: {}. Alignment: {}", .{ common.heap, size, alignment });
+// const result = try common.heap.takeSlice(size);
+// const ptr = &result.access(T)[0];
+// assert(lib.isAligned(@intFromPtr(ptr), alignment));
+//
+// return ptr;
+// }
 };
 
 pub fn enqueueThread(scheduler: *Scheduler, thread_to_queue: *Thread) void {
@@ -72,6 +72,10 @@ pub const PageTable = packed struct(u16) {
 };
 };
 
+pub const Mapping = packed struct(u32) {
+foo: u32 = 0,
+};
+
 pub fn CommandBuilder(comptime list: []const []const u8) type {
 const capability_base_command_list = .{
 "copy",
@@ -164,6 +168,7 @@ pub const Command = extern struct {
 },
 .page_table => .{
 "get",
+"get_leaf",
 },
 .memory_mapping => .{},
 .page_table_mapping => .{},
@@ -290,6 +295,17 @@ fn CommandDescriptor(comptime capability: Capability, comptime command: Command.
 "not_present",
 }),
 },
+.get_leaf => .{
+.Arguments = extern struct {
+/// This descriptor works for leaves as well
+descriptor: PageTable,
+buffer: *Leaf,
+},
+.ErrorSet = ErrorSet(&.{
+"index_out_of_bounds",
+"not_present",
+}),
+},
 else => .{},
 },
 else => .{},
@@ -689,6 +705,41 @@ pub fn Descriptor(comptime capability: Capability, comptime command: Command.fro
 .fromArguments = F.fromArguments,
 };
 },
+.get_leaf => blk: {
+const F = struct {
+inline fn toResult(raw_result: Raw.Result.Birth) void {
+_ = raw_result;
+}
+
+inline fn fromResult(result: T.Result) Raw.Result {
+_ = result;
+return Raw.Result{
+.birth = .{
+.first = .{},
+.second = 0,
+},
+};
+}
+
+const struct_helper = StructHelperArguments(T.Arguments);
+inline fn toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!T.Arguments {
+const args = try struct_helper.toArguments(raw_arguments);
+
+return args;
+}
+
+inline fn fromArguments(arguments: T.Arguments) Raw.Arguments {
+return struct_helper.fromArguments(arguments);
+}
+};
+
+break :blk .{
+.toResult = F.toResult,
+.fromResult = F.fromResult,
+.toArguments = F.toArguments,
+.fromArguments = F.fromArguments,
+};
+},
 else => .{},
 },
 else => .{},
@@ -1029,3 +1080,13 @@ pub const Raw = extern struct {
 }
 };
 };
+
+pub const Leaf = extern struct {
+mapped_physical: birth.interface.Memory,
+own_physical: birth.interface.Memory,
+flags: Flags,
+
+pub const Flags = packed struct(u64) {
+foo: u64 = 0,
+};
+};
src/cpu.zig (61)
@@ -167,10 +167,12 @@ pub const RegionList = extern struct {
 
 pub const Metadata = extern struct {
 reserved: usize = 0,
-bitset: lib.BitsetU64(list_region_count) = .{},
+bitset: Bitset = .{},
 previous: ?*RegionList = null,
 next: ?*RegionList = null,
 
+const Bitset = lib.data_structures.BitsetU64(list_region_count);
+
 comptime {
 assert(@sizeOf(Metadata) == expected_size);
 assert(@bitSizeOf(usize) - list_region_count < 8);
@@ -557,60 +559,3 @@ pub fn HeapImplementation(comptime user: bool) type {
 }
 
 pub const writer = privileged.E9Writer{ .context = {} };
-
-pub fn SparseArray(comptime T: type) type {
-return extern struct {
-ptr: [*]T,
-len: usize,
-capacity: usize,
-
-const Array = @This();
-
-pub const Error = error{
-index_out_of_bounds,
-};
-
-pub fn append(array: *Array, allocator: *Allocator, element: T) !void {
-try array.ensureCapacity(allocator, array.len + 1);
-const index = array.len;
-array.len += 1;
-const slice = array.ptr[0..array.len];
-slice[index] = element;
-}
-
-fn ensureCapacity(array: *Array, allocator: *Allocator, desired_capacity: usize) !void {
-if (array.capacity < desired_capacity) {
-// Allocate a new array
-const new_slice = try allocator.allocate(T, desired_capacity);
-if (array.capacity == 0) {
-array.ptr = new_slice.ptr;
-array.capacity = new_slice.len;
-} else {
-// Reallocate
-if (array.len > 0) {
-@memcpy(new_slice[0..array.len], array.ptr[0..array.len]);
-}
-
-// TODO: free
-
-array.ptr = new_slice.ptr;
-array.capacity = new_slice.len;
-}
-}
-}
-
-pub inline fn get(array: *Array, index: usize) T {
-assert(array.len > index);
-const slice = array.ptr[0..array.len];
-return slice[index];
-}
-
-pub inline fn getChecked(array: *Array, index: usize) !T {
-if (array.len > index) {
-return array.get(index);
-} else {
-return error.index_out_of_bounds;
-}
-}
-};
-}
@@ -624,41 +624,72 @@ fn map(address_space: paging.Specific, virtual: VirtualAddress, physical: Physic
 
 var page_table_ref = user_page_tables.user;
 
-for (0..paging.Level.count - 1) |level_index| {
+assert(indexed.PML4 == top_indexed.PML4);
+assert(indexed.PDP == top_indexed.PDP);
+log.debug("PD base: {}. PD top: {}", .{ indexed.PD, top_indexed.PD });
+log.debug("PT base: {}. PT top: {}", .{ indexed.PT, top_indexed.PT });
+var pd_index: u10 = indexed.PD;
+var offset: usize = 0;
+
+while (pd_index <= top_indexed.PD) : (pd_index += 1) {
+const pt_base = if (pd_index == indexed.PD) indexed.PT else 0;
+const pt_top = if (pd_index == top_indexed.PD) top_indexed.PT else 511;
+log.debug("PD index: {}. Base: {}. Top: {}", .{ pd_index, pt_base, pt_top });
+
+var pt_index = pt_base;
+while (pt_index <= pt_top) : ({
+pt_index += 1;
+offset += lib.arch.valid_page_sizes[0];
+}) {
+const leaf = Leaf{
+.physical = physical.offset(offset),
+.flags = .{
+.size = .@"4KB",
+},
+.common = undefined, // TODO:
+};
+const leaf_ref = try user_page_tables.appendLeaf(&cpu.user_scheduler.s.capability_root_node.heap.allocator, leaf);
+const level_fields = @typeInfo(paging.Level).Enum.fields;
+inline for (level_fields[0 .. level_fields.len - 1]) |level_field| {
+const level = @field(paging.Level, level_field.name);
 const page_table = user_page_tables.getPageTable(page_table_ref) catch |err| {
-log.err("Error {s} at level {} when trying to map 0x{x} to 0x{x}", .{ @errorName(err), level_index, virtual.value(), physical.value() });
-const physical_address = address_space.translateAddress(virtual, .{
+log.err("Error {s} at level {} when trying to map 0x{x} to 0x{x}", .{ @errorName(err), level, virtual.value(), physical.value() });
+const virtual_address = virtual.offset(offset);
+const physical_address = address_space.translateAddress(virtual_address, .{
 .execute_disable = !flags.execute,
 .write = flags.write,
 .user = flags.user,
 }) catch @panic("Could not translate address");
-if (physical_address.value() != physical.value()) {
+if (physical_address.value() != physical.offset(offset).value()) {
 @panic("Address mismatch");
 } else {
-@panic("Address match");
+cpu.panic("PD index: {}. PT index: {}. Virtual: 0x{x}. Physical: 0x{x}", .{ pd_index, pt_index, virtual_address.value(), physical_address.value() });
 }
 };
-page_table_ref = page_table.children[indices[level_index]];
-}
 
-assert(indexed.PML4 == top_indexed.PML4);
-assert(indexed.PDP == top_indexed.PDP);
-assert(indexed.PD == top_indexed.PD);
-assert(indexed.PT <= top_indexed.PT);
+page_table_ref = page_table.children[indices[@intFromEnum(level)]];
+}
 
 const page_table = try user_page_tables.getPageTable(page_table_ref);
-var index: u10 = indexed.PT;
-while (index <= top_indexed.PT) : (index += 1) {
-const leaf = Leaf{
-.physical = physical.offset(index - indexed.PT),
-.flags = .{
-.size = .@"4KB",
-},
-};
-const leaf_ref = try user_page_tables.appendLeaf(&cpu.user_scheduler.s.capability_root_node.heap.allocator, leaf);
-page_table.children[index] = leaf_ref;
+page_table.children[pt_index] = leaf_ref;
 }
 }
+// assert(indexed.PD == top_indexed.PD);
+// assert(indexed.PT <= top_indexed.PT);
+
+// var index: u10 = indexed.PT;
+// while (index <= top_indexed.PT) : (index += 1) {
+// const leaf = Leaf{
+// .physical = physical.offset(index - indexed.PT),
+// .flags = .{
+// .size = .@"4KB",
+// },
+// .common = undefined, // TODO:
+// };
+// const leaf_ref = try user_page_tables.appendLeaf(&cpu.user_scheduler.s.capability_root_node.heap.allocator, leaf);
+// page_table.children[index] = leaf_ref;
+// }
+}
 }
 
 const CPUPageTables = privileged.arch.CPUPageTables;
src/cpu/init.zig (109)
@@ -184,6 +184,58 @@ fn spawnInitCommon(init_file: []const u8, cpu_page_tables: paging.CPUPageTables)
 // TODO: delete in the future
 assert(cpu.bsp);
 
+const init_cpu_scheduler = try cpu.heap.create(cpu.UserScheduler);
+init_cpu_scheduler.* = cpu.UserScheduler{
+.s = .{
+.common = undefined,
+.capability_root_node = cpu.interface.Root{
+.static = .{
+.cpu = true,
+.boot = true,
+.process = true,
+},
+.dynamic = .{
+.io = .{
+.debug = true,
+},
+.memory = .{},
+.cpu_memory = .{
+.flags = .{
+.allocate = true,
+},
+},
+.page_table = cpu.interface.PageTables{
+.privileged = undefined,
+.user = birth.interface.PageTable{
+.index = 0,
+.entry_type = .page_table,
+},
+// .vmm = try cpu.interface.VMM.new(),
+.can_map_page_tables = true,
+.page_tables = .{
+.ptr = undefined,
+.len = 0,
+.capacity = 0,
+},
+.leaves = .{
+.ptr = undefined,
+.len = 0,
+.capacity = 0,
+},
+},
+.command_buffer_submission = .{ .region = PhysicalMemoryRegion.invalid() },
+.command_buffer_completion = .{ .region = PhysicalMemoryRegion.invalid() },
+.memory_mapping = .{},
+.page_table_mapping = .{},
+},
+.scheduler = .{
+.memory = undefined,
+// .memory = scheduler_physical_region,
+},
+},
+},
+};
+
 const init_elf = try ELF.Parser.init(init_file);
 const entry_point = init_elf.getEntryPoint();
 const program_headers = init_elf.getProgramHeaders();
@@ -231,6 +283,8 @@ fn spawnInitCommon(init_file: []const u8, cpu_page_tables: paging.CPUPageTables)
 const init_start_address = first_address orelse @panic("WTF");
 const init_top_address = init_start_address + segment_total_size;
 const user_scheduler_virtual_address = VirtualAddress.new(init_top_address);
+init_cpu_scheduler.s.common = user_scheduler_virtual_address.access(*birth.Scheduler.Common);
+
 const user_scheduler_virtual_region = VirtualMemoryRegion.new(.{
 .address = user_scheduler_virtual_address,
 .size = lib.alignForward(usize, @sizeOf(birth.Scheduler), lib.arch.valid_page_sizes[0]),
@@ -247,58 +301,7 @@ fn spawnInitCommon(init_file: []const u8, cpu_page_tables: paging.CPUPageTables)
 // const page_table_regions = try PageTableRegions.create(user_virtual_region, cpu_page_tables);
 log.debug("Scheduler region", .{});
 const scheduler_physical_region = try cpu.page_allocator.allocate(user_scheduler_virtual_region.size, .{ .reason = .user });
-log.debug("Heap scheduler", .{});
-const init_cpu_scheduler = try cpu.heap.create(cpu.UserScheduler);
-init_cpu_scheduler.* = cpu.UserScheduler{
-.s = .{
-.common = user_scheduler_virtual_address.access(*birth.Scheduler.Common),
-.capability_root_node = cpu.interface.Root{
-.static = .{
-.cpu = true,
-.boot = true,
-.process = true,
-},
-.dynamic = .{
-.io = .{
-.debug = true,
-},
-.memory = .{},
-.cpu_memory = .{
-.flags = .{
-.allocate = true,
-},
-},
-.page_table = cpu.interface.PageTables{
-.privileged = undefined,
-.user = birth.interface.PageTable{
-.index = 0,
-.entry_type = .page_table,
-},
-// .vmm = try cpu.interface.VMM.new(),
-.can_map_page_tables = true,
-.page_tables = .{
-.ptr = undefined,
-.len = 0,
-.capacity = 0,
-},
-.leaves = .{
-.ptr = undefined,
-.len = 0,
-.capacity = 0,
-},
-},
-.command_buffer_submission = .{ .region = PhysicalMemoryRegion.invalid() },
-.command_buffer_completion = .{ .region = PhysicalMemoryRegion.invalid() },
-.memory_mapping = .{},
-.page_table_mapping = .{},
-},
-.scheduler = .{
-.memory = scheduler_physical_region,
-},
-},
-},
-};
-
+init_cpu_scheduler.s.capability_root_node.scheduler.memory = scheduler_physical_region;
 const scheduler_virtual_region = VirtualMemoryRegion.new(.{
 .address = user_scheduler_virtual_address,
@@ -309,7 +312,7 @@ fn spawnInitCommon(init_file: []const u8, cpu_page_tables: paging.CPUPageTables)
 
 const heap_virtual_region = VirtualMemoryRegion.new(.{
 .address = scheduler_virtual_region.top(),
-.size = lib.alignForward(usize, scheduler_virtual_region.top().value(), lib.arch.valid_page_sizes[1]) - scheduler_virtual_region.top().value(),
+.size = lib.alignForward(usize, scheduler_virtual_region.top().value(), 64 * lib.arch.valid_page_sizes[1]) - scheduler_virtual_region.top().value(),
 });
 
 log.debug("Heap region", .{});
@@ -3,6 +3,7 @@ const assert = lib.assert;
 const Allocator = lib.Allocator;
 const enumCount = lib.enumCount;
 const log = lib.log.scoped(.capabilities);
+const SparseArray = lib.data_structures.SparseArray;
 const VirtualAddress = lib.VirtualAddress;
 
 const privileged = @import("privileged");
@@ -13,7 +14,6 @@ const VirtualMemoryRegion = lib.VirtualMemoryRegion;
 const birth = @import("birth");
 const cpu = @import("cpu");
 const RegionList = cpu.RegionList;
-const SparseArray = cpu.SparseArray;
 
 pub var system_call_count: usize = 0;
 
@@ -85,9 +85,8 @@ pub fn processCommand(comptime Descriptor: type, raw_arguments: birth.interface.
 comptime assert(@TypeOf(arguments) == usize);
 const size = arguments;
 // TODO: we want more fine-grained control of the reason if we want more than a simple statistic
-const physical_region = try cpu.page_allocator.allocate(size, .{ .reason = .user });
-const result = try root.dynamic.memory.appendRegion(physical_region);
-break :blk result;
+const result = try root.allocateMemory(size);
+break :blk result.reference;
 },
 .retype => blk: {
 const source = arguments.source;
@@ -144,6 +143,16 @@ pub fn processCommand(comptime Descriptor: type, raw_arguments: birth.interface.
 log.debug("Page table: {}", .{page_table.flags.level});
 @memcpy(arguments.buffer, &page_table.children);
 },
+.get_leaf => {
+const descriptor = arguments.descriptor;
+assert(descriptor.entry_type == .leaf);
+
+const block = try root.dynamic.page_table.leaves.getChecked(descriptor.block);
+const leaf = &block.array[descriptor.index];
+
+const user_leaf = arguments.buffer;
+user_leaf.* = leaf.common;
+},
 else => @panic("TODO: page_table other"),
 },
 .memory_mapping => {
@@ -354,9 +363,11 @@ pub const PageTable = extern struct {
 
 pub const Array = extern struct {
 array: [count]PageTable,
-bitset: lib.BitsetU64(count),
+bitset: Bitset,
 next: ?*Array = null,
 
+pub const Bitset = lib.data_structures.BitsetU64(count);
+
 pub const count = 32;
 
 pub fn get(array: *Array, index: u6) !*PageTable {
@@ -370,6 +381,7 @@ pub const PageTable = extern struct {
 };
 
 pub const Leaf = extern struct {
+common: birth.interface.Leaf,
 physical: PhysicalAddress,
 flags: Flags,
 
@@ -386,8 +398,9 @@ pub const Leaf = extern struct {
 
 pub const Array = extern struct {
 array: [count]Leaf,
-bitset: lib.BitsetU64(count),
+bitset: Bitset,
 next: ?*Array = null,
+pub const Bitset = lib.data_structures.BitsetU64(count);
 pub const count = 32;
 pub fn get(array: *Array, index: u6) !*PageTable {
 if (array.bitset.isSet(index)) {
@@ -446,7 +459,7 @@ pub const PageTables = extern struct {
 }
 
 const page_table_array = try allocator.create(PageTable.Array);
-try page_tables.page_tables.append(allocator, page_table_array);
+_ = try page_tables.page_tables.append(allocator, page_table_array);
 return appendPageTable(page_tables, allocator, page_table);
 }
 
@@ -467,7 +480,7 @@ pub const PageTables = extern struct {
 }
 
 const leaf_array = try allocator.create(Leaf.Array);
-try page_tables.leaves.append(allocator, leaf_array);
+_ = try page_tables.leaves.append(allocator, leaf_array);
 return appendLeaf(page_tables, allocator, leaf);
 }
 
@@ -572,127 +585,18 @@ pub const Root = extern struct {
 return has_permissions;
 }
 
-// Fast path
-fn allocateMemoryRaw(root: *Root, size: usize) AllocateError!PhysicalMemoryRegion {
-lib.log.err("New allocation demanded: 0x{x} bytes", .{size});
-assert(size != 0);
-assert(lib.isAligned(size, lib.arch.valid_page_sizes[0]));
-var index = Memory.getListIndex(size);
-
-const result = blk: {
-while (true) : (index -= 1) {
-const list = &root.dynamic.memory.lists[index];
-var iterator: ?*cpu.capabilities.RegionList = list;
-
-// const page_size = @as(u64, switch (index) {
-// 0 => lib.arch.reverse_valid_page_sizes[0],
-// 1 => lib.arch.reverse_valid_page_sizes[1],
-// 2 => lib.arch.reverse_valid_page_sizes[2],
-// else => unreachable,
-// });
-
-var list_count: usize = 0;
-while (iterator) |free_memory_list| : ({
-iterator = free_memory_list.metadata.next;
-list_count += 1;
-}) {
-const allocation = free_memory_list.allocate(size) catch continue;
-list_count += 1;
-break :blk allocation;
-}
-
-if (index == 0) break;
-}
-
-log.err("allocateMemoryRaw", .{});
-return error.OutOfMemory;
+pub const AllocateMemoryResult = extern struct {
+region: PhysicalMemoryRegion,
+reference: birth.interface.Memory,
 };
 
-@memset(result.toHigherHalfVirtualAddress().access(u8), 0);
+pub fn allocateMemory(root: *Root, size: usize) !AllocateMemoryResult {
+const physical_region = try cpu.page_allocator.allocate(size, .{ .reason = .user });
+const reference = try root.dynamic.memory.appendRegion(physical_region);
 
-return result;
-}
-
-pub fn allocateMemory(root: *Root, size: usize) AllocateError!birth.capabilities.memory {
-log.debug("Allocating 0x{x} bytes for user (root is 0x{x}", .{ size, @intFromPtr(root) });
-const result = try allocateMemoryRaw(root, size);
-const reference = root.dynamic.memory.allocated.append(result) catch |err| {
-log.err("err(user): {}", .{err});
-return AllocateError.OutOfMemory;
+return .{
+.region = physical_region,
+.reference = reference,
 };
-assert(reference.block == 0);
-assert(reference.region == 0);
-const region_address = &root.dynamic.memory.allocated.regions[reference.region];
-log.debug("Region address: 0x{x}", .{@intFromPtr(region_address)});
-return reference;
 }
-
-// Slow uncommon path. Use cases:
-// 1. CR3 switch. This is assumed to be privileged, so this function assumes privileged use of the memory
-pub fn allocatePageCustomAlignment(root: *Root, size: usize, alignment: usize) AllocateError!PhysicalMemoryRegion {
-assert(alignment > lib.arch.valid_page_sizes[0] and alignment < lib.arch.valid_page_sizes[1]);
-
-comptime assert(lib.arch.valid_page_sizes.len == 3);
-var index = Memory.getListIndex(size);
-
-while (true) : (index -= 1) {
-const smallest_region_list = &root.dynamic.memory.lists[index];
-var iterator: ?*cpu.capabilities.RegionList = smallest_region_list;
-while (iterator) |free_region_list| : (iterator = free_region_list.metadata.next) {
-const physical_allocation = free_region_list.allocateAligned(size, alignment) catch blk: {
-const splitted_allocation = free_region_list.allocateAlignedSplitting(size, alignment) catch continue;
-_ = try root.appendRegion(&root.dynamic.memory, splitted_allocation.wasted);
-break :blk splitted_allocation.allocated;
-};
-
-return physical_allocation;
-}
-
-if (index == 0) break;
-}
-
-log.err("allocatePageCustomAlignment", .{});
-return AllocateError.OutOfMemory;
-}
-
-fn allocateSingle(root: *Root, comptime T: type) AllocateError!*T {
-const size = @sizeOf(T);
-const alignment = @alignOf(T);
-var iterator = root.heap.first;
-while (iterator) |heap_region| : (iterator = heap_region.next) {
-if (heap_region.alignmentFits(alignment)) {
-if (heap_region.sizeFits(size)) {
-const allocated_region = heap_region.takeRegion(size);
-const result = &allocated_region.toHigherHalfVirtualAddress().access(T)[0];
-return result;
-}
-} else {
-@panic("ELSE");
-}
-}
-
-const physical_region = try root.allocateMemory(lib.arch.valid_page_sizes[0]);
-const heap_region = physical_region.toHigherHalfVirtualAddress().address.access(*Heap.Region);
-const first = root.heap.first;
-heap_region.* = .{
-.descriptor = physical_region.offset(@sizeOf(Heap.Region)),
-.allocated_size = @sizeOf(Heap.Region),
-.next = first,
-};
-
-root.heap.first = heap_region;
-
-return try root.allocateSingle(T);
-}
-
-fn allocateMany(root: *Root, comptime T: type, count: usize) AllocateError![]T {
-_ = count;
-_ = root;
-
-@panic("TODO many");
-}
-
-pub const AllocateCPUMemoryOptions = packed struct {
-privileged: bool,
-};
 };
src/lib.zig (50)
@@ -27,55 +27,7 @@ pub const CrossTarget = std.zig.CrossTarget;
 
 pub const log = std.log;
 
-pub fn BitsetU64(comptime bits: comptime_int) type {
-assert(bits <= @bitSizeOf(u64));
-const max_value = maxInt(@Type(.{
-.Int = .{
-.signedness = .unsigned,
-.bits = bits,
-},
-}));
-
-return packed struct(u64) {
-value: u64 = 0,
-
-const Error = error{
-block_full,
-};
-
-pub inline fn allocate(bitset: *@This()) !u6 {
-if (bitset.value & max_value != max_value) {
-// log.debug("Bitset: 0b{b}", .{bitset.value});
-const result: u6 = @intCast(@ctz(~bitset.value));
-// log.debug("Result: {}", .{result});
-assert(!bitset.isSet(result));
-bitset.set(result);
-return result;
-} else {
-return error.block_full;
-}
-}
-
-pub inline fn set(bitset: *@This(), index: u6) void {
-assert(index < bits);
-bitset.value |= (@as(u64, 1) << index);
-}
-
-pub inline fn clear(bitset: *@This(), index: u6) void {
-assert(index < bits);
-bitset.value &= ~(@as(u64, 1) << index);
-}
-
-pub inline fn isSet(bitset: @This(), index: u6) bool {
-assert(index < bits);
-return bitset.value & (@as(u64, 1) << index) != 0;
-}
-
-pub inline fn isFull(bitset: @This()) bool {
-return bitset.value == max_value;
-}
-};
-}
+pub const data_structures = @import("lib/data_structures.zig");
 
 pub const Atomic = std.atomic.Atomic;
 
src/lib/data_structures.zig (new file, 127)
@@ -0,0 +1,127 @@
+const lib = @import("lib");
+const Allocator = lib.Allocator;
+const assert = lib.assert;
+const maxInt = lib.maxInt;
+
+pub fn BitsetU64(comptime bits: comptime_int) type {
+assert(bits <= @bitSizeOf(u64));
+const max_value = maxInt(@Type(.{
+.Int = .{
+.signedness = .unsigned,
+.bits = bits,
+},
+}));
+
+return packed struct(u64) {
+value: u64 = 0,
+
+const Error = error{
+block_full,
+};
+
+pub inline fn allocate(bitset: *@This()) !u6 {
+if (bitset.value & max_value != max_value) {
+// log.debug("Bitset: 0b{b}", .{bitset.value});
+const result: u6 = @intCast(@ctz(~bitset.value));
+// log.debug("Result: {}", .{result});
+assert(!bitset.isSet(result));
+bitset.set(result);
+return result;
+} else {
+return error.block_full;
+}
+}
+
+pub inline fn set(bitset: *@This(), index: u6) void {
+assert(index < bits);
+bitset.value |= (@as(u64, 1) << index);
+}
+
+pub inline fn clear(bitset: *@This(), index: u6) void {
+assert(index < bits);
+bitset.value &= ~(@as(u64, 1) << index);
+}
+
+pub inline fn isSet(bitset: @This(), index: u6) bool {
+assert(index < bits);
+return bitset.value & (@as(u64, 1) << index) != 0;
+}
+
+pub inline fn isFull(bitset: @This()) bool {
+return bitset.value == max_value;
+}
+};
+}
+
+pub fn SparseArray(comptime T: type) type {
+return extern struct {
+ptr: [*]T,
+len: usize,
+capacity: usize,
+
+const Array = @This();
+
+pub const Error = error{
+index_out_of_bounds,
+};
+
+pub fn allocate(array: *Array, allocator: *Allocator) !*T {
+try array.ensureCapacity(allocator, array.len + 1);
+const index = array.len;
+array.len += 1;
+const slice = array.ptr[0..array.len];
+return &slice[index];
+}
+
+pub fn append(array: *Array, allocator: *Allocator, element: T) !usize {
+try array.ensureCapacity(allocator, array.len + 1);
+const index = array.len;
+array.len += 1;
+const slice = array.ptr[0..array.len];
+slice[index] = element;
+
+return index;
+}
+
+fn ensureCapacity(array: *Array, allocator: *Allocator, desired_capacity: usize) !void {
+if (array.capacity < desired_capacity) {
+// Allocate a new array
+const new_slice = try allocator.allocate(T, desired_capacity);
+if (array.capacity == 0) {
+array.ptr = new_slice.ptr;
+array.capacity = new_slice.len;
+} else {
+// Reallocate
+if (array.len > 0) {
+@memcpy(new_slice[0..array.len], array.ptr[0..array.len]);
+}
+
+// TODO: free
+
+array.ptr = new_slice.ptr;
+array.capacity = new_slice.len;
+}
+}
+}
+
+pub fn indexOf(array: *Array, ptr: *T) usize {
+const base_int = @intFromPtr(array.ptr);
+const ptr_int = @intFromPtr(ptr);
+return @divExact(ptr_int - base_int, @sizeOf(T));
+}
+
+pub inline fn get(array: *Array, index: usize) T {
+assert(array.len > index);
+const slice = array.ptr[0..array.len];
+return slice[index];
+}
+
+pub inline fn getChecked(array: *Array, index: usize) !T {
+if (array.len > index) {
+return array.get(index);
+} else {
+return error.index_out_of_bounds;
+}
+}
+};
+}
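Note: `BitsetU64` and `SparseArray` now live in this new `src/lib/data_structures.zig` module; the earlier private copies in src/cpu.zig and src/lib.zig are removed above. A standalone sketch of the free-slot trick `BitsetU64.allocate` relies on, the lowest clear bit found via `@ctz` of the complement (the `takeFreeSlot` helper is illustrative only, not part of the repository):

const std = @import("std");

fn takeFreeSlot(bits: *u64) ?u6 {
    // A set bit marks an occupied slot; the lowest clear bit is @ctz of the complement.
    if (bits.* == std.math.maxInt(u64)) return null; // every slot taken
    const index: u6 = @intCast(@ctz(~bits.*));
    bits.* |= @as(u64, 1) << index;
    return index;
}

pub fn main() void {
    var bits: u64 = 0b1011; // slots 0, 1 and 3 occupied
    const slot = takeFreeSlot(&bits) orelse unreachable;
    std.debug.print("first free slot: {}\n", .{slot}); // prints 2
}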
@@ -28,13 +28,6 @@ const bootloader = @import("bootloader");
 const paging = lib.arch.x86_64.paging;
 pub usingnamespace paging;
 
-pub fn entryCount(comptime level: paging.Level, limit: u64) u10 {
-const index = baseFromVirtualAddress(level, limit - 1);
-const result = @as(u10, index) + 1;
-// @compileLog(limit, index, result);
-return result;
-}
-
 const max_level_possible = 5;
 pub const IndexedVirtualAddress = packed struct(u64) {
 page_offset: u12 = 0,
@@ -71,11 +64,6 @@ const Level = enum(u2) {
 const count = @typeInfo(Level).Enum.fields.len;
 };
 
-pub fn baseFromVirtualAddress(comptime level: paging.Level, virtual_address: u64) u9 {
-const indexed = @as(IndexedVirtualAddress, @bitCast(virtual_address));
-return @field(indexed, @tagName(level));
-}
-
 pub const CPUPageTables = extern struct {
 pml4_table: PhysicalAddress,
 pdp_table: PhysicalAddress,
@@ -96,8 +84,6 @@ pub const CPUPageTables = extern struct {
 1; // PT
 const allocated_size = allocated_table_count * 0x1000;
 
-const page_table_base = top;
-
 comptime {
 assert(top + (left_ptables * lib.arch.valid_page_sizes[0]) == base + lib.arch.valid_page_sizes[1]);
 }
@@ -265,8 +251,8 @@ pub const Specific = extern struct {
 }
 
 fn mapGeneric(specific: Specific, asked_physical_address: PhysicalAddress, asked_virtual_address: VirtualAddress, size: u64, comptime asked_page_size: comptime_int, flags: MemoryFlags, page_allocator: PageAllocator) !void {
-if (!isAlignedGeneric(u64, asked_physical_address.value(), asked_page_size)) {
-//log.debug("PA: {}. Page size: 0x{x}", .{ asked_physical_address, asked_page_size });
+if (!isAlignedGeneric(u64, asked_physical_address.value(), lib.arch.valid_page_sizes[0])) {
+log.debug("PA: {}. Page size: 0x{x}", .{ asked_physical_address, asked_page_size });
 @panic("Misaligned physical address in mapGeneric");
 }
 if (!isAlignedGeneric(u64, asked_virtual_address.value(), asked_page_size)) {
@@ -475,106 +461,6 @@ pub const Specific = extern struct {
 return pt_entry_address;
 }
 
-pub fn setMappingFlags(specific: Specific, virtual_address: u64, flags: Mapping.Flags) !void {
-const indexed: IndexedVirtualAddress = @bitCast(virtual_address);
-
-const vas_cr3 = specific.cr3;
-
-const pml4_physical_address = vas_cr3.getAddress();
-
-const pml4_table = try accessPageTable(pml4_physical_address, *PML4Table);
-const pml4_entry = pml4_table[indexed.PML4];
-if (!pml4_entry.present) {
-return TranslateError.pml4_entry_not_present;
-}
-
-const pml4_entry_address = PhysicalAddress.new(unpackAddress(pml4_entry));
-if (pml4_entry_address.value() == 0) {
-return TranslateError.pml4_entry_address_null;
-}
-
-const pdp_table = try accessPageTable(pml4_entry_address, *PDPTable);
-const pdp_entry = pdp_table[indexed.PDP];
-if (!pdp_entry.present) {
-return TranslateError.pdp_entry_not_present;
-}
-
-const pdp_entry_address = PhysicalAddress.new(unpackAddress(pdp_entry));
-if (pdp_entry_address.value() == 0) {
-return TranslateError.pdp_entry_address_null;
-}
-
-const pd_table = try accessPageTable(pdp_entry_address, *PDTable);
-const pd_entry = pd_table[indexed.PD];
-if (!pd_entry.present) {
-return TranslateError.pd_entry_not_present;
-}
-
-const pd_entry_address = PhysicalAddress.new(unpackAddress(pd_entry));
-if (pd_entry_address.value() == 0) {
-return TranslateError.pd_entry_address_null;
-}
-
-const pt_table = try accessPageTable(pd_entry_address, *PTable);
-const pt_entry = &pt_table[indexed.PT];
-if (!pt_entry.present) {
-return TranslateError.pd_entry_not_present;
-}
-
-pt_entry.write = flags.write;
-pt_entry.user = flags.user;
-pt_entry.page_level_cache_disable = flags.cache_disable;
-pt_entry.global = flags.global;
-pt_entry.execute_disable = !flags.execute;
-}
-
-pub fn debugMemoryMap(specific: Specific) !void {
-log.debug("[START] Memory map dump 0x{x}\n", .{specific.cr3.getAddress().value()});
-
-const pml4 = try specific.getCpuPML4Table();
-
-for (pml4, 0..) |*pml4te, pml4_index| {
-if (pml4te.present) {
-const pdp_table = try accessPageTable(PhysicalAddress.new(unpackAddress(pml4te.*)), *PDPTable);
-
-for (pdp_table, 0..) |*pdpte, pdp_index| {
-if (pdpte.present) {
-if (pdpte.page_size) {
-continue;
-}
-
-const pd_table = try accessPageTable(PhysicalAddress.new(unpackAddress(pdpte.*)), *PDTable);
-
-for (pd_table, 0..) |*pdte, pd_index| {
-if (pdte.present) {
-if (pdte.page_size) @panic("bbbb");
-
-const p_table = try accessPageTable(PhysicalAddress.new(unpackAddress(pdte.*)), *PTable);
-
-for (p_table, 0..) |*pte, pt_index| {
-if (pte.present) {
-const indexed_virtual_address = IndexedVirtualAddress{
-.PML4 = @as(u9, @intCast(pml4_index)),
-.PDP = @as(u9, @intCast(pdp_index)),
-.PD = @as(u9, @intCast(pd_index)),
-.PT = @as(u9, @intCast(pt_index)),
-};
-
-const virtual_address = indexed_virtual_address.toVirtualAddress();
-const physical_address = unpackAddress(pte.*);
-log.debug("0x{x} -> 0x{x}", .{ virtual_address.value(), physical_address });
-}
-}
-}
-}
-}
-}
-}
-}
-
-log.debug("[END] Memory map dump", .{});
-}
-
 inline fn getUserCr3(specific: Specific) cr3 {
 assert(specific.isPrivileged());
 return @as(cr3, @bitCast(@as(u64, @bitCast(specific.cr3)) | paging.page_table_size));
@@ -715,8 +601,6 @@ fn mapPageTable2MB(pd_table: *PDTable, indexed: IndexedVirtualAddress, physical_
 return MapError.already_present_2mb;
 }
 
-assert(isAlignedGeneric(u64, physical_address, valid_page_sizes[1]));
-
 entry_pointer.* = @as(PDTE, @bitCast(getPageEntry(PDTE_2MB, physical_address, flags)));
 }
 
@@ -95,6 +95,7 @@ pub export fn start(scheduler: *Scheduler, arg_init: bool) callconv(.C) noreturn
 }
 
 fn initialize() !void {
+currentScheduler().initializeAllocator();
 _ = try Virtual.AddressSpace.create();
 }
 
@@ -6,15 +6,29 @@ const user = @import("user");
 
 const assert = lib.assert;
 const log = lib.log;
+const SparseArray = lib.data_structures.SparseArray;
 const VirtualAddress = lib.VirtualAddress;
 
 const paging = lib.arch.paging;
 
+const Leaf = birth.interface.Leaf;
+
 pub const AddressSpace = extern struct {
 // page_table: PageTable,
-region: Virtual.AddressSpace.Region,
+region: Virtual.AddressSpace.Region = .{},
 minimum: VirtualAddress = VirtualAddress.new(paging.user_address_space_start),
 maximum: VirtualAddress = VirtualAddress.new(paging.user_address_space_end),
+root_page_table: PageTable = .{},
+page_table_buffer: SparseArray(PageTable) = .{
+.ptr = undefined,
+.len = 0,
+.capacity = 0,
+},
+leaf_buffer: SparseArray(Leaf) = .{
+.ptr = undefined,
+.len = 0,
+.capacity = 0,
+},
 
 const Region = extern struct {
 list: Virtual.Region.List = .{},
@@ -23,44 +37,50 @@ pub const AddressSpace = extern struct {
 
 pub fn create() !*AddressSpace {
 const scheduler = user.currentScheduler();
-const virtual_address_space = try scheduler.common.heapAllocateFast(AddressSpace);
-virtual_address_space.* = .{
-.page_table = undefined,
-.region = .{},
-};
-
-virtual_address_space.collectPageTables(0, 0, 0, &virtual_address_space.page_table.root.u.page_table.children);
+const virtual_address_space = try scheduler.fast_allocator.create(AddressSpace);
+virtual_address_space.* = .{};
+
+try virtual_address_space.collectPageTables(&virtual_address_space.root_page_table, .{});
 
 @panic("TODO: create");
 }
 
-fn collectPageTables(virtual_address_space: *AddressSpace, block: u7, index: u7, level: usize, page_table_buffer: *[512]birth.interface.PageTable) !void {
-_ = virtual_address_space;
+fn collectPageTables(virtual_address_space: *Virtual.AddressSpace, page_table: *PageTable, descriptor: birth.interface.PageTable) !void {
 try user.Interface(.page_table, .get).blocking(.{
-.descriptor = .{
-.block = block,
-.index = index,
-.entry_type = .page_table,
-},
-.buffer = page_table_buffer,
+.descriptor = descriptor,
+.buffer = &page_table.children_handles,
 });
 
-for (page_table_buffer, 0..) |page_table_entry, i| {
-_ = i;
-if (page_table_entry.present) {
-switch (page_table_entry.entry_type) {
+const allocator = &user.currentScheduler().fast_allocator;
+for (page_table.children_handles, &page_table.indices) |child, *index| {
+if (child.present) {
+switch (child.entry_type) {
 .page_table => {
-const scheduler = user.currentScheduler();
-const buffer = try scheduler.common.heapAllocateFast([512]birth.interface.PageTable);
-collectPageTables(page_table_entry.block, page_table_entry.index, level + 1, buffer) catch unreachable;
+const page_table_index = virtual_address_space.page_table_buffer.len;
+const new_page_table = try virtual_address_space.page_table_buffer.allocate(allocator);
+//user.currentScheduler().fast_allocator.create(PageTable);
+index.* = @intCast(page_table_index);
+
+try virtual_address_space.collectPageTables(new_page_table, child);
 },
 .leaf => {
-log.err("Leaf: {}", .{page_table_entry});
+const new_leaf = try virtual_address_space.leaf_buffer.allocate(allocator);
+index.* = @intCast(virtual_address_space.leaf_buffer.indexOf(new_leaf));
+try getLeaf(child, new_leaf);
+log.debug("New leaf: {}", .{new_leaf});
 },
 }
 }
 }
 }
 
+fn getLeaf(leaf_descriptor: birth.interface.PageTable, leaf: *Leaf) !void {
+try user.Interface(.page_table, .get_leaf).blocking(.{
+.descriptor = leaf_descriptor,
+.buffer = leaf,
+});
+}
 };
 
 pub const Region = extern struct {
@@ -74,45 +94,7 @@ pub const Region = extern struct {
 };
 };
 
-// fn newPageTableNode(page_table: Virtual.PageTable.Node.PageTable, level: paging.Level) PageTable.Node {
-// return .{
-// .flags = .{
-// .type = .page_table,
-// .level = level,
-// },
-// .u = .{
-// .page_table = page_table,
-// },
-// };
-// }
-
-// pub const PageTable = extern struct {
-// root: Node,
-// foo: u32 = 0,
-//
-// pub const Node = extern struct {
-// flags: Flags,
-// u: extern union {
-// leaf: Leaf,
-// page_table: Node.PageTable,
-// },
-//
-// pub const Flags = packed struct(u32) {
-// type: birth.interface.PageTable.EntryType,
-// level: paging.Level,
-// reserved: u29 = 0,
-// };
-//
-// pub const Leaf = extern struct {
-// foo: u32 = 0,
-// };
-//
-// pub const PageTable = extern struct {
-// foo: u32 = 0,
-// children: Buffer = .{.{ .entry_type = .page_table }} ** node_count,
-// };
-// };
-//
-// const node_count = paging.page_table_entry_count;
-// pub const Buffer = [node_count]birth.interface.PageTable;
-// };
+pub const PageTable = extern struct {
+children_handles: [512]birth.interface.PageTable = .{.{}} ** 512,
+indices: [512]u32 = .{0} ** 512,
+};