diff --git a/src/birth.zig b/src/birth.zig
index 1b17326..98185bb 100644
--- a/src/birth.zig
+++ b/src/birth.zig
@@ -1,4 +1,5 @@
 const lib = @import("lib");
+const Allocator = lib.Allocator;
 const assert = lib.assert;
 
 pub const arch = @import("birth/arch.zig");
@@ -13,6 +14,22 @@ pub const Scheduler = extern struct {
     core_id: u32,
     core_state: CoreState,
     bootstrap_thread: Thread,
+    fast_allocator: Allocator,
+
+    pub fn initializeAllocator(scheduler: *Scheduler) void {
+        scheduler.fast_allocator = Allocator{
+            .callbacks = .{
+                .allocate = callbackAllocate,
+            },
+        };
+    }
+
+    fn callbackAllocate(allocator: *Allocator, size: u64, alignment: u64) Allocator.Allocate.Error!Allocator.Allocate.Result {
+        const scheduler = @fieldParentPtr(Scheduler, "fast_allocator", allocator);
+        assert(scheduler.common.heap.address.isAligned(alignment));
+        const result = scheduler.common.heap.takeSlice(size) catch return error.OutOfMemory;
+        return @bitCast(result);
+    }
 
     pub const Common = extern struct {
         self: *Common,
@@ -24,16 +41,16 @@ pub const Scheduler = extern struct {
         setup_stack_lock: lib.Atomic(bool),
         disabled_save_area: arch.RegisterArena,
 
-        pub fn heapAllocateFast(common: *Common, comptime T: type) !*T {
-            const size = @sizeOf(T);
-            const alignment = @alignOf(T);
-            lib.log.debug("Heap: {}. Size: {}. Alignment: {}", .{ common.heap, size, alignment });
-            const result = try common.heap.takeSlice(size);
-            const ptr = &result.access(T)[0];
-            assert(lib.isAligned(@intFromPtr(ptr), alignment));
-
-            return ptr;
-        }
+        // pub fn heapAllocateFast(common: *Common, comptime T: type) !*T {
+        //     const size = @sizeOf(T);
+        //     const alignment = @alignOf(T);
+        //     lib.log.debug("Heap: {}. Size: {}. Alignment: {}", .{ common.heap, size, alignment });
+        //     const result = try common.heap.takeSlice(size);
+        //     const ptr = &result.access(T)[0];
+        //     assert(lib.isAligned(@intFromPtr(ptr), alignment));
+        //
+        //     return ptr;
+        // }
     };
 
     pub fn enqueueThread(scheduler: *Scheduler, thread_to_queue: *Thread) void {
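Note on the new `fast_allocator`: the allocator vtable is embedded directly in `Scheduler`, and `callbackAllocate` recovers its owner with `@fieldParentPtr` (the three-argument, Zig 0.11-era builtin used throughout this diff). A minimal, self-contained sketch of the pattern, with a simplified stand-in for `lib.Allocator` (the real callback also takes an alignment and returns an `Allocate.Result`):

```zig
const std = @import("std");

// Simplified stand-in for lib.Allocator: a vtable whose callback receives a
// pointer to the embedded allocator itself (hypothetical, for illustration).
const Allocator = struct {
    callbacks: struct {
        allocate: *const fn (*Allocator, u64) anyerror!u64,
    },
};

const Scheduler = struct {
    heap_cursor: u64 = 0x1000, // hypothetical bump-allocator state
    fast_allocator: Allocator = undefined,

    pub fn initializeAllocator(scheduler: *Scheduler) void {
        scheduler.fast_allocator = .{ .callbacks = .{ .allocate = callbackAllocate } };
    }

    fn callbackAllocate(allocator: *Allocator, size: u64) anyerror!u64 {
        // Recover the containing Scheduler from the embedded field, exactly
        // as callbackAllocate does in the diff.
        const scheduler = @fieldParentPtr(Scheduler, "fast_allocator", allocator);
        const address = scheduler.heap_cursor;
        scheduler.heap_cursor += size;
        return address;
    }
};

pub fn main() !void {
    var scheduler = Scheduler{};
    scheduler.initializeAllocator();
    const allocator = &scheduler.fast_allocator;
    const address = try allocator.callbacks.allocate(allocator, 64);
    std.debug.print("allocated at 0x{x}\n", .{address});
}
```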
"not_present", }), }, + .get_leaf => .{ + .Arguments = extern struct { + /// This descriptor works for leaves as well + descriptor: PageTable, + buffer: *Leaf, + }, + .ErrorSet = ErrorSet(&.{ + "index_out_of_bounds", + "not_present", + }), + }, else => .{}, }, else => .{}, @@ -689,6 +705,41 @@ pub fn Descriptor(comptime capability: Capability, comptime command: Command.fro .fromArguments = F.fromArguments, }; }, + .get_leaf => blk: { + const F = struct { + inline fn toResult(raw_result: Raw.Result.Birth) void { + _ = raw_result; + } + + inline fn fromResult(result: T.Result) Raw.Result { + _ = result; + return Raw.Result{ + .birth = .{ + .first = .{}, + .second = 0, + }, + }; + } + + const struct_helper = StructHelperArguments(T.Arguments); + inline fn toArguments(raw_arguments: Raw.Arguments) T.ErrorSet.Error!T.Arguments { + const args = try struct_helper.toArguments(raw_arguments); + + return args; + } + + inline fn fromArguments(arguments: T.Arguments) Raw.Arguments { + return struct_helper.fromArguments(arguments); + } + }; + + break :blk .{ + .toResult = F.toResult, + .fromResult = F.fromResult, + .toArguments = F.toArguments, + .fromArguments = F.fromArguments, + }; + }, else => .{}, }, else => .{}, @@ -1029,3 +1080,13 @@ pub const Raw = extern struct { } }; }; + +pub const Leaf = extern struct { + mapped_physical: birth.interface.Memory, + own_physical: birth.interface.Memory, + flags: Flags, + + pub const Flags = packed struct(u64) { + foo: u64 = 0, + }; +}; diff --git a/src/cpu.zig b/src/cpu.zig index 441603e..3155a2d 100644 --- a/src/cpu.zig +++ b/src/cpu.zig @@ -167,10 +167,12 @@ pub const RegionList = extern struct { pub const Metadata = extern struct { reserved: usize = 0, - bitset: lib.BitsetU64(list_region_count) = .{}, + bitset: Bitset = .{}, previous: ?*RegionList = null, next: ?*RegionList = null, + const Bitset = lib.data_structures.BitsetU64(list_region_count); + comptime { assert(@sizeOf(Metadata) == expected_size); assert(@bitSizeOf(usize) - list_region_count < 8); @@ -557,60 +559,3 @@ pub fn HeapImplementation(comptime user: bool) type { } pub const writer = privileged.E9Writer{ .context = {} }; - -pub fn SparseArray(comptime T: type) type { - return extern struct { - ptr: [*]T, - len: usize, - capacity: usize, - - const Array = @This(); - - pub const Error = error{ - index_out_of_bounds, - }; - - pub fn append(array: *Array, allocator: *Allocator, element: T) !void { - try array.ensureCapacity(allocator, array.len + 1); - const index = array.len; - array.len += 1; - const slice = array.ptr[0..array.len]; - slice[index] = element; - } - - fn ensureCapacity(array: *Array, allocator: *Allocator, desired_capacity: usize) !void { - if (array.capacity < desired_capacity) { - // Allocate a new array - const new_slice = try allocator.allocate(T, desired_capacity); - if (array.capacity == 0) { - array.ptr = new_slice.ptr; - array.capacity = new_slice.len; - } else { - // Reallocate - if (array.len > 0) { - @memcpy(new_slice[0..array.len], array.ptr[0..array.len]); - } - - // TODO: free - - array.ptr = new_slice.ptr; - array.capacity = new_slice.len; - } - } - } - - pub inline fn get(array: *Array, index: usize) T { - assert(array.len > index); - const slice = array.ptr[0..array.len]; - return slice[index]; - } - - pub inline fn getChecked(array: *Array, index: usize) !T { - if (array.len > index) { - return array.get(index); - } else { - return error.index_out_of_bounds; - } - } - }; -} diff --git a/src/cpu/arch/x86/64/init.zig b/src/cpu/arch/x86/64/init.zig index 
diff --git a/src/cpu/arch/x86/64/init.zig b/src/cpu/arch/x86/64/init.zig
index ce4182f..6d32be7 100644
--- a/src/cpu/arch/x86/64/init.zig
+++ b/src/cpu/arch/x86/64/init.zig
@@ -624,40 +624,71 @@ fn map(address_space: paging.Specific, virtual: VirtualAddress, physical: Physic
 
     var page_table_ref = user_page_tables.user;
 
-    for (0..paging.Level.count - 1) |level_index| {
-        const page_table = user_page_tables.getPageTable(page_table_ref) catch |err| {
-            log.err("Error {s} at level {} when trying to map 0x{x} to 0x{x}", .{ @errorName(err), level_index, virtual.value(), physical.value() });
-            const physical_address = address_space.translateAddress(virtual, .{
-                .execute_disable = !flags.execute,
-                .write = flags.write,
-                .user = flags.user,
-            }) catch @panic("Could not translate address");
-            if (physical_address.value() != physical.value()) {
-                @panic("Address mismatch");
-            } else {
-                @panic("Address match");
-            }
-        };
-        page_table_ref = page_table.children[indices[level_index]];
-    }
-
     assert(indexed.PML4 == top_indexed.PML4);
     assert(indexed.PDP == top_indexed.PDP);
-    assert(indexed.PD == top_indexed.PD);
-    assert(indexed.PT <= top_indexed.PT);
+    log.debug("PD base: {}. PD top: {}", .{ indexed.PD, top_indexed.PD });
+    log.debug("PT base: {}. PT top: {}", .{ indexed.PT, top_indexed.PT });
+    var pd_index: u10 = indexed.PD;
+    var offset: usize = 0;
 
-    const page_table = try user_page_tables.getPageTable(page_table_ref);
-    var index: u10 = indexed.PT;
-    while (index <= top_indexed.PT) : (index += 1) {
-        const leaf = Leaf{
-            .physical = physical.offset(index - indexed.PT),
-            .flags = .{
-                .size = .@"4KB",
-            },
-        };
-        const leaf_ref = try user_page_tables.appendLeaf(&cpu.user_scheduler.s.capability_root_node.heap.allocator, leaf);
-        page_table.children[index] = leaf_ref;
+    while (pd_index <= top_indexed.PD) : (pd_index += 1) {
+        const pt_base = if (pd_index == indexed.PD) indexed.PT else 0;
+        const pt_top = if (pd_index == top_indexed.PD) top_indexed.PT else 511;
+        log.debug("PD index: {}. Base: {}. Top: {}", .{ pd_index, pt_base, pt_top });
+
+        var pt_index = pt_base;
+        while (pt_index <= pt_top) : ({
+            pt_index += 1;
+            offset += lib.arch.valid_page_sizes[0];
+        }) {
+            const leaf = Leaf{
+                .physical = physical.offset(offset),
+                .flags = .{
+                    .size = .@"4KB",
+                },
+                .common = undefined, // TODO:
+            };
+            const leaf_ref = try user_page_tables.appendLeaf(&cpu.user_scheduler.s.capability_root_node.heap.allocator, leaf);
+            const level_fields = @typeInfo(paging.Level).Enum.fields;
+            inline for (level_fields[0 .. level_fields.len - 1]) |level_field| {
+                const level = @field(paging.Level, level_field.name);
+                const page_table = user_page_tables.getPageTable(page_table_ref) catch |err| {
+                    log.err("Error {s} at level {} when trying to map 0x{x} to 0x{x}", .{ @errorName(err), level, virtual.value(), physical.value() });
+                    const virtual_address = virtual.offset(offset);
+                    const physical_address = address_space.translateAddress(virtual_address, .{
+                        .execute_disable = !flags.execute,
+                        .write = flags.write,
+                        .user = flags.user,
+                    }) catch @panic("Could not translate address");
+                    if (physical_address.value() != physical.offset(offset).value()) {
+                        @panic("Address mismatch");
+                    } else {
+                        cpu.panic("PD index: {}. PT index: {}. Virtual: 0x{x}. Physical: 0x{x}", .{ pd_index, pt_index, virtual_address.value(), physical_address.value() });
+                    }
+                };
+
+                page_table_ref = page_table.children[indices[@intFromEnum(level)]];
+            }
+
+            const page_table = try user_page_tables.getPageTable(page_table_ref);
+            page_table.children[pt_index] = leaf_ref;
+        }
     }
+    // assert(indexed.PD == top_indexed.PD);
+    // assert(indexed.PT <= top_indexed.PT);
+
+    // var index: u10 = indexed.PT;
+    // while (index <= top_indexed.PT) : (index += 1) {
+    //     const leaf = Leaf{
+    //         .physical = physical.offset(index - indexed.PT),
+    //         .flags = .{
+    //             .size = .@"4KB",
+    //         },
+    //         .common = undefined, // TODO:
+    //     };
+    //     const leaf_ref = try user_page_tables.appendLeaf(&cpu.user_scheduler.s.capability_root_node.heap.allocator, leaf);
+    //     page_table.children[index] = leaf_ref;
+    // }
 }
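The rewritten loop in `map` walks every PD entry in the requested range and clamps the PT range per PD slot; it also fixes the leaf offset, which the removed code computed as an entry-index difference (`index - indexed.PT`) rather than a byte offset. A standalone sketch of the index arithmetic, assuming x86_64 4 KiB base pages and 512-entry tables:

```zig
const std = @import("std");

// Assumptions: x86_64 base pages (4 KiB) and 512-entry page tables.
const page_size: u64 = 0x1000;
const last_entry: u64 = 511;

fn pdIndex(virtual: u64) u64 {
    return (virtual >> 21) & 0x1ff;
}

fn ptIndex(virtual: u64) u64 {
    return (virtual >> 12) & 0x1ff;
}

pub fn main() void {
    const base: u64 = 0x40000000; // hypothetical 4 KiB-aligned range
    const page_count: u64 = 1027; // spans three PD entries
    const top = base + page_count * page_size - 1; // inclusive top index

    var offset: u64 = 0;
    var pd = pdIndex(base);
    while (pd <= pdIndex(top)) : (pd += 1) {
        // Clamp exactly as the new loop does: start at indexed.PT only in the
        // first PD, stop at top_indexed.PT only in the last, 0..511 otherwise.
        const pt_base = if (pd == pdIndex(base)) ptIndex(base) else 0;
        const pt_top = if (pd == pdIndex(top)) ptIndex(top) else last_entry;

        var pt = pt_base;
        while (pt <= pt_top) : ({
            pt += 1;
            // The fix: advance the physical offset by a page size in bytes,
            // not by the PT-entry delta the removed code used.
            offset += page_size;
        }) {}
    }
    std.debug.print("pages covered: {}\n", .{offset / page_size});
}
```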
diff --git a/src/cpu/init.zig b/src/cpu/init.zig
index 0bef328..2f08c80 100644
--- a/src/cpu/init.zig
+++ b/src/cpu/init.zig
@@ -184,6 +184,58 @@ fn spawnInitCommon(init_file: []const u8, cpu_page_tables: paging.CPUPageTables)
     // TODO: delete in the future
     assert(cpu.bsp);
 
+    const init_cpu_scheduler = try cpu.heap.create(cpu.UserScheduler);
+    init_cpu_scheduler.* = cpu.UserScheduler{
+        .s = .{
+            .common = undefined,
+            .capability_root_node = cpu.interface.Root{
+                .static = .{
+                    .cpu = true,
+                    .boot = true,
+                    .process = true,
+                },
+                .dynamic = .{
+                    .io = .{
+                        .debug = true,
+                    },
+                    .memory = .{},
+                    .cpu_memory = .{
+                        .flags = .{
+                            .allocate = true,
+                        },
+                    },
+                    .page_table = cpu.interface.PageTables{
+                        .privileged = undefined,
+                        .user = birth.interface.PageTable{
+                            .index = 0,
+                            .entry_type = .page_table,
+                        },
+                        // .vmm = try cpu.interface.VMM.new(),
+                        .can_map_page_tables = true,
+                        .page_tables = .{
+                            .ptr = undefined,
+                            .len = 0,
+                            .capacity = 0,
+                        },
+                        .leaves = .{
+                            .ptr = undefined,
+                            .len = 0,
+                            .capacity = 0,
+                        },
+                    },
+                    .command_buffer_submission = .{ .region = PhysicalMemoryRegion.invalid() },
+                    .command_buffer_completion = .{ .region = PhysicalMemoryRegion.invalid() },
+                    .memory_mapping = .{},
+                    .page_table_mapping = .{},
+                },
+                .scheduler = .{
+                    .memory = undefined,
+                    // .memory = scheduler_physical_region,
+                },
+            },
+        },
+    };
+
     const init_elf = try ELF.Parser.init(init_file);
     const entry_point = init_elf.getEntryPoint();
     const program_headers = init_elf.getProgramHeaders();
@@ -231,6 +283,8 @@ fn spawnInitCommon(init_file: []const u8, cpu_page_tables: paging.CPUPageTables)
     const init_start_address = first_address orelse @panic("WTF");
     const init_top_address = init_start_address + segment_total_size;
     const user_scheduler_virtual_address = VirtualAddress.new(init_top_address);
+    init_cpu_scheduler.s.common = user_scheduler_virtual_address.access(*birth.Scheduler.Common);
+
     const user_scheduler_virtual_region = VirtualMemoryRegion.new(.{
         .address = user_scheduler_virtual_address,
         .size = lib.alignForward(usize, @sizeOf(birth.Scheduler), lib.arch.valid_page_sizes[0]),
@@ -247,58 +301,7 @@ fn spawnInitCommon(init_file: []const u8, cpu_page_tables: paging.CPUPageTables)
     // const page_table_regions = try PageTableRegions.create(user_virtual_region, cpu_page_tables);
     log.debug("Scheduler region", .{});
     const scheduler_physical_region = try cpu.page_allocator.allocate(user_scheduler_virtual_region.size, .{ .reason = .user });
-
-    log.debug("Heap scheduler", .{});
-    const init_cpu_scheduler = try cpu.heap.create(cpu.UserScheduler);
-    init_cpu_scheduler.* = cpu.UserScheduler{
-        .s = .{
-            .common = user_scheduler_virtual_address.access(*birth.Scheduler.Common),
-            .capability_root_node = cpu.interface.Root{
-                .static = .{
-                    .cpu = true,
-                    .boot = true,
-                    .process = true,
-                },
-                .dynamic = .{
-                    .io = .{
-                        .debug = true,
-                    },
-                    .memory = .{},
-                    .cpu_memory = .{
-                        .flags = .{
-                            .allocate = true,
-                        },
-                    },
-                    .page_table = cpu.interface.PageTables{
-                        .privileged = undefined,
-                        .user = birth.interface.PageTable{
-                            .index = 0,
-                            .entry_type = .page_table,
-                        },
-                        // .vmm = try cpu.interface.VMM.new(),
-                        .can_map_page_tables = true,
-                        .page_tables = .{
-                            .ptr = undefined,
-                            .len = 0,
-                            .capacity = 0,
-                        },
-                        .leaves = .{
-                            .ptr = undefined,
-                            .len = 0,
-                            .capacity = 0,
-                        },
-                    },
-                    .command_buffer_submission = .{ .region = PhysicalMemoryRegion.invalid() },
-                    .command_buffer_completion = .{ .region = PhysicalMemoryRegion.invalid() },
-                    .memory_mapping = .{},
-                    .page_table_mapping = .{},
-                },
-                .scheduler = .{
-                    .memory = scheduler_physical_region,
-                },
-            },
-        },
-    };
+    init_cpu_scheduler.s.capability_root_node.scheduler.memory = scheduler_physical_region;
 
     const scheduler_virtual_region = VirtualMemoryRegion.new(.{
         .address = user_scheduler_virtual_address,
@@ -309,7 +312,7 @@ fn spawnInitCommon(init_file: []const u8, cpu_page_tables: paging.CPUPageTables)
 
     const heap_virtual_region = VirtualMemoryRegion.new(.{
         .address = scheduler_virtual_region.top(),
-        .size = lib.alignForward(usize, scheduler_virtual_region.top().value(), lib.arch.valid_page_sizes[1]) - scheduler_virtual_region.top().value(),
+        .size = lib.alignForward(usize, scheduler_virtual_region.top().value(), 64 * lib.arch.valid_page_sizes[1]) - scheduler_virtual_region.top().value(),
     });
 
     log.debug("Heap region", .{});
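`spawnInitCommon` now builds `init_cpu_scheduler` before the ELF walk, with `common` and `scheduler.memory` left `undefined`, then patches both once the addresses are known; the user heap is also widened from one 2 MiB boundary to 64 of them. A small sketch of that size arithmetic, with hypothetical addresses and `std.mem.alignForward` standing in for `lib.alignForward`:

```zig
const std = @import("std");

pub fn main() void {
    // Assumptions: x86_64 page sizes and a hypothetical scheduler-region top.
    const page_2mb: u64 = 0x200000; // lib.arch.valid_page_sizes[1]
    const scheduler_top: u64 = 0x7f3000; // stand-in for scheduler_virtual_region.top()

    // Before: the heap runs up to the next 2 MiB boundary.
    const old_size = std.mem.alignForward(u64, scheduler_top, page_2mb) - scheduler_top;
    // After: the heap runs up to the next 64 * 2 MiB (128 MiB) boundary.
    const new_size = std.mem.alignForward(u64, scheduler_top, 64 * page_2mb) - scheduler_top;

    std.debug.print("old heap size: 0x{x} (~{} MiB)\n", .{ old_size, old_size >> 20 });
    std.debug.print("new heap size: 0x{x} (~{} MiB)\n", .{ new_size, new_size >> 20 });
}
```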
diff --git a/src/cpu/interface.zig b/src/cpu/interface.zig
index 2b5fd68..ed3eea8 100644
--- a/src/cpu/interface.zig
+++ b/src/cpu/interface.zig
@@ -3,6 +3,7 @@ const assert = lib.assert;
 const Allocator = lib.Allocator;
 const enumCount = lib.enumCount;
 const log = lib.log.scoped(.capabilities);
+const SparseArray = lib.data_structures.SparseArray;
 const VirtualAddress = lib.VirtualAddress;
 
 const privileged = @import("privileged");
@@ -13,7 +14,6 @@ const VirtualMemoryRegion = lib.VirtualMemoryRegion;
 const birth = @import("birth");
 const cpu = @import("cpu");
 const RegionList = cpu.RegionList;
-const SparseArray = cpu.SparseArray;
 
 pub var system_call_count: usize = 0;
 
@@ -85,9 +85,8 @@ pub fn processCommand(comptime Descriptor: type, raw_arguments: birth.interface.
                 comptime assert(@TypeOf(arguments) == usize);
                 const size = arguments;
                 // TODO: we want more fine-grained control of the reason if we want more than a simple statistic
-                const physical_region = try cpu.page_allocator.allocate(size, .{ .reason = .user });
-                const result = try root.dynamic.memory.appendRegion(physical_region);
-                break :blk result;
+                const result = try root.allocateMemory(size);
+                break :blk result.reference;
             },
             .retype => blk: {
                 const source = arguments.source;
@@ -144,6 +143,16 @@ pub fn processCommand(comptime Descriptor: type, raw_arguments: birth.interface.
                 log.debug("Page table: {}", .{page_table.flags.level});
                 @memcpy(arguments.buffer, &page_table.children);
             },
+            .get_leaf => {
+                const descriptor = arguments.descriptor;
+                assert(descriptor.entry_type == .leaf);
+
+                const block = try root.dynamic.page_table.leaves.getChecked(descriptor.block);
+                const leaf = &block.array[descriptor.index];
+
+                const user_leaf = arguments.buffer;
+                user_leaf.* = leaf.common;
+            },
             else => @panic("TODO: page_table other"),
         },
         .memory_mapping => {
@@ -354,9 +363,11 @@ pub const PageTable = extern struct {
 
     pub const Array = extern struct {
         array: [count]PageTable,
-        bitset: lib.BitsetU64(count),
+        bitset: Bitset,
         next: ?*Array = null,
 
+        pub const Bitset = lib.data_structures.BitsetU64(count);
+
         pub const count = 32;
 
         pub fn get(array: *Array, index: u6) !*PageTable {
@@ -370,6 +381,7 @@ pub const PageTable = extern struct {
 };
 
 pub const Leaf = extern struct {
+    common: birth.interface.Leaf,
     physical: PhysicalAddress,
     flags: Flags,
 
@@ -386,8 +398,9 @@ pub const Leaf = extern struct {
     pub const Array = extern struct {
         array: [count]Leaf,
-        bitset: lib.BitsetU64(count),
+        bitset: Bitset,
         next: ?*Array = null,
+        pub const Bitset = lib.data_structures.BitsetU64(count);
 
         pub const count = 32;
 
         pub fn get(array: *Array, index: u6) !*PageTable {
             if (array.bitset.isSet(index)) {
@@ -446,7 +459,7 @@ pub const PageTables = extern struct {
         }
 
         const page_table_array = try allocator.create(PageTable.Array);
-        try page_tables.page_tables.append(allocator, page_table_array);
+        _ = try page_tables.page_tables.append(allocator, page_table_array);
 
         return appendPageTable(page_tables, allocator, page_table);
     }
@@ -467,7 +480,7 @@ pub const PageTables = extern struct {
         }
 
         const leaf_array = try allocator.create(Leaf.Array);
-        try page_tables.leaves.append(allocator, leaf_array);
+        _ = try page_tables.leaves.append(allocator, leaf_array);
 
         return appendLeaf(page_tables, allocator, leaf);
     }
@@ -572,127 +585,18 @@ pub const Root = extern struct {
         return has_permissions;
     }
 
-    // Fast path
-    fn allocateMemoryRaw(root: *Root, size: usize) AllocateError!PhysicalMemoryRegion {
-        lib.log.err("New allocation demanded: 0x{x} bytes", .{size});
-        assert(size != 0);
-        assert(lib.isAligned(size, lib.arch.valid_page_sizes[0]));
-        var index = Memory.getListIndex(size);
-
-        const result = blk: {
-            while (true) : (index -= 1) {
-                const list = &root.dynamic.memory.lists[index];
-                var iterator: ?*cpu.capabilities.RegionList = list;
-
-                // const page_size = @as(u64, switch (index) {
-                //     0 => lib.arch.reverse_valid_page_sizes[0],
-                //     1 => lib.arch.reverse_valid_page_sizes[1],
-                //     2 => lib.arch.reverse_valid_page_sizes[2],
-                //     else => unreachable,
-                // });
-
-                var list_count: usize = 0;
-                while (iterator) |free_memory_list| : ({
-                    iterator = free_memory_list.metadata.next;
-                    list_count += 1;
-                }) {
-                    const allocation = free_memory_list.allocate(size) catch continue;
-                    list_count += 1;
-                    break :blk allocation;
-                }
-
-                if (index == 0) break;
-            }
-
-            log.err("allocateMemoryRaw", .{});
-            return error.OutOfMemory;
-        };
-
-        @memset(result.toHigherHalfVirtualAddress().access(u8), 0);
-
-        return result;
-    }
-
-    pub fn allocateMemory(root: *Root, size: usize) AllocateError!birth.capabilities.memory {
-        log.debug("Allocating 0x{x} bytes for user (root is 0x{x}", .{ size, @intFromPtr(root) });
-        const result = try allocateMemoryRaw(root, size);
-        const reference = root.dynamic.memory.allocated.append(result) catch |err| {
-            log.err("err(user): {}", .{err});
-            return AllocateError.OutOfMemory;
-        };
-        assert(reference.block == 0);
-        assert(reference.region == 0);
-        const region_address = &root.dynamic.memory.allocated.regions[reference.region];
-        log.debug("Region address: 0x{x}", .{@intFromPtr(region_address)});
-        return reference;
-    }
-
-    // Slow uncommon path. Use cases:
-    // 1. CR3 switch. This is assumed to be privileged, so this function assumes privileged use of the memory
-    pub fn allocatePageCustomAlignment(root: *Root, size: usize, alignment: usize) AllocateError!PhysicalMemoryRegion {
-        assert(alignment > lib.arch.valid_page_sizes[0] and alignment < lib.arch.valid_page_sizes[1]);
-
-        comptime assert(lib.arch.valid_page_sizes.len == 3);
-        var index = Memory.getListIndex(size);
-
-        while (true) : (index -= 1) {
-            const smallest_region_list = &root.dynamic.memory.lists[index];
-            var iterator: ?*cpu.capabilities.RegionList = smallest_region_list;
-            while (iterator) |free_region_list| : (iterator = free_region_list.metadata.next) {
-                const physical_allocation = free_region_list.allocateAligned(size, alignment) catch blk: {
-                    const splitted_allocation = free_region_list.allocateAlignedSplitting(size, alignment) catch continue;
-                    _ = try root.appendRegion(&root.dynamic.memory, splitted_allocation.wasted);
-                    break :blk splitted_allocation.allocated;
-                };
-
-                return physical_allocation;
-            }
-
-            if (index == 0) break;
-        }
-
-        log.err("allocatePageCustomAlignment", .{});
-        return AllocateError.OutOfMemory;
-    }
-
-    fn allocateSingle(root: *Root, comptime T: type) AllocateError!*T {
-        const size = @sizeOf(T);
-        const alignment = @alignOf(T);
-        var iterator = root.heap.first;
-        while (iterator) |heap_region| : (iterator = heap_region.next) {
-            if (heap_region.alignmentFits(alignment)) {
-                if (heap_region.sizeFits(size)) {
-                    const allocated_region = heap_region.takeRegion(size);
-                    const result = &allocated_region.toHigherHalfVirtualAddress().access(T)[0];
-                    return result;
-                }
-            } else {
-                @panic("ELSE");
-            }
-        }
-
-        const physical_region = try root.allocateMemory(lib.arch.valid_page_sizes[0]);
-        const heap_region = physical_region.toHigherHalfVirtualAddress().address.access(*Heap.Region);
-        const first = root.heap.first;
-        heap_region.* = .{
-            .descriptor = physical_region.offset(@sizeOf(Heap.Region)),
-            .allocated_size = @sizeOf(Heap.Region),
-            .next = first,
-        };
-
-        root.heap.first = heap_region;
-
-        return try root.allocateSingle(T);
-    }
-
-    fn allocateMany(root: *Root, comptime T: type, count: usize) AllocateError![]T {
-        _ = count;
-        _ = root;
-
-        @panic("TODO many");
-    }
-
-    pub const AllocateCPUMemoryOptions = packed struct {
-        privileged: bool,
+    pub const AllocateMemoryResult = extern struct {
+        region: PhysicalMemoryRegion,
+        reference: birth.interface.Memory,
     };
+
+    pub fn allocateMemory(root: *Root, size: usize) !AllocateMemoryResult {
+        const physical_region = try cpu.page_allocator.allocate(size, .{ .reason = .user });
+        const reference = try root.dynamic.memory.appendRegion(physical_region);
+
+        return .{
+            .region = physical_region,
+            .reference = reference,
+        };
+    }
 };
diff --git a/src/lib.zig b/src/lib.zig
index 7d5ac01..91b4180 100644
--- a/src/lib.zig
+++ b/src/lib.zig
@@ -27,55 +27,7 @@ pub const CrossTarget = std.zig.CrossTarget;
 
 pub const log = std.log;
 
-pub fn BitsetU64(comptime bits: comptime_int) type {
-    assert(bits <= @bitSizeOf(u64));
-    const max_value = maxInt(@Type(.{
-        .Int = .{
-            .signedness = .unsigned,
-            .bits = bits,
-        },
-    }));
-
-    return packed struct(u64) {
-        value: u64 = 0,
-
-        const Error = error{
-            block_full,
-        };
-
-        pub inline fn allocate(bitset: *@This()) !u6 {
-            if (bitset.value & max_value != max_value) {
-                // log.debug("Bitset: 0b{b}", .{bitset.value});
-                const result: u6 = @intCast(@ctz(~bitset.value));
-                // log.debug("Result: {}", .{result});
-                assert(!bitset.isSet(result));
-                bitset.set(result);
-                return result;
-            } else {
-                return error.block_full;
-            }
-        }
-
-        pub inline fn set(bitset: *@This(), index: u6) void {
-            assert(index < bits);
-            bitset.value |= (@as(u64, 1) << index);
-        }
-
-        pub inline fn clear(bitset: *@This(), index: u6) void {
-            assert(index < bits);
-            bitset.value &= ~(@as(u64, 1) << index);
-        }
-
-        pub inline fn isSet(bitset: @This(), index: u6) bool {
-            assert(index < bits);
-            return bitset.value & (@as(u64, 1) << index) != 0;
-        }
-
-        pub inline fn isFull(bitset: @This()) bool {
-            return bitset.value == max_value;
-        }
-    };
-}
+pub const data_structures = @import("lib/data_structures.zig");
 
 pub const Atomic = std.atomic.Atomic;
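`Root.allocateMemory` now returns both the physical region and the capability reference, so the syscall path can hand `result.reference` to userspace while callers like `spawnInitCommon` keep the region itself; the old fast/slow-path allocators are dropped in favor of `cpu.page_allocator`. A toy sketch of the result-struct shape, with hypothetical stand-in types for `PhysicalMemoryRegion` and `birth.interface.Memory`:

```zig
const std = @import("std");

// Hypothetical stand-ins for PhysicalMemoryRegion / birth.interface.Memory.
const Region = struct { address: u64, size: u64 };
const Reference = struct { block: u32, region: u32 };

const AllocateMemoryResult = struct {
    region: Region,
    reference: Reference,
};

var next_address: u64 = 0x100000;
var next_region: u32 = 0;

// One allocation produces both views: the kernel-side region and the
// user-visible capability reference, avoiding a second lookup.
fn allocateMemory(size: u64) !AllocateMemoryResult {
    const region = Region{ .address = next_address, .size = size };
    next_address += size;
    const reference = Reference{ .block = 0, .region = next_region };
    next_region += 1;
    return .{ .region = region, .reference = reference };
}

pub fn main() !void {
    // Syscall path: only the reference crosses the user boundary.
    const result = try allocateMemory(0x1000);
    std.debug.print("user gets reference {}, kernel keeps region 0x{x}\n", .{
        result.reference.region,
        result.region.address,
    });
}
```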
diff --git a/src/lib/data_structures.zig b/src/lib/data_structures.zig
new file mode 100644
index 0000000..2857ce1
--- /dev/null
+++ b/src/lib/data_structures.zig
@@ -0,0 +1,127 @@
+const lib = @import("lib");
+const Allocator = lib.Allocator;
+const assert = lib.assert;
+const maxInt = lib.maxInt;
+
+pub fn BitsetU64(comptime bits: comptime_int) type {
+    assert(bits <= @bitSizeOf(u64));
+    const max_value = maxInt(@Type(.{
+        .Int = .{
+            .signedness = .unsigned,
+            .bits = bits,
+        },
+    }));
+
+    return packed struct(u64) {
+        value: u64 = 0,
+
+        const Error = error{
+            block_full,
+        };
+
+        pub inline fn allocate(bitset: *@This()) !u6 {
+            if (bitset.value & max_value != max_value) {
+                // log.debug("Bitset: 0b{b}", .{bitset.value});
+                const result: u6 = @intCast(@ctz(~bitset.value));
+                // log.debug("Result: {}", .{result});
+                assert(!bitset.isSet(result));
+                bitset.set(result);
+                return result;
+            } else {
+                return error.block_full;
+            }
+        }
+
+        pub inline fn set(bitset: *@This(), index: u6) void {
+            assert(index < bits);
+            bitset.value |= (@as(u64, 1) << index);
+        }
+
+        pub inline fn clear(bitset: *@This(), index: u6) void {
+            assert(index < bits);
+            bitset.value &= ~(@as(u64, 1) << index);
+        }
+
+        pub inline fn isSet(bitset: @This(), index: u6) bool {
+            assert(index < bits);
+            return bitset.value & (@as(u64, 1) << index) != 0;
+        }
+
+        pub inline fn isFull(bitset: @This()) bool {
+            return bitset.value == max_value;
+        }
+    };
+}
+
+pub fn SparseArray(comptime T: type) type {
+    return extern struct {
+        ptr: [*]T,
+        len: usize,
+        capacity: usize,
+
+        const Array = @This();
+
+        pub const Error = error{
+            index_out_of_bounds,
+        };
+
+        pub fn allocate(array: *Array, allocator: *Allocator) !*T {
+            try array.ensureCapacity(allocator, array.len + 1);
+            const index = array.len;
+            array.len += 1;
+            const slice = array.ptr[0..array.len];
+            return &slice[index];
+        }
+
+        pub fn append(array: *Array, allocator: *Allocator, element: T) !usize {
+            try array.ensureCapacity(allocator, array.len + 1);
+            const index = array.len;
+            array.len += 1;
+            const slice = array.ptr[0..array.len];
+            slice[index] = element;
+
+            return index;
+        }
+
+        fn ensureCapacity(array: *Array, allocator: *Allocator, desired_capacity: usize) !void {
+            if (array.capacity < desired_capacity) {
+                // Allocate a new array
+                const new_slice = try allocator.allocate(T, desired_capacity);
+                if (array.capacity == 0) {
+                    array.ptr = new_slice.ptr;
+                    array.capacity = new_slice.len;
+                } else {
+                    // Reallocate
+                    if (array.len > 0) {
+                        @memcpy(new_slice[0..array.len], array.ptr[0..array.len]);
+                    }
+
+                    // TODO: free
+
+                    array.ptr = new_slice.ptr;
+                    array.capacity = new_slice.len;
+                }
+            }
+        }
+
+        pub fn indexOf(array: *Array, ptr: *T) usize {
+            const base_int = @intFromPtr(array.ptr);
+            const ptr_int = @intFromPtr(ptr);
+            return @divExact(ptr_int - base_int, @sizeOf(T));
+        }
+
+        pub inline fn get(array: *Array, index: usize) T {
+            assert(array.len > index);
+            const slice = array.ptr[0..array.len];
+            return slice[index];
+        }
+
+        pub inline fn getChecked(array: *Array, index: usize) !T {
+            if (array.len > index) {
+                return array.get(index);
+            } else {
+                return error.index_out_of_bounds;
+            }
+        }
+    };
+}
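`BitsetU64.allocate` finds the lowest free slot as the count of trailing ones, i.e. `@ctz` of the inverted value. A standalone re-creation of just that scheme, so the semantics can be exercised outside the kernel:

```zig
const std = @import("std");

// Minimal re-creation of the BitsetU64 allocation scheme: the next free slot
// is the number of trailing ones, found via @ctz on the inverted value.
fn Bitset(comptime bits: comptime_int) type {
    return struct {
        value: u64 = 0,
        const max_value: u64 = (1 << bits) - 1;

        fn allocate(self: *@This()) !u6 {
            if (self.value & max_value == max_value) return error.block_full;
            const index: u6 = @intCast(@ctz(~self.value));
            self.value |= @as(u64, 1) << index;
            return index;
        }

        fn clear(self: *@This(), index: u6) void {
            self.value &= ~(@as(u64, 1) << index);
        }
    };
}

pub fn main() !void {
    var bitset = Bitset(32){};
    const a = try bitset.allocate(); // 0
    const b = try bitset.allocate(); // 1
    bitset.clear(a);
    const c = try bitset.allocate(); // 0 again: the lowest free slot is reused
    std.debug.print("a={} b={} c={}\n", .{ a, b, c });
}
```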
diff --git a/src/privileged/arch/x86/64/paging.zig b/src/privileged/arch/x86/64/paging.zig
index f7813d3..07b6283 100644
--- a/src/privileged/arch/x86/64/paging.zig
+++ b/src/privileged/arch/x86/64/paging.zig
@@ -28,13 +28,6 @@ const bootloader = @import("bootloader");
 const paging = lib.arch.x86_64.paging;
 
 pub usingnamespace paging;
-pub fn entryCount(comptime level: paging.Level, limit: u64) u10 {
-    const index = baseFromVirtualAddress(level, limit - 1);
-    const result = @as(u10, index) + 1;
-    // @compileLog(limit, index, result);
-    return result;
-}
-
 const max_level_possible = 5;
 pub const IndexedVirtualAddress = packed struct(u64) {
     page_offset: u12 = 0,
@@ -71,11 +64,6 @@ const Level = enum(u2) {
     const count = @typeInfo(Level).Enum.fields.len;
 };
 
-pub fn baseFromVirtualAddress(comptime level: paging.Level, virtual_address: u64) u9 {
-    const indexed = @as(IndexedVirtualAddress, @bitCast(virtual_address));
-    return @field(indexed, @tagName(level));
-}
-
 pub const CPUPageTables = extern struct {
     pml4_table: PhysicalAddress,
     pdp_table: PhysicalAddress,
@@ -96,8 +84,6 @@ pub const CPUPageTables = extern struct {
         1; // PT
     const allocated_size = allocated_table_count * 0x1000;
 
-    const page_table_base = top;
-
     comptime {
         assert(top + (left_ptables * lib.arch.valid_page_sizes[0]) == base + lib.arch.valid_page_sizes[1]);
     }
@@ -265,8 +251,8 @@ pub const Specific = extern struct {
     }
 
     fn mapGeneric(specific: Specific, asked_physical_address: PhysicalAddress, asked_virtual_address: VirtualAddress, size: u64, comptime asked_page_size: comptime_int, flags: MemoryFlags, page_allocator: PageAllocator) !void {
-        if (!isAlignedGeneric(u64, asked_physical_address.value(), asked_page_size)) {
-            //log.debug("PA: {}. Page size: 0x{x}", .{ asked_physical_address, asked_page_size });
+        if (!isAlignedGeneric(u64, asked_physical_address.value(), lib.arch.valid_page_sizes[0])) {
+            log.debug("PA: {}. Page size: 0x{x}", .{ asked_physical_address, asked_page_size });
             @panic("Misaligned physical address in mapGeneric");
         }
         if (!isAlignedGeneric(u64, asked_virtual_address.value(), asked_page_size)) {
@@ -475,106 +461,6 @@ pub const Specific = extern struct {
         return pt_entry_address;
     }
 
-    pub fn setMappingFlags(specific: Specific, virtual_address: u64, flags: Mapping.Flags) !void {
-        const indexed: IndexedVirtualAddress = @bitCast(virtual_address);
-
-        const vas_cr3 = specific.cr3;
-
-        const pml4_physical_address = vas_cr3.getAddress();
-
-        const pml4_table = try accessPageTable(pml4_physical_address, *PML4Table);
-        const pml4_entry = pml4_table[indexed.PML4];
-        if (!pml4_entry.present) {
-            return TranslateError.pml4_entry_not_present;
-        }
-
-        const pml4_entry_address = PhysicalAddress.new(unpackAddress(pml4_entry));
-        if (pml4_entry_address.value() == 0) {
-            return TranslateError.pml4_entry_address_null;
-        }
-
-        const pdp_table = try accessPageTable(pml4_entry_address, *PDPTable);
-        const pdp_entry = pdp_table[indexed.PDP];
-        if (!pdp_entry.present) {
-            return TranslateError.pdp_entry_not_present;
-        }
-
-        const pdp_entry_address = PhysicalAddress.new(unpackAddress(pdp_entry));
-        if (pdp_entry_address.value() == 0) {
-            return TranslateError.pdp_entry_address_null;
-        }
-
-        const pd_table = try accessPageTable(pdp_entry_address, *PDTable);
-        const pd_entry = pd_table[indexed.PD];
-        if (!pd_entry.present) {
-            return TranslateError.pd_entry_not_present;
-        }
-
-        const pd_entry_address = PhysicalAddress.new(unpackAddress(pd_entry));
-        if (pd_entry_address.value() == 0) {
-            return TranslateError.pd_entry_address_null;
-        }
-
-        const pt_table = try accessPageTable(pd_entry_address, *PTable);
-        const pt_entry = &pt_table[indexed.PT];
-        if (!pt_entry.present) {
-            return TranslateError.pd_entry_not_present;
-        }
-
-        pt_entry.write = flags.write;
-        pt_entry.user = flags.user;
-        pt_entry.page_level_cache_disable = flags.cache_disable;
-        pt_entry.global = flags.global;
-        pt_entry.execute_disable = !flags.execute;
-    }
-
-    pub fn debugMemoryMap(specific: Specific) !void {
-        log.debug("[START] Memory map dump 0x{x}\n", .{specific.cr3.getAddress().value()});
-
-        const pml4 = try specific.getCpuPML4Table();
-
-        for (pml4, 0..) |*pml4te, pml4_index| {
-            if (pml4te.present) {
-                const pdp_table = try accessPageTable(PhysicalAddress.new(unpackAddress(pml4te.*)), *PDPTable);
-
-                for (pdp_table, 0..) |*pdpte, pdp_index| {
-                    if (pdpte.present) {
-                        if (pdpte.page_size) {
-                            continue;
-                        }
-
-                        const pd_table = try accessPageTable(PhysicalAddress.new(unpackAddress(pdpte.*)), *PDTable);
-
-                        for (pd_table, 0..) |*pdte, pd_index| {
-                            if (pdte.present) {
-                                if (pdte.page_size) @panic("bbbb");
-
-                                const p_table = try accessPageTable(PhysicalAddress.new(unpackAddress(pdte.*)), *PTable);
-
-                                for (p_table, 0..) |*pte, pt_index| {
-                                    if (pte.present) {
-                                        const indexed_virtual_address = IndexedVirtualAddress{
-                                            .PML4 = @as(u9, @intCast(pml4_index)),
-                                            .PDP = @as(u9, @intCast(pdp_index)),
-                                            .PD = @as(u9, @intCast(pd_index)),
-                                            .PT = @as(u9, @intCast(pt_index)),
-                                        };
-
-                                        const virtual_address = indexed_virtual_address.toVirtualAddress();
-                                        const physical_address = unpackAddress(pte.*);
-                                        log.debug("0x{x} -> 0x{x}", .{ virtual_address.value(), physical_address });
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        }
-
-        log.debug("[END] Memory map dump", .{});
-    }
-
     inline fn getUserCr3(specific: Specific) cr3 {
         assert(specific.isPrivileged());
         return @as(cr3, @bitCast(@as(u64, @bitCast(specific.cr3)) | paging.page_table_size));
@@ -715,8 +601,6 @@ fn mapPageTable2MB(pd_table: *PDTable, indexed: IndexedVirtualAddress, physical_
         return MapError.already_present_2mb;
     }
 
-    assert(isAlignedGeneric(u64, physical_address, valid_page_sizes[1]));
-
     entry_pointer.* = @as(PDTE, @bitCast(getPageEntry(PDTE_2MB, physical_address, flags)));
 }
diff --git a/src/user.zig b/src/user.zig
index e44aca0..4cfa614 100644
--- a/src/user.zig
+++ b/src/user.zig
@@ -95,6 +95,7 @@ pub export fn start(scheduler: *Scheduler, arg_init: bool) callconv(.C) noreturn
 }
 
 fn initialize() !void {
+    currentScheduler().initializeAllocator();
     _ = try Virtual.AddressSpace.create();
 }
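`mapGeneric` now only demands base-page alignment of the physical address (and `mapPageTable2MB` drops its 2 MiB assert), so a 4 KiB-aligned physical region can back a mapping requested at a larger granularity. A sketch of the difference, assuming x86_64 page sizes:

```zig
const std = @import("std");

// Simplified stand-in for isAlignedGeneric.
fn isAligned(value: u64, alignment: u64) bool {
    return value % alignment == 0;
}

pub fn main() void {
    const valid_page_sizes = [_]u64{ 0x1000, 0x200000, 0x40000000 };
    const physical: u64 = 0x7f3000; // hypothetical: 4 KiB-aligned, not 2 MiB-aligned

    // Old check: this address mapped at 2 MiB granularity would panic.
    std.debug.print("aligned to asked size (2 MiB): {}\n", .{isAligned(physical, valid_page_sizes[1])});
    // New check: only base-page alignment is demanded of the physical address.
    std.debug.print("aligned to base size (4 KiB): {}\n", .{isAligned(physical, valid_page_sizes[0])});
}
```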
diff --git a/src/user/virtual.zig b/src/user/virtual.zig
index 40767c5..ccd1152 100644
--- a/src/user/virtual.zig
+++ b/src/user/virtual.zig
@@ -6,15 +6,29 @@ const user = @import("user");
 const assert = lib.assert;
 const log = lib.log;
+const SparseArray = lib.data_structures.SparseArray;
 const VirtualAddress = lib.VirtualAddress;
 
 const paging = lib.arch.paging;
 
+const Leaf = birth.interface.Leaf;
+
 pub const AddressSpace = extern struct {
     // page_table: PageTable,
-    region: Virtual.AddressSpace.Region,
+    region: Virtual.AddressSpace.Region = .{},
     minimum: VirtualAddress = VirtualAddress.new(paging.user_address_space_start),
     maximum: VirtualAddress = VirtualAddress.new(paging.user_address_space_end),
+    root_page_table: PageTable = .{},
+    page_table_buffer: SparseArray(PageTable) = .{
+        .ptr = undefined,
+        .len = 0,
+        .capacity = 0,
+    },
+    leaf_buffer: SparseArray(Leaf) = .{
+        .ptr = undefined,
+        .len = 0,
+        .capacity = 0,
+    },
 
     const Region = extern struct {
         list: Virtual.Region.List = .{},
@@ -23,44 +37,50 @@ pub const AddressSpace = extern struct {
 
     pub fn create() !*AddressSpace {
         const scheduler = user.currentScheduler();
-        const virtual_address_space = try scheduler.common.heapAllocateFast(AddressSpace);
-        virtual_address_space.* = .{
-            .page_table = undefined,
-            .region = .{},
-        };
+        const virtual_address_space = try scheduler.fast_allocator.create(AddressSpace);
+        virtual_address_space.* = .{};
 
-        virtual_address_space.collectPageTables(0, 0, 0, &virtual_address_space.page_table.root.u.page_table.children);
+        try virtual_address_space.collectPageTables(&virtual_address_space.root_page_table, .{});
 
         @panic("TODO: create");
     }
 
-    fn collectPageTables(virtual_address_space: *AddressSpace, block: u7, index: u7, level: usize, page_table_buffer: *[512]birth.interface.PageTable) !void {
-        _ = virtual_address_space;
+    fn collectPageTables(virtual_address_space: *Virtual.AddressSpace, page_table: *PageTable, descriptor: birth.interface.PageTable) !void {
         try user.Interface(.page_table, .get).blocking(.{
-            .descriptor = .{
-                .block = block,
-                .index = index,
-                .entry_type = .page_table,
-            },
-            .buffer = page_table_buffer,
+            .descriptor = descriptor,
+            .buffer = &page_table.children_handles,
         });
 
-        for (page_table_buffer, 0..) |page_table_entry, i| {
-            _ = i;
-            if (page_table_entry.present) {
-                switch (page_table_entry.entry_type) {
+        const allocator = &user.currentScheduler().fast_allocator;
+
+        for (page_table.children_handles, &page_table.indices) |child, *index| {
+            if (child.present) {
+                switch (child.entry_type) {
                     .page_table => {
-                        const scheduler = user.currentScheduler();
-                        const buffer = try scheduler.common.heapAllocateFast([512]birth.interface.PageTable);
-                        collectPageTables(page_table_entry.block, page_table_entry.index, level + 1, buffer) catch unreachable;
+                        const page_table_index = virtual_address_space.page_table_buffer.len;
+                        const new_page_table = try virtual_address_space.page_table_buffer.allocate(allocator);
+                        //user.currentScheduler().fast_allocator.create(PageTable);
+                        index.* = @intCast(page_table_index);
+
+                        try virtual_address_space.collectPageTables(new_page_table, child);
                     },
                     .leaf => {
-                        log.err("Leaf: {}", .{page_table_entry});
+                        const new_leaf = try virtual_address_space.leaf_buffer.allocate(allocator);
+                        index.* = @intCast(virtual_address_space.leaf_buffer.indexOf(new_leaf));
+                        try getLeaf(child, new_leaf);
+                        log.debug("New leaf: {}", .{new_leaf});
                     },
                 }
             }
         }
     }
+
+    fn getLeaf(leaf_descriptor: birth.interface.PageTable, leaf: *Leaf) !void {
+        try user.Interface(.page_table, .get_leaf).blocking(.{
+            .descriptor = leaf_descriptor,
+            .buffer = leaf,
+        });
+    }
 };
 
 pub const Region = extern struct {
@@ -74,45 +94,7 @@ pub const Region = extern struct {
     };
 };
 
-// fn newPageTableNode(page_table: Virtual.PageTable.Node.PageTable, level: paging.Level) PageTable.Node {
-//     return .{
-//         .flags = .{
-//             .type = .page_table,
-//             .level = level,
-//         },
-//         .u = .{
-//             .page_table = page_table,
-//         },
-//     };
-// }
-
-// pub const PageTable = extern struct {
-//     root: Node,
-//     foo: u32 = 0,
-//
-//     pub const Node = extern struct {
-//         flags: Flags,
-//         u: extern union {
-//             leaf: Leaf,
-//             page_table: Node.PageTable,
-//         },
-//
-//         pub const Flags = packed struct(u32) {
-//             type: birth.interface.PageTable.EntryType,
-//             level: paging.Level,
-//             reserved: u29 = 0,
-//         };
-//
-//         pub const Leaf = extern struct {
-//             foo: u32 = 0,
-//         };
-//
-//         pub const PageTable = extern struct {
-//             foo: u32 = 0,
-//             children: Buffer = .{.{ .entry_type = .page_table }} ** node_count,
-//         };
-//     };
-//
-//     const node_count = paging.page_table_entry_count;
-//     pub const Buffer = [node_count]birth.interface.PageTable;
-// };
+pub const PageTable = extern struct {
+    children_handles: [512]birth.interface.PageTable = .{.{}} ** 512,
+    indices: [512]u32 = .{0} ** 512,
+};
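The user-side mirror keeps two parallel 512-entry arrays per table: the raw capability handles and `indices` into the flat `page_table_buffer`/`leaf_buffer` sparse arrays that `collectPageTables` fills. A simplified, self-contained sketch of a lookup through that structure (types and sizes are stand-ins for the real `birth.interface` types):

```zig
const std = @import("std");

// Stand-in for birth.interface.PageTable: a handle with a presence bit and
// an entry kind.
const Handle = struct {
    present: bool = false,
    entry_type: enum { page_table, leaf } = .page_table,
};

// Mirror of the new user-side PageTable: handles plus parallel indices into
// the flat buffers, so a child is reached with one array lookup, no syscall.
const PageTable = struct {
    children_handles: [512]Handle = .{.{}} ** 512,
    indices: [512]u32 = .{0} ** 512,
};

const Leaf = struct { physical: u64 };

pub fn main() void {
    var page_table_buffer = [_]PageTable{.{}} ** 4;
    var leaf_buffer = [_]Leaf{.{ .physical = 0 }} ** 8;

    // Pretend collectPageTables filled slot 3 of the root with a leaf stored
    // at index 5 of the leaf buffer.
    leaf_buffer[5] = .{ .physical = 0x200000 };
    page_table_buffer[0].children_handles[3] = .{ .present = true, .entry_type = .leaf };
    page_table_buffer[0].indices[3] = 5;

    const root = &page_table_buffer[0];
    const slot = 3;
    if (root.children_handles[slot].present and root.children_handles[slot].entry_type == .leaf) {
        const leaf = leaf_buffer[root.indices[slot]];
        std.debug.print("slot {} -> leaf at 0x{x}\n", .{ slot, leaf.physical });
    }
}
```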