0000'7FFF'FFFF'0000 ↔ 0000'8000'0000'0000 - stack: 64KiB
-- Invalid addresses --
FFFF'8000'0000'0000 ↔ FFFF'C000'0000'0000 - physical memory: 64TiB
-FFFF'C000'0000'0000 ↔ FFFF'FFFF'8000'0000 - unmapped: 64TiB - 2GiB
+FFFF'C000'0000'0000 ↔ FFFF'C000'0001'0000 - process information table: 64KiB, 2048 processes at most
+FFFF'C000'0001'0000 ↔ FFFF'C000'0801'0000 - port information table: 128MiB (2048 processes × 4096 ports × 16 bytes per port), 4096 ports per process at most
+FFFF'C000'0801'0000 ↔ FFFF'FFFF'8000'0000 - unmapped: 64TiB - 2GiB - 128MiB - 64KiB
FFFF'FFFF'8000'0000 ↔10000'0000'0000'0000 - kernel: 2GiB
---------------------------
// not, see <https://www.gnu.org/licenses/>.
#include "elf64.hpp"
+
+os::process os::elf::load_elf(void* start, std::size_t length, const paging::PML4T& original_PML4T) {
+    // Loads a statically-linked x86_64 ELF64 executable from [start, start + length)
+    // into a freshly allocated address space and returns the new process descriptor
+    // (its PML4T, entry %rip and initial %rsp). Asserts on any malformed input.
+    os::assert(length >= sizeof(os::elf::header), "Elf file isn't big enough to contain a header: there is an error.");
+
+    // Load test-module elf file:
+    const os::elf::header elf_header = *(os::elf::header*)start;
+
+    // Check if elf is valid:
+    os::assert(elf_header.magic[0] == '\x7f', "No elf header.");
+    os::assert(elf_header.magic[1] == 'E', "No elf header.");
+    os::assert(elf_header.magic[2] == 'L', "No elf header.");
+    os::assert(elf_header.magic[3] == 'F', "No elf header.");
+    os::assert(elf_header.bitn == 2, "Elf file not 64 bits.");
+    os::assert(elf_header.endianness == 1, "Elf file not little endian.");
+    os::assert(elf_header.type == 2, "Elf file not executable.");
+    os::assert(elf_header.arch == 0x3E, "Elf file not x86_64.");
+
+    // The user stack sits just below the top of the lower canonical half (see memory map).
+    constexpr std::size_t stack_size = 16 * 0x1000 /* 64KiB */;
+    std::byte* const stack = (std::byte*)0x0000'8000'0000'0000 - stack_size;
+    os::process result = {
+        .PML4T = phys_ptr<paging::PML4T>(os::paging::page_allocator.allocate(1).ptr.get_phys_addr()),
+        .rip = std::uint64_t(elf_header.entry),
+        .rsp = std::uint64_t(stack + stack_size)
+    };
+
+    // Copy kernel mappings (higher-half PML4 entries 256..511) to the new virtual address space.
+    for (std::size_t i = 0; i < 256; i++) {
+        result.PML4T->contents[256 + i] = original_PML4T.contents[256 + i];
+    }
+
+    for (std::size_t i = 0; i < elf_header.entry_count_program_header_table; i++) {
+        // The program header entry itself must lie fully inside the file.
+        os::assert(elf_header.program_header_table + (i + 1) * elf_header.entry_size_program_header_table <= length,
+            "Elf program header table extends past the end of the file.");
+        const os::elf::program_header program_header = *(os::elf::program_header*)(
+            reinterpret_cast<std::byte*>(start) + elf_header.program_header_table
+            + i * elf_header.entry_size_program_header_table
+        );
+        if (program_header.type != 1) { // Segment shouldn't be loaded.
+            os::print("Segment.\n");
+            continue;
+        }
+        os::print("Segment: loadable\n");
+        os::assert(0x1000 <= std::uint64_t(program_header.p_vaddr)
+            && std::uint64_t(program_header.p_vaddr + program_header.p_memsz) < 0x10'0000'0000,
+            "Program segments must be contained between 0x1000 and 0x10'0000'0000 (i.e. 64GiB).");
+        // The segment's file-backed bytes must lie fully inside the file.
+        os::assert(program_header.p_offset + program_header.p_filesz <= length,
+            "Elf segment extends past the end of the file.");
+
+        // Allocate, map and initialise memory for segment (memory above program_header.p_filesz is already 0-initialised by the page allocator):
+        const std::uint64_t segment_vaddr = std::uint64_t(program_header.p_vaddr);
+        // p_vaddr need not be page-aligned: account for the in-page offset when counting pages.
+        std::size_t nb_pages = (segment_vaddr % 0x1000 + program_header.p_memsz + 0x1000 - 1) / 0x1000;
+        for (std::size_t page_i = 0; page_i < nb_pages; page_i++) {
+            std::byte* const page =
+                os::paging::setup_page(*result.PML4T, program_header.p_vaddr + page_i * 0x1000, (program_header.flags & 2) >> 1, 1);
+            // Virtual address of the first byte of this page (page-aligned, unlike p_vaddr).
+            const std::uint64_t page_base = segment_vaddr / 0x1000 * 0x1000 + page_i * 0x1000;
+            for (std::size_t j = 0; j < 0x1000; j++) {
+                const std::uint64_t vaddr = page_base + j;
+                // Only bytes in [p_vaddr, p_vaddr + p_filesz) come from the file; everything
+                // else (alignment slack and .bss) stays 0 from the allocator. Note the source
+                // offset advances with the page: previously the code re-copied the segment's
+                // first bytes into every page and under-flowed past p_filesz.
+                if (segment_vaddr <= vaddr && vaddr - segment_vaddr < program_header.p_filesz) {
+                    page[j] = ((std::byte*)start)[program_header.p_offset + (vaddr - segment_vaddr)];
+                }
+            }
+        }
+    }
+
+    // Allocate and map stack (already 0-initialised by the page allocator):
+    for (std::size_t i = 0; i < stack_size / 0x1000 /* 64KiB */; i++) {
+        os::paging::setup_page(*result.PML4T, stack + i * 0x1000, 1, 1);
+    }
+
+    return result;
+}
#include <cstdint>
#include "lib/phys_ptr.hpp"
+#include "ring3.hpp"
namespace os { namespace elf {
struct header;
struct program_header;
-
struct __attribute__((packed)) header {
std::uint8_t magic[4];
std::uint8_t bitn;
std::uint64_t align;
};
static_assert(sizeof(program_header) == 56);
+
+ process load_elf(void* start, std::size_t length, const paging::PML4T& original_PML4T);
} } // namespace os::elf
}
struct {
- os::phys_ptr<os::paging::page> start_address = nullptr;
- os::phys_ptr<os::paging::page> end_address = nullptr;
+ std::uint64_t start_address = 0;
+ std::uint64_t end_address = 0;
} test_module;
{
struct {
break;
case multiboot2::info::type_t::modules:
os::print("{}->{}: {}\n", multiboot2::modules_mod_start(it) / 0x1000 * 0x1000, multiboot2::modules_mod_end(it) / 0x1000 * 0x1000, multiboot2::modules_string(it));
- test_module.start_address = os::phys_ptr<os::paging::page>(multiboot2::modules_mod_start(it) / 0x1000 * 0x1000);
- test_module.end_address = os::phys_ptr<os::paging::page>(multiboot2::modules_mod_end(it) / 0x1000 * 0x1000); // [s,e], not [s,e[
+ test_module.start_address = multiboot2::modules_mod_start(it);
+ test_module.end_address = multiboot2::modules_mod_end(it);
break;
default: break;
}
available_ram_length++;
}
}
+ struct {
+ os::phys_ptr<os::paging::page> start_address;
+ os::phys_ptr<os::paging::page> end_address;
+ } test_module_block{
+ os::phys_ptr<os::paging::page>(test_module.start_address / 0x1000 * 0x1000),
+ os::phys_ptr<os::paging::page>(test_module.end_address / 0x1000 * 0x1000)
+ };
// Remove module from available RAM:
for (std::size_t i = 0; i < available_ram_length; i++) {
- if (test_module.end_address < available_ram[i].start_address || available_ram[i].end_address < test_module.start_address) {
+ if (test_module_block.end_address < available_ram[i].start_address || available_ram[i].end_address < test_module_block.start_address) {
continue;
}
- if (test_module.start_address <= available_ram[i].start_address && available_ram[i].end_address <= test_module.end_address) {
+ if (test_module_block.start_address <= available_ram[i].start_address && available_ram[i].end_address <= test_module_block.end_address) {
available_ram[i] = available_ram[--available_ram_length];
i--;
- } else if (test_module.start_address <= available_ram[i].start_address) {
- available_ram[i].start_address = test_module.end_address + 1; // Since e < end_address, new start_address <= end_address.
- } else if (available_ram[i].end_address <= test_module.end_address) {
- available_ram[i].end_address = test_module.start_address - 1; // Since start_address < s, start_address <= new end_address.
+ } else if (test_module_block.start_address <= available_ram[i].start_address) {
+ available_ram[i].start_address = test_module_block.end_address + 1; // Since e < end_address, new start_address <= end_address.
+ } else if (available_ram[i].end_address <= test_module_block.end_address) {
+ available_ram[i].end_address = test_module_block.start_address - 1; // Since start_address < s, start_address <= new end_address.
} else {
os::assert(available_ram_length < 50, "Too much available RAM sections to initialise correctly. Will fix eventually, probably.");
available_ram[available_ram_length] = available_ram[i];
- available_ram[i].end_address = test_module.start_address - 1;
- available_ram[available_ram_length].start_address = test_module.end_address + 1;
+ available_ram[i].end_address = test_module_block.start_address - 1;
+ available_ram[available_ram_length].start_address = test_module_block.end_address + 1;
available_ram_length++;
}
}
os::paging::page_allocator.deallocate({.ptr = os::phys_ptr<os::paging::page>(get_base_address(PML4T.contents[0]).get_phys_addr()), .size = 1});
os::invlpg((void*)0x0);
- // Load test-module elf file:
- const os::elf::header elf_header = *os::phys_ptr<os::elf::header>(test_module.start_address.get_phys_addr());
-
- // Check if elf is valid:
- os::assert(elf_header.magic[0] == '\x7f', "No elf header.");
- os::assert(elf_header.magic[1] == 'E', "No elf header.");
- os::assert(elf_header.magic[2] == 'L', "No elf header.");
- os::assert(elf_header.magic[3] == 'F', "No elf header.");
- os::assert(elf_header.bitn == 2, "Elf file not 64 bits.");
- os::assert(elf_header.endianness == 1, "Elf file not little endian.");
- os::assert(elf_header.type == 2, "Elf file not executable.");
- os::assert(elf_header.arch == 0x3E, "Elf file not x86_64.");
-
- for (std::size_t i = 0; i < elf_header.entry_count_program_header_table; i++) {
- const os::elf::program_header program_header = *os::phys_ptr<os::elf::program_header>(
- (os::phys_ptr<std::byte>(test_module.start_address.get_phys_addr()) + elf_header.program_header_table
- + i * elf_header.entry_size_program_header_table).get_phys_addr()
- );
- if (program_header.type != 1) { // Segment shouldn't be loaded.
- os::print("Segment.\n");
- continue;
- }
- os::print("Segment: loadable\n");
- os::assert(0x1000 <= std::uint64_t(program_header.p_vaddr)
- && std::uint64_t(program_header.p_vaddr + program_header.p_memsz) < 0x10'0000'0000,
- "Program segments must be contained between 0x1000 and 0x10'0000'0000 (i.e. 64GiB).");
-
- // Allocate memory for segment:
- std::size_t nb_pages = (std::uint64_t(program_header.p_vaddr) % 0x1000 + program_header.p_memsz + 0x1000 - 1) / 0x1000;
- for (std::size_t i = 0; i < nb_pages; i++) {
- os::paging::setup_page(PML4T, program_header.p_vaddr + i * 0x1000, (program_header.flags & 2) >> 1, 1);
- }
- // Initialise memory for segment:
- for (std::size_t i = 0; i < program_header.p_filesz; i++) {
- program_header.p_vaddr[i] = os::phys_ptr<std::byte>(test_module.start_address.get_phys_addr())[program_header.p_offset + i];
- }
- for (std::size_t i = program_header.p_filesz; i < program_header.p_memsz; i++) {
- program_header.p_vaddr[i] = std::byte(0);
- }
+ // Allocate this page so that I only ever need to update the mapping once when I create a new process or port.
+ {
+ const auto index = os::paging::calc_page_table_indices((void*)0xFFFF'C000'0000'0000).pml4e;
+ PML4T.contents[index] = {.P = 1, .R_W = 1, .U_S = 0, .PWT = 0, .PCD = 0, .base_address = 0, .NX = 0};
+ const auto PDPT_alloc = os::paging::page_allocator.allocate(1);
+ set_base_address(PML4T.contents[index], os::phys_ptr<os::paging::PDPT>(PDPT_alloc.ptr.get_phys_addr()));
}
- // Allocate and map stack.
- constexpr std::size_t stack_size = 16 * 0x1000 /* 64KiB */;
- std::byte * const stack = (std::byte*)0x0000'8000'0000'0000 - stack_size;
- for (std::size_t i = 0; i < stack_size / 0x1000 /* 64KiB */; i++) {
- os::paging::setup_page(PML4T, stack + i * 0x1000, 1, 1);
- }
+ auto test_module_process =
+ os::elf::load_elf(&*os::phys_ptr<std::byte>(test_module.start_address), test_module.end_address - test_module.start_address, PML4T);
os::print("Loading ring 3 interrupts stack.\n");
os::set_ring0_stack(TSS, std::uint64_t(&interrupt_stack_top));
os::load_tss();
os::print("Enabling syscalls.\n");
os::enable_syscalls();
+
+ os::paging::setup_page(PML4T, (void*)0xFFFF'C000'0000'0000, true, false);
+ os::current_pid = 1;
+ os::get_process(os::current_pid) = test_module_process;
+
+ os::print("Loading test_module virtual memory map.\n");
+ os::paging::load_pml4t(os::get_process(os::current_pid).PML4T);
os::print("Moving to ring 3.\n");
- os::ftl_to_userspace(elf_header.entry, stack + stack_size);
+ os::ftl_to_userspace((void*)os::get_process(os::current_pid).rip, (std::byte*)os::get_process(os::current_pid).rsp);
}
#include "paging.hpp"
#include "serial.hpp"
-void os::paging::setup_page(os::paging::PML4T& PML4T, const void* vaddr, bool R_W, bool U_S) {
+std::byte* os::paging::setup_page(os::paging::PML4T& PML4T, const void* vaddr, bool R_W, bool U_S) {
const auto indices = os::paging::calc_page_table_indices(vaddr);
if (PML4T.contents[indices.pml4e].P == 0) {
PML4T.contents[indices.pml4e] = {.P = 1, .R_W = 1, .U_S = U_S, .PWT = 0, .PCD = 0, .base_address = 0, .NX = 0};
const auto page_alloc = os::paging::page_allocator.allocate(1);
set_page_base_address(PT.contents[indices.pe], os::phys_ptr<os::paging::page>(page_alloc.ptr.get_phys_addr()));
invlpg(vaddr);
+ return (std::byte*)&*page_alloc.ptr;
}
namespace {
}
}
+// Switches the active address space by loading the given top-level page table into %cr3.
+// NOTE(review): passes the phys_ptr object straight into an "r" constraint — assumes
+// phys_ptr<PML4T> is represented as the raw 64-bit physical address; confirm its layout.
+// The "memory" clobber stops the compiler reordering memory accesses across the switch.
+void os::paging::load_pml4t(phys_ptr<PML4T> PML4T) {
+    asm volatile("mov %0, %%cr3" :: "r" (PML4T) : "memory");
+}
os::paging::page one_past_end_page_for_page_allocator;
os::paging::page_allocator_t os::paging::page_allocator(os::phys_ptr<os::paging::page>(reinterpret_cast<uintptr_t>(&one_past_end_page_for_page_allocator) - 0xFFFFFFFF80000000));
};
}
-void setup_page(PML4T& PML4T, const void* vaddr, bool R_W, bool U_S);
+std::byte* setup_page(PML4T& PML4T, const void* vaddr, bool R_W, bool U_S);
// For all present page mappings, calls f(virtual address, physical address, page size in bytes (4KiB, 2MiB or 1GiB)).
void on_all_pages(const PML4T& PML4T, void f(page*, phys_ptr<page>, std::size_t));
+void load_pml4t(phys_ptr<PML4T> PML4T);
+
class page_allocator_t;
extern page_allocator_t page_allocator;
// Fatal handler for an unrecognised syscall number in %rax; never returns normally.
// extern "C" linkage — presumably invoked from the assembly syscall entry stub; confirm.
extern "C" void os::syscall_rax_error_handler() {
    os::assert(false, "Incorrect %rax for syscall.");
}
+
+std::uint64_t os::current_pid;
#include <cstdint>
#include <cstddef>
+#include "lib/phys_ptr.hpp"
+#include "paging.hpp"
namespace os {
void set_ring0_stack(tss& tss, std::uint64_t stack);
void enable_syscalls();
+// Per-process entry in the process information table (see memory map at 0xFFFF'C000'0000'0000).
+struct process {
+    phys_ptr<paging::PML4T> PML4T; // Physical address of the root of this process's page tables.
+    std::uint64_t rip;             // Saved user-mode instruction pointer.
+    std::uint64_t rsp;             // Saved user-mode stack pointer.
+    std::uint64_t : 64;            // Padding so each table entry is exactly 32 bytes.
+};
+static_assert(sizeof(process) == 32);
+
+// Per-port entry in the port information table (see memory map at 0xFFFF'C000'0001'0000).
+// (¬0);0 -> port was closed on other side, via syscall or program termination
+// 0 ;0 -> port doesn't exist
+struct port {
+    std::uint64_t other_pid;  // Pid of the process on the other end (see states above).
+    std::uint64_t other_port; // Port index on the other end (0 if closed or nonexistent).
+};
+static_assert(sizeof(port) == 16);
+
+// Returns a reference to pid's slot in the process information table, which the
+// memory map places at 0xFFFF'C000'0000'0000 (2048 processes at most). Pids are 1-based.
+inline process& get_process(std::uint64_t pid) {
+    os::assert(1 <= pid && pid <= 2048, "Invalid pid: {}.\n", pid);
+    return ((process*)0xFFFF'C000'0000'0000)[pid - 1];
+}
+// Returns a reference to (pid, port)'s slot in the port information table, which the
+// memory map places at 0xFFFF'C000'0001'0000 (4096 ports per process, 2048 processes).
+// Both pid and port are 1-based.
+inline port& get_port(std::uint64_t pid, std::uint64_t port) {
+    os::assert(1 <= pid && pid <= 2048, "Invalid pid: {}.\n", pid);
+    // Fix: the diagnostic previously printed pid instead of the offending port number.
+    os::assert(1 <= port && port <= 4096, "Invalid port: {}.\n", port);
+    return ((os::port*)0xFFFF'C000'0001'0000)[(pid - 1) * 4096 + (port - 1)];
+}
+
+extern std::uint64_t current_pid;
+
} // namespace os