os/
loader.rs

//! Loading user applications into memory
//!
//! For chapter 3, user applications are simply part of the data included in the
//! kernel binary, so we only need to copy them to the space allocated for each
//! app to load them. We also allocate fixed spaces for each task's
//! [`KernelStack`] and [`UserStack`].
7
8use crate::config::*;
9use crate::trap::TrapContext;
10use core::arch::asm;
11
/// Per-app kernel-mode stack.
///
/// Page-aligned (4 KiB) so each stack starts on its own page boundary.
/// `Copy + Clone` is required so an array of these can be built from a
/// single repeated initializer in a `static`.
#[repr(align(4096))]
#[derive(Copy, Clone)]
struct KernelStack {
    // Raw backing storage; the stack grows downward from the end of this array.
    data: [u8; KERNEL_STACK_SIZE],
}
17
/// Per-app user-mode stack.
///
/// Page-aligned (4 KiB); the app receives the address one past the end of
/// `data` as its initial stack pointer.
#[repr(align(4096))]
#[derive(Copy, Clone)]
struct UserStack {
    // Raw backing storage; the stack grows downward from the end of this array.
    data: [u8; USER_STACK_SIZE],
}
23
/// One fixed kernel stack per application slot, zero-filled and placed in
/// the kernel image at link time (no dynamic allocation in this chapter).
static KERNEL_STACK: [KernelStack; MAX_APP_NUM] = [KernelStack {
    data: [0; KERNEL_STACK_SIZE],
}; MAX_APP_NUM];
27
/// One fixed user stack per application slot, zero-filled and placed in
/// the kernel image at link time (no dynamic allocation in this chapter).
static USER_STACK: [UserStack; MAX_APP_NUM] = [UserStack {
    data: [0; USER_STACK_SIZE],
}; MAX_APP_NUM];
31
32impl KernelStack {
33    fn get_sp(&self) -> usize {
34        self.data.as_ptr() as usize + KERNEL_STACK_SIZE
35    }
36    pub fn push_context(&self, trap_cx: TrapContext) -> usize {
37        let trap_cx_ptr = (self.get_sp() - core::mem::size_of::<TrapContext>()) as *mut TrapContext;
38        unsafe {
39            *trap_cx_ptr = trap_cx;
40        }
41        trap_cx_ptr as usize
42    }
43}
44
45impl UserStack {
46    fn get_sp(&self) -> usize {
47        self.data.as_ptr() as usize + USER_STACK_SIZE
48    }
49}
50
51/// Get base address of app i.
52fn get_base_i(app_id: usize) -> usize {
53    APP_BASE_ADDRESS + app_id * APP_SIZE_LIMIT
54}
55
/// Get the total number of applications.
///
/// `_num_app` is a symbol emitted by the build script into the kernel
/// image; the `usize` stored at that address is the app count, followed
/// by the start address of each embedded app (see [`load_apps`]).
pub fn get_num_app() -> usize {
    unsafe extern "C" {
        safe fn _num_app();
    }
    // SAFETY: the linker places a valid usize at `_num_app`; volatile read
    // keeps the access even though the location looks like a constant fn.
    unsafe { (_num_app as usize as *const usize).read_volatile() }
}
63
64/// Load nth user app at
65/// [APP_BASE_ADDRESS + n * APP_SIZE_LIMIT, APP_BASE_ADDRESS + (n+1) * APP_SIZE_LIMIT).
66pub fn load_apps() {
67    unsafe extern "C" {
68        safe fn _num_app();
69    }
70    let num_app_ptr = _num_app as usize as *const usize;
71    let num_app = get_num_app();
72    let app_start = unsafe { core::slice::from_raw_parts(num_app_ptr.add(1), num_app + 1) };
73    // load apps
74    for i in 0..num_app {
75        let base_i = get_base_i(i);
76        // clear region
77        (base_i..base_i + APP_SIZE_LIMIT)
78            .for_each(|addr| unsafe { (addr as *mut u8).write_volatile(0) });
79        // load app from data section to memory
80        let src = unsafe {
81            core::slice::from_raw_parts(app_start[i] as *const u8, app_start[i + 1] - app_start[i])
82        };
83        let dst = unsafe { core::slice::from_raw_parts_mut(base_i as *mut u8, src.len()) };
84        dst.copy_from_slice(src);
85    }
86    // Memory fence about fetching the instruction memory
87    // It is guaranteed that a subsequent instruction fetch must
88    // observes all previous writes to the instruction memory.
89    // Therefore, fence.i must be executed after we have loaded
90    // the code of the next app into the instruction memory.
91    // See also: riscv non-priv spec chapter 3, 'Zifencei' extension.
92    unsafe {
93        asm!("fence.i");
94    }
95}
96
97/// get app info with entry and sp and save `TrapContext` in kernel stack
98pub fn init_app_cx(app_id: usize) -> usize {
99    KERNEL_STACK[app_id].push_context(TrapContext::app_init_context(
100        get_base_i(app_id),
101        USER_STACK[app_id].get_sp(),
102    ))
103}