//! os/task/pid.rs — implementation of [`PidAllocator`].
2use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE};
3use crate::mm::{KERNEL_SPACE, MapPermission, VirtAddr};
4use crate::sync::UPSafeCell;
5use alloc::vec::Vec;
6use lazy_static::*;
/// Pid allocator: a monotonically growing counter plus a LIFO free list.
pub struct PidAllocator {
    // Next pid that has never been handed out.
    current: usize,
    // Pids returned via `dealloc`, reused before `current` advances.
    recycled: Vec<usize>,
}

13impl PidAllocator {
14    ///Create an empty `PidAllocator`
15    pub fn new() -> Self {
16        PidAllocator {
17            current: 0,
18            recycled: Vec::new(),
19        }
20    }
21    ///Allocate a pid
22    pub fn alloc(&mut self) -> PidHandle {
23        if let Some(pid) = self.recycled.pop() {
24            PidHandle(pid)
25        } else {
26            self.current += 1;
27            PidHandle(self.current - 1)
28        }
29    }
30    ///Recycle a pid
31    pub fn dealloc(&mut self, pid: usize) {
32        assert!(pid < self.current);
33        assert!(
34            !self.recycled.iter().any(|ppid| *ppid == pid),
35            "pid {} has been deallocated!",
36            pid
37        );
38        self.recycled.push(pid);
39    }
40}
41
42lazy_static! {
43    pub static ref PID_ALLOCATOR: UPSafeCell<PidAllocator> =
44        unsafe { UPSafeCell::new(PidAllocator::new()) };
45}
/// RAII wrapper tying a pid's lifetime to this handle; see its `Drop` impl.
pub struct PidHandle(pub usize);

49impl Drop for PidHandle {
50    fn drop(&mut self) {
51        //println!("drop pid {}", self.0);
52        PID_ALLOCATOR.exclusive_access().dealloc(self.0);
53    }
54}
55///Allocate a pid from PID_ALLOCATOR
56pub fn pid_alloc() -> PidHandle {
57    PID_ALLOCATOR.exclusive_access().alloc()
58}
59
60/// Return (bottom, top) of a kernel stack in kernel space.
61pub fn kernel_stack_position(app_id: usize) -> (usize, usize) {
62    let top = TRAMPOLINE - app_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
63    let bottom = top - KERNEL_STACK_SIZE;
64    (bottom, top)
65}
/// A per-process kernel stack, located in kernel space by its owning pid.
pub struct KernelStack {
    // Pid that owns this stack; determines its position via
    // `kernel_stack_position`.
    pid: usize,
}

71impl KernelStack {
72    ///Create a kernelstack from pid
73    pub fn new(pid_handle: &PidHandle) -> Self {
74        let pid = pid_handle.0;
75        let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(pid);
76        KERNEL_SPACE.exclusive_access().insert_framed_area(
77            kernel_stack_bottom.into(),
78            kernel_stack_top.into(),
79            MapPermission::R | MapPermission::W,
80        );
81        KernelStack { pid: pid_handle.0 }
82    }
83    #[allow(unused)]
84    ///Push a value on top of kernelstack
85    pub fn push_on_top<T>(&self, value: T) -> *mut T
86    where
87        T: Sized,
88    {
89        let kernel_stack_top = self.get_top();
90        let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
91        unsafe {
92            *ptr_mut = value;
93        }
94        ptr_mut
95    }
96    ///Get the value on the top of kernelstack
97    pub fn get_top(&self) -> usize {
98        let (_, kernel_stack_top) = kernel_stack_position(self.pid);
99        kernel_stack_top
100    }
101}
102
103impl Drop for KernelStack {
104    fn drop(&mut self) {
105        let (kernel_stack_bottom, _) = kernel_stack_position(self.pid);
106        let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
107        KERNEL_SPACE
108            .exclusive_access()
109            .remove_area_with_start_vpn(kernel_stack_bottom_va.into());
110    }
111}