//! os/task/id.rs — allocators for process IDs, kernel-stack slots, and
//! per-thread user resources.

use super::ProcessControlBlock;
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
use crate::mm::{KERNEL_SPACE, MapPermission, PhysPageNum, VirtAddr};
use crate::sync::UPSafeCell;
use alloc::{
    sync::{Arc, Weak},
    vec::Vec,
};
use lazy_static::*;
10
/// Bump allocator with a free list, handing out reusable integer IDs
/// (used below for PIDs and kernel-stack slots).
pub struct RecycleAllocator {
    current: usize,
    recycled: Vec<usize>,
}

impl RecycleAllocator {
    /// Create an empty allocator; the first `alloc` yields 0.
    pub fn new() -> Self {
        Self {
            current: 0,
            recycled: Vec::new(),
        }
    }
    /// Hand out an ID, preferring one from the free list before
    /// advancing the high-water mark.
    pub fn alloc(&mut self) -> usize {
        match self.recycled.pop() {
            Some(id) => id,
            None => {
                let fresh = self.current;
                self.current += 1;
                fresh
            }
        }
    }
    /// Return `id` to the free list.
    ///
    /// Panics if `id` was never allocated, or if it is already on the
    /// free list (double free).
    pub fn dealloc(&mut self, id: usize) {
        assert!(id < self.current);
        assert!(
            !self.recycled.contains(&id),
            "id {} has been deallocated!",
            id
        );
        self.recycled.push(id);
    }
}
41
lazy_static! {
    /// Global allocator for process IDs.
    static ref PID_ALLOCATOR: UPSafeCell<RecycleAllocator> =
        unsafe { UPSafeCell::new(RecycleAllocator::new()) };
    /// Global allocator for kernel-stack slot ids.
    static ref KSTACK_ALLOCATOR: UPSafeCell<RecycleAllocator> =
        unsafe { UPSafeCell::new(RecycleAllocator::new()) };
}
48
/// Process ID reserved for the idle process.
pub const IDLE_PID: usize = 0;
50
/// RAII handle owning a process ID; the ID is returned to
/// `PID_ALLOCATOR` when the handle is dropped.
pub struct PidHandle(pub usize);
52
53pub fn pid_alloc() -> PidHandle {
54    PidHandle(PID_ALLOCATOR.exclusive_access().alloc())
55}
56
impl Drop for PidHandle {
    /// Recycle the wrapped PID when the handle goes out of scope.
    fn drop(&mut self) {
        PID_ALLOCATOR.exclusive_access().dealloc(self.0);
    }
}
62
63/// Return (bottom, top) of a kernel stack in kernel space.
64pub fn kernel_stack_position(kstack_id: usize) -> (usize, usize) {
65    let top = TRAMPOLINE - kstack_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
66    let bottom = top - KERNEL_STACK_SIZE;
67    (bottom, top)
68}
69
/// RAII handle owning a kernel-stack slot (the wrapped `usize` is the
/// slot id); the stack is unmapped and the id recycled on drop.
pub struct KernelStack(pub usize);
72pub fn kstack_alloc() -> KernelStack {
73    let kstack_id = KSTACK_ALLOCATOR.exclusive_access().alloc();
74    let (kstack_bottom, kstack_top) = kernel_stack_position(kstack_id);
75    KERNEL_SPACE.exclusive_access().insert_framed_area(
76        kstack_bottom.into(),
77        kstack_top.into(),
78        MapPermission::R | MapPermission::W,
79    );
80    KernelStack(kstack_id)
81}
82
83impl Drop for KernelStack {
84    fn drop(&mut self) {
85        let (kernel_stack_bottom, _) = kernel_stack_position(self.0);
86        let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
87        KERNEL_SPACE
88            .exclusive_access()
89            .remove_area_with_start_vpn(kernel_stack_bottom_va.into());
90        KSTACK_ALLOCATOR.exclusive_access().dealloc(self.0);
91    }
92}
93
94impl KernelStack {
95    #[allow(unused)]
96    pub fn push_on_top<T>(&self, value: T) -> *mut T
97    where
98        T: Sized,
99    {
100        let kernel_stack_top = self.get_top();
101        let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
102        unsafe {
103            *ptr_mut = value;
104        }
105        ptr_mut
106    }
107    pub fn get_top(&self) -> usize {
108        let (_, kernel_stack_top) = kernel_stack_position(self.0);
109        kernel_stack_top
110    }
111}
112
/// Per-thread user-space resources: the thread id plus the thread's
/// user stack and trap-context pages inside the owning process's
/// address space. Everything is released on drop.
pub struct TaskUserRes {
    /// Thread id, allocated from the owning process.
    pub tid: usize,
    /// Base virtual address from which per-thread user stacks are laid out.
    pub ustack_base: usize,
    /// Back-reference to the owning process; `Weak` so a thread's
    /// resources do not keep the process alive.
    pub process: Weak<ProcessControlBlock>,
}
118
119fn trap_cx_bottom_from_tid(tid: usize) -> usize {
120    TRAP_CONTEXT_BASE - tid * PAGE_SIZE
121}
122
123fn ustack_bottom_from_tid(ustack_base: usize, tid: usize) -> usize {
124    ustack_base + tid * (PAGE_SIZE + USER_STACK_SIZE)
125}
126
impl TaskUserRes {
    /// Allocate a tid from `process` and build the resource record.
    /// When `alloc_user_res` is true, the thread's user stack and
    /// trap-context pages are mapped immediately; otherwise the caller
    /// is expected to invoke `alloc_user_res` later.
    pub fn new(
        process: Arc<ProcessControlBlock>,
        ustack_base: usize,
        alloc_user_res: bool,
    ) -> Self {
        // NOTE: the process inner lock is taken and released here, then
        // re-taken inside alloc_user_res() — the two must not overlap.
        let tid = process.inner_exclusive_access().alloc_tid();
        let task_user_res = Self {
            tid,
            ustack_base,
            process: Arc::downgrade(&process),
        };
        if alloc_user_res {
            task_user_res.alloc_user_res();
        }
        task_user_res
    }

    /// Map this thread's user stack and trap-context page into the
    /// owning process's address space.
    ///
    /// Panics if the owning process has already been dropped (the weak
    /// back-pointer fails to upgrade).
    pub fn alloc_user_res(&self) {
        let process = self.process.upgrade().unwrap();
        let mut process_inner = process.inner_exclusive_access();
        // alloc user stack (user-accessible: R | W | U)
        let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
        let ustack_top = ustack_bottom + USER_STACK_SIZE;
        process_inner.memory_set.insert_framed_area(
            ustack_bottom.into(),
            ustack_top.into(),
            MapPermission::R | MapPermission::W | MapPermission::U,
        );
        // alloc trap_cx — deliberately no U flag, so user-mode code
        // cannot read or corrupt its own trap context.
        let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
        let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
        process_inner.memory_set.insert_framed_area(
            trap_cx_bottom.into(),
            trap_cx_top.into(),
            MapPermission::R | MapPermission::W,
        );
    }

    /// Unmap this thread's user stack and trap-context page from the
    /// owning process's address space (the inverse of alloc_user_res).
    fn dealloc_user_res(&self) {
        // dealloc tid
        let process = self.process.upgrade().unwrap();
        let mut process_inner = process.inner_exclusive_access();
        // dealloc ustack manually
        let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
        process_inner
            .memory_set
            .remove_area_with_start_vpn(ustack_bottom_va.into());
        // dealloc trap_cx manually
        let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
        process_inner
            .memory_set
            .remove_area_with_start_vpn(trap_cx_bottom_va.into());
    }

    /// Allocate a new tid from the owning process and store it,
    /// overwriting `self.tid`. The previous tid is NOT deallocated
    /// here — callers must recycle it first if it is still live.
    #[allow(unused)]
    pub fn alloc_tid(&mut self) {
        self.tid = self
            .process
            .upgrade()
            .unwrap()
            .inner_exclusive_access()
            .alloc_tid();
    }

    /// Return this thread's tid to the owning process's tid allocator.
    pub fn dealloc_tid(&self) {
        let process = self.process.upgrade().unwrap();
        let mut process_inner = process.inner_exclusive_access();
        process_inner.dealloc_tid(self.tid);
    }

    /// User virtual address of this thread's trap context.
    pub fn trap_cx_user_va(&self) -> usize {
        trap_cx_bottom_from_tid(self.tid)
    }

    /// Physical page number backing this thread's trap context, looked
    /// up through the owning process's page table. Panics if the page
    /// is not currently mapped.
    pub fn trap_cx_ppn(&self) -> PhysPageNum {
        let process = self.process.upgrade().unwrap();
        let process_inner = process.inner_exclusive_access();
        let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
        process_inner
            .memory_set
            .translate(trap_cx_bottom_va.into())
            .unwrap()
            .ppn()
    }

    /// Base address of the per-thread user-stack layout region.
    pub fn ustack_base(&self) -> usize {
        self.ustack_base
    }
    /// Highest address (exclusive) of this thread's user stack.
    pub fn ustack_top(&self) -> usize {
        ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
    }
}
220
impl Drop for TaskUserRes {
    /// Recycle the tid, then unmap the user stack and trap-context
    /// pages; each step re-locks the owning process's inner state.
    fn drop(&mut self) {
        self.dealloc_tid();
        self.dealloc_user_res();
    }
}