use super::{FrameTracker, frame_alloc};
use super::{PTEFlags, PageTable, PageTableEntry};
use super::{PhysAddr, PhysPageNum, VirtAddr, VirtPageNum};
use super::{StepByOne, VPNRange};
use crate::config::{MEMORY_END, MMIO, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT, USER_STACK_SIZE};
use crate::sync::UPSafeCell;
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::arch::asm;
use lazy_static::*;
use riscv::register::satp;

// Section boundary symbols supplied by the linker script.
unsafe extern "C" {
    safe fn stext();
    safe fn etext();
    safe fn srodata();
    safe fn erodata();
    safe fn sdata();
    safe fn edata();
    safe fn sbss_with_stack();
    safe fn ebss();
    safe fn ekernel();
    safe fn strampoline();
}

lazy_static! {
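    /// The kernel's address space, built once on first access.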
    pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> =
        Arc::new(unsafe { UPSafeCell::new(MemorySet::new_kernel()) });
}
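
/// An address space: a root page table plus the list of mapped areas that
/// own its frames.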
pub struct MemorySet {
    page_table: PageTable,
    areas: Vec<MapArea>,
}

impl MemorySet {
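    /// Create an empty address space with a fresh root page table.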
    pub fn new_bare() -> Self {
        Self {
            page_table: PageTable::new(),
            areas: Vec::new(),
        }
    }
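    /// The `satp` token that selects this address space's root page table.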
    pub fn token(&self) -> usize {
        self.page_table.token()
    }
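    /// Map a new framed area over `[start_va, end_va)`; assumes the range
    /// does not overlap any existing area.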
    pub fn insert_framed_area(
        &mut self,
        start_va: VirtAddr,
        end_va: VirtAddr,
        permission: MapPermission,
    ) {
        self.push(
            MapArea::new(start_va, end_va, MapType::Framed, permission),
            None,
        );
    }
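    /// Unmap and drop the area whose range starts at `start_vpn`, if any.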
    pub fn remove_area_with_start_vpn(&mut self, start_vpn: VirtPageNum) {
        if let Some((idx, area)) = self
            .areas
            .iter_mut()
            .enumerate()
            .find(|(_, area)| area.vpn_range.get_start() == start_vpn)
        {
            area.unmap(&mut self.page_table);
            self.areas.remove(idx);
        }
    }
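    /// Map `map_area` into this space, optionally copying `data` into it,
    /// then take ownership of the area.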
    fn push(&mut self, mut map_area: MapArea, data: Option<&[u8]>) {
        map_area.map(&mut self.page_table);
        if let Some(data) = data {
            map_area.copy_data(&self.page_table, data);
        }
        self.areas.push(map_area);
    }
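    /// Map the trampoline page at the highest virtual page. It is not backed
    /// by a `MapArea` and carries no U flag.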
    fn map_trampoline(&mut self) {
        self.page_table.map(
            VirtAddr::from(TRAMPOLINE).into(),
            PhysAddr::from(strampoline as usize).into(),
            PTEFlags::R | PTEFlags::X,
        );
    }
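    /// Build the kernel address space: the trampoline, identity mappings for
    /// each kernel section, the remaining physical memory, and MMIO regions.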
    pub fn new_kernel() -> Self {
        let mut memory_set = Self::new_bare();
        memory_set.map_trampoline();
        println!(".text [{:#x}, {:#x})", stext as usize, etext as usize);
        println!(".rodata [{:#x}, {:#x})", srodata as usize, erodata as usize);
        println!(".data [{:#x}, {:#x})", sdata as usize, edata as usize);
        println!(
            ".bss [{:#x}, {:#x})",
            sbss_with_stack as usize, ebss as usize
        );
        println!("mapping .text section");
        memory_set.push(
            MapArea::new(
                (stext as usize).into(),
                (etext as usize).into(),
                MapType::Identical,
                MapPermission::R | MapPermission::X,
            ),
            None,
        );
        println!("mapping .rodata section");
        memory_set.push(
            MapArea::new(
                (srodata as usize).into(),
                (erodata as usize).into(),
                MapType::Identical,
                MapPermission::R,
            ),
            None,
        );
        println!("mapping .data section");
        memory_set.push(
            MapArea::new(
                (sdata as usize).into(),
                (edata as usize).into(),
                MapType::Identical,
                MapPermission::R | MapPermission::W,
            ),
            None,
        );
        println!("mapping .bss section");
        memory_set.push(
            MapArea::new(
                (sbss_with_stack as usize).into(),
                (ebss as usize).into(),
                MapType::Identical,
                MapPermission::R | MapPermission::W,
            ),
            None,
        );
        println!("mapping physical memory");
        memory_set.push(
            MapArea::new(
                (ekernel as usize).into(),
                MEMORY_END.into(),
                MapType::Identical,
                MapPermission::R | MapPermission::W,
            ),
            None,
        );
        println!("mapping memory-mapped registers");
        for pair in MMIO {
            memory_set.push(
                MapArea::new(
                    (*pair).0.into(),
                    ((*pair).0 + (*pair).1).into(),
                    MapType::Identical,
                    MapPermission::R | MapPermission::W,
                ),
                None,
            );
        }
        memory_set
    }
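    /// Build a user address space from an ELF image: all loadable segments,
    /// the trampoline, the TrapContext page, and the user stack. Returns the
    /// set together with the user stack top and the entry point.
    ///
    /// A sketch of a typical call site (`app_data` is a hypothetical ELF
    /// byte slice):
    /// ```ignore
    /// let (memory_set, user_sp, entry_point) = MemorySet::from_elf(app_data);
    /// ```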
    pub fn from_elf(elf_data: &[u8]) -> (Self, usize, usize) {
        let mut memory_set = Self::new_bare();
        memory_set.map_trampoline();
        let elf = xmas_elf::ElfFile::new(elf_data).unwrap();
        let elf_header = elf.header;
        let magic = elf_header.pt1.magic;
        assert_eq!(magic, [0x7f, 0x45, 0x4c, 0x46], "invalid elf!");
        let ph_count = elf_header.pt2.ph_count();
        let mut max_end_vpn = VirtPageNum(0);
        // Map each loadable program header, deriving U-mode permissions from
        // its flags.
        for i in 0..ph_count {
            let ph = elf.program_header(i).unwrap();
            if ph.get_type().unwrap() == xmas_elf::program::Type::Load {
                let start_va: VirtAddr = (ph.virtual_addr() as usize).into();
                let end_va: VirtAddr = ((ph.virtual_addr() + ph.mem_size()) as usize).into();
                let mut map_perm = MapPermission::U;
                let ph_flags = ph.flags();
                if ph_flags.is_read() {
                    map_perm |= MapPermission::R;
                }
                if ph_flags.is_write() {
                    map_perm |= MapPermission::W;
                }
                if ph_flags.is_execute() {
                    map_perm |= MapPermission::X;
                }
                let map_area = MapArea::new(start_va, end_va, MapType::Framed, map_perm);
                max_end_vpn = map_area.vpn_range.get_end();
                memory_set.push(
                    map_area,
                    Some(&elf.input[ph.offset() as usize..(ph.offset() + ph.file_size()) as usize]),
                );
            }
        }
        // Place the user stack above the highest mapped segment, leaving one
        // unmapped guard page below it.
        let max_end_va: VirtAddr = max_end_vpn.into();
        let mut user_stack_bottom: usize = max_end_va.into();
        user_stack_bottom += PAGE_SIZE;
        let user_stack_top = user_stack_bottom + USER_STACK_SIZE;
        memory_set.push(
            MapArea::new(
                user_stack_bottom.into(),
                user_stack_top.into(),
                MapType::Framed,
                MapPermission::R | MapPermission::W | MapPermission::U,
            ),
            None,
        );
        // Map the TrapContext page just below the trampoline.
        memory_set.push(
            MapArea::new(
                TRAP_CONTEXT.into(),
                TRAMPOLINE.into(),
                MapType::Framed,
                MapPermission::R | MapPermission::W,
            ),
            None,
        );
        (
            memory_set,
            user_stack_top,
            elf.header.pt2.entry_point() as usize,
        )
    }
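    /// Clone an existing user space (e.g. when forking): recreate every area
    /// with fresh frames, then copy the contents page by page.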
    pub fn from_existed_user(user_space: &Self) -> Self {
        let mut memory_set = Self::new_bare();
        memory_set.map_trampoline();
        for area in user_space.areas.iter() {
            let new_area = MapArea::from_another(area);
            memory_set.push(new_area, None);
            for vpn in area.vpn_range {
                let src_ppn = user_space.translate(vpn).unwrap().ppn();
                let dst_ppn = memory_set.translate(vpn).unwrap().ppn();
                dst_ppn
                    .get_bytes_array()
                    .copy_from_slice(src_ppn.get_bytes_array());
            }
        }
        memory_set
    }
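    /// Switch `satp` to this address space's root page table and flush the
    /// TLB.
    ///
    /// During boot this is typically invoked on the kernel space (sketch):
    /// ```ignore
    /// KERNEL_SPACE.exclusive_access().activate();
    /// ```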
    pub fn activate(&self) {
        let satp = self.page_table.token();
        unsafe {
            satp::write(satp);
            // Flush the entire TLB so stale translations are discarded.
            asm!("sfence.vma");
        }
    }
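    /// Look up the page-table entry for `vpn`, if one is mapped.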
    pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
        self.page_table.translate(vpn)
    }
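    /// Drop all areas, releasing their data frames back to the allocator.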
    pub fn recycle_data_pages(&mut self) {
        self.areas.clear();
    }
}
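
/// A contiguous range of virtual pages sharing one map type and one
/// permission set.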
pub struct MapArea {
    vpn_range: VPNRange,
    data_frames: BTreeMap<VirtPageNum, FrameTracker>,
    map_type: MapType,
    map_perm: MapPermission,
}

impl MapArea {
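    /// Create an area covering `[start_va, end_va)`; the start is rounded
    /// down and the end rounded up to page boundaries.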
    pub fn new(
        start_va: VirtAddr,
        end_va: VirtAddr,
        map_type: MapType,
        map_perm: MapPermission,
    ) -> Self {
        let start_vpn: VirtPageNum = start_va.floor();
        let end_vpn: VirtPageNum = end_va.ceil();
        Self {
            vpn_range: VPNRange::new(start_vpn, end_vpn),
            data_frames: BTreeMap::new(),
            map_type,
            map_perm,
        }
    }
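    /// Copy the range, map type, and permissions of `another`, but share no
    /// frames; the clone starts out unmapped.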
    pub fn from_another(another: &Self) -> Self {
        Self {
            vpn_range: VPNRange::new(another.vpn_range.get_start(), another.vpn_range.get_end()),
            data_frames: BTreeMap::new(),
            map_type: another.map_type,
            map_perm: another.map_perm,
        }
    }
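    /// Map a single page: identical areas reuse the VPN as the PPN, while
    /// framed areas allocate and track a fresh frame.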
    pub fn map_one(&mut self, page_table: &mut PageTable, vpn: VirtPageNum) {
        let ppn: PhysPageNum;
        match self.map_type {
            MapType::Identical => {
                ppn = PhysPageNum(vpn.0);
            }
            MapType::Framed => {
                let frame = frame_alloc().unwrap();
                ppn = frame.ppn;
                self.data_frames.insert(vpn, frame);
            }
        }
        let pte_flags = PTEFlags::from_bits(self.map_perm.bits).unwrap();
        page_table.map(vpn, ppn, pte_flags);
    }
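    /// Unmap a single page; for framed areas this also frees its frame.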
    pub fn unmap_one(&mut self, page_table: &mut PageTable, vpn: VirtPageNum) {
        if self.map_type == MapType::Framed {
            self.data_frames.remove(&vpn);
        }
        page_table.unmap(vpn);
    }
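    /// Map every page in the range into `page_table`.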
    pub fn map(&mut self, page_table: &mut PageTable) {
        for vpn in self.vpn_range {
            self.map_one(page_table, vpn);
        }
    }
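    /// Unmap every page in the range from `page_table`.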
    pub fn unmap(&mut self, page_table: &mut PageTable) {
        for vpn in self.vpn_range {
            self.unmap_one(page_table, vpn);
        }
    }
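    /// Copy `data` into the start of this framed area, one page at a time.
    /// `data` must fit within the area; the final page may be only partially
    /// filled.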
    pub fn copy_data(&mut self, page_table: &PageTable, data: &[u8]) {
        assert_eq!(self.map_type, MapType::Framed);
        let mut start: usize = 0;
        let mut current_vpn = self.vpn_range.get_start();
        let len = data.len();
        loop {
            let src = &data[start..len.min(start + PAGE_SIZE)];
            let dst = &mut page_table
                .translate(current_vpn)
                .unwrap()
                .ppn()
                .get_bytes_array()[..src.len()];
            dst.copy_from_slice(src);
            start += PAGE_SIZE;
            if start >= len {
                break;
            }
            current_vpn.step();
        }
    }
}

/// How an area's virtual pages are backed.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum MapType {
    /// Virtual page number equals physical page number (kernel mappings).
    Identical,
    /// Each page is backed by a freshly allocated physical frame.
    Framed,
}

bitflags! {
    /// Permission bits of a [`MapArea`]: the R/W/X/U subset of the
    /// page-table entry flags, excluding V.
    pub struct MapPermission: u8 {
        /// Readable
        const R = 1 << 1;
        /// Writable
        const W = 1 << 2;
        /// Executable
        const X = 1 << 3;
        /// Accessible in user mode
        const U = 1 << 4;
    }
}

/// Sanity check: after paging is enabled, the kernel's .text must not be
/// writable, .rodata must not be writable, and .data must not be executable.
#[allow(unused)]
pub fn remap_test() {
    let mut kernel_space = KERNEL_SPACE.exclusive_access();
    let mid_text: VirtAddr = ((stext as usize + etext as usize) / 2).into();
    let mid_rodata: VirtAddr = ((srodata as usize + erodata as usize) / 2).into();
    let mid_data: VirtAddr = ((sdata as usize + edata as usize) / 2).into();
    assert!(
        !kernel_space
            .page_table
            .translate(mid_text.floor())
            .unwrap()
            .writable(),
    );
    assert!(
        !kernel_space
            .page_table
            .translate(mid_rodata.floor())
            .unwrap()
            .writable(),
    );
    assert!(
        !kernel_space
            .page_table
            .translate(mid_data.floor())
            .unwrap()
            .executable(),
    );
    println!("remap_test passed!");
}