xref: /DragonOS/kernel/src/arch/x86_64/mm/fault.rs (revision f79998f626801329580c782fd05e36cb2027f474)
use core::{
    intrinsics::{likely, unlikely},
    panic,
};

use alloc::sync::Arc;
use log::error;
use x86::{bits64::rflags::RFlags, controlregs::Cr4};

use crate::{
    arch::{
        interrupt::{trap::X86PfErrorCode, TrapFrame},
        mm::{MemoryManagementArch, X86_64MMArch},
        CurrentIrqArch, MMArch,
    },
    exception::InterruptArch,
    mm::{
        fault::{FaultFlags, PageFaultHandler, PageFaultMessage},
        ucontext::{AddressSpace, LockedVMA},
        VirtAddr, VmFaultReason, VmFlags,
    },
};

use super::LockedFrameAllocator;

pub type PageMapper =
    crate::mm::page::PageMapper<crate::arch::x86_64::mm::X86_64MMArch, LockedFrameAllocator>;

impl X86_64MMArch {
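    /// Check whether the access that triggered the fault is forbidden by the VMA.
    ///
    /// Returns `true` if the access described by `error_code` violates the
    /// permissions of `vma` (the fault is an access error), `false` otherwise.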
    pub fn vma_access_error(vma: Arc<LockedVMA>, error_code: X86PfErrorCode) -> bool {
        let vm_flags = *vma.lock().vm_flags();
        let foreign = false;
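        // A protection-key violation is always an access error, no matter what
        // the VMA's flags say.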
        if error_code.contains(X86PfErrorCode::X86_PF_PK) {
            return true;
        }

        if unlikely(error_code.contains(X86PfErrorCode::X86_PF_SGX)) {
            return true;
        }

        if !Self::vma_access_permitted(
            vma.clone(),
            error_code.contains(X86PfErrorCode::X86_PF_WRITE),
            error_code.contains(X86PfErrorCode::X86_PF_INSTR),
            foreign,
        ) {
            return true;
        }

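        // Write fault: the VMA must be writable; nothing else needs checking.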
        if error_code.contains(X86PfErrorCode::X86_PF_WRITE) {
            if unlikely(!vm_flags.contains(VmFlags::VM_WRITE)) {
                return true;
            }
            return false;
        }

        if unlikely(error_code.contains(X86PfErrorCode::X86_PF_PROT)) {
            return true;
        }

        if unlikely(!vma.is_accessible()) {
            return true;
        }
        false
    }

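    /// Log diagnostic information about an unrecoverable page fault: NX/SMEP
    /// hints, the kind of access, the privilege level, and a decoded error code.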
    pub fn show_fault_oops(
        regs: &'static TrapFrame,
        error_code: X86PfErrorCode,
        address: VirtAddr,
    ) {
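        // Walk the current page table for the faulting address so NX- and
        // SMEP-related failures can be reported explicitly.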
        let mapper =
            unsafe { PageMapper::current(crate::mm::PageTableKind::User, LockedFrameAllocator) };
        if let Some(entry) = mapper.get_entry(address, 0) {
            if entry.present() {
                if !entry.flags().has_execute() {
                    error!("kernel tried to execute NX-protected page - exploit attempt?");
                } else if mapper.table().phys().data() & MMArch::ENTRY_FLAG_USER != 0
                    && unsafe { x86::controlregs::cr4().contains(Cr4::CR4_ENABLE_SMEP) }
                {
                    error!("unable to execute userspace code (SMEP?)");
                }
            }
        }
        if address.data() < X86_64MMArch::PAGE_SIZE && !regs.is_from_user() {
            error!(
                "BUG: kernel NULL pointer dereference, address: {:#x}",
                address.data()
            );
        } else {
            error!(
                "BUG: unable to handle page fault for address: {:#x}",
                address.data()
            );
        }

        error!(
            "#PF: {} {} in {} mode\n",
            if error_code.contains(X86PfErrorCode::X86_PF_USER) {
                "user"
            } else {
                "supervisor"
            },
            if error_code.contains(X86PfErrorCode::X86_PF_INSTR) {
                "instruction fetch"
            } else if error_code.contains(X86PfErrorCode::X86_PF_WRITE) {
                "write access"
            } else {
                "read access"
            },
            if regs.is_from_user() {
                "user"
            } else {
                "kernel"
            }
        );
        error!(
            "#PF: error_code({:#04x}) - {}\n",
            error_code,
            if !error_code.contains(X86PfErrorCode::X86_PF_PROT) {
                "not-present page"
            } else if error_code.contains(X86PfErrorCode::X86_PF_RSVD) {
                "reserved bit violation"
            } else if error_code.contains(X86PfErrorCode::X86_PF_PK) {
                "protection keys violation"
            } else {
                "permissions violation"
            }
        );
    }

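    /// Handle a page fault that cannot be recovered from: log the oops details
    /// (for trap frames coming from user mode) and panic.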
    pub fn page_fault_oops(
        regs: &'static TrapFrame,
        error_code: X86PfErrorCode,
        address: VirtAddr,
    ) {
        if regs.is_from_user() {
            Self::show_fault_oops(regs, error_code, address);
        }
        panic!()
    }

    /// Kernel-mode page fault handling.
    /// ## Parameters
    ///
    /// - `regs`: interrupt stack frame
    /// - `error_code`: error code flags
    /// - `address`: virtual address that triggered the page fault
    pub fn do_kern_addr_fault(
        _regs: &'static TrapFrame,
        error_code: X86PfErrorCode,
        address: VirtAddr,
    ) {
        panic!(
            "do_kern_addr_fault has not yet been implemented, fault address: {:#x}, error_code: {:#b}, pid: {}",
            address.data(),
            error_code,
            crate::process::ProcessManager::current_pid().data()
        );
        //TODO https://code.dragonos.org.cn/xref/linux-6.6.21/arch/x86/mm/fault.c#do_kern_addr_fault
    }

    /// User-mode page fault handling.
    /// ## Parameters
    ///
    /// - `regs`: interrupt stack frame
    /// - `error_code`: error code flags
    /// - `address`: virtual address that triggered the page fault
    pub unsafe fn do_user_addr_fault(
        regs: &'static TrapFrame,
        error_code: X86PfErrorCode,
        address: VirtAddr,
    ) {
        let rflags = RFlags::from_bits_truncate(regs.rflags);
        let mut flags: FaultFlags = FaultFlags::FAULT_FLAG_ALLOW_RETRY
            | FaultFlags::FAULT_FLAG_KILLABLE
            | FaultFlags::FAULT_FLAG_INTERRUPTIBLE;

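        // PF_INSTR set while PF_USER is clear means a supervisor-mode
        // instruction fetch faulted here; that can never be fixed up, so it is
        // treated as an oops.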
        if error_code & (X86PfErrorCode::X86_PF_USER | X86PfErrorCode::X86_PF_INSTR)
            == X86PfErrorCode::X86_PF_INSTR
        {
            Self::page_fault_oops(regs, error_code, address);
        }

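        // With SMAP available, a supervisor-mode fault whose saved RFLAGS has
        // the AC bit set is also treated as an oops.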
        let feature = x86::cpuid::CpuId::new()
            .get_extended_feature_info()
            .unwrap();
        if unlikely(
            feature.has_smap()
                && !error_code.contains(X86PfErrorCode::X86_PF_USER)
                && rflags.contains(RFlags::FLAGS_AC),
        ) {
            Self::page_fault_oops(regs, error_code, address);
        }

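        // Reserved bits set in a paging-structure entry mean the page tables
        // are corrupted; nothing sensible can be done except panic.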
        if unlikely(error_code.contains(X86PfErrorCode::X86_PF_RSVD)) {
            // TODO https://code.dragonos.org.cn/xref/linux-6.6.21/arch/x86/mm/fault.c#pgtable_bad
            panic!(
                "Reserved bits are never expected to be set, error_code: {:#b}, address: {:#x}",
                error_code,
                address.data()
            );
        }

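        // Re-enable interrupts where it is safe: always for faults taken in
        // user mode, and for kernel-mode faults only if IF was set in the
        // interrupted context.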
        if regs.is_from_user() {
            unsafe { CurrentIrqArch::interrupt_enable() };
            flags |= FaultFlags::FAULT_FLAG_USER;
        } else if rflags.contains(RFlags::FLAGS_IF) {
            unsafe { CurrentIrqArch::interrupt_enable() };
        }

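        // Translate the hardware error-code bits into generic fault flags.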
        if error_code.contains(X86PfErrorCode::X86_PF_SHSTK) {
            flags |= FaultFlags::FAULT_FLAG_WRITE;
        }
        if error_code.contains(X86PfErrorCode::X86_PF_WRITE) {
            flags |= FaultFlags::FAULT_FLAG_WRITE;
        }
        if error_code.contains(X86PfErrorCode::X86_PF_INSTR) {
            flags |= FaultFlags::FAULT_FLAG_INSTRUCTION;
        }

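        // Find the VMA covering the faulting address in the current address
        // space and let the generic page-fault handler resolve the fault,
        // retrying if it asks for that.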
        let current_address_space: Arc<AddressSpace> = AddressSpace::current().unwrap();
        let mut space_guard = current_address_space.write();
        let mut fault;
        loop {
            let vma = space_guard.mappings.find_nearest(address);
            // let vma = space_guard.mappings.contains(address);

            let vma = vma.unwrap_or_else(|| {
                panic!(
                    "cannot find nearest vma, error_code: {:#b}, address: {:#x}",
                    error_code,
                    address.data(),
                )
            });
            let guard = vma.lock();
            let region = *guard.region();
            let vm_flags = *guard.vm_flags();
            drop(guard);

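            // The nearest VMA does not cover the address: only a VM_GROWSDOWN
            // (stack) mapping may be grown downwards to reach it; anything else
            // is an unmapped access.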
            if !region.contains(address) {
                if vm_flags.contains(VmFlags::VM_GROWSDOWN) {
                    space_guard
                        .extend_stack(region.start() - address)
                        .unwrap_or_else(|_| {
                            panic!(
                                "user stack extend failed, error_code: {:#b}, address: {:#x}",
                                error_code,
                                address.data(),
                            )
                        });
                } else {
                    panic!(
                        "No mapped vma, error_code: {:#b}, address: {:#x}",
                        error_code,
                        address.data(),
                    )
                }
            }

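            // An access the VMA does not permit is currently fatal rather than
            // being delivered as a signal.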
            if unlikely(Self::vma_access_error(vma.clone(), error_code)) {
                panic!(
                    "vma access error, error_code: {:#b}, address: {:#x}",
                    error_code,
                    address.data(),
                );
            }
            let mapper = &mut space_guard.user_mapper.utable;

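            // Hand the fault to the architecture-independent handler: it either
            // completes the fault, asks for a retry, or reports an error that is
            // checked after the loop.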
            fault = PageFaultHandler::handle_mm_fault(
                PageFaultMessage::new(vma.clone(), address, flags),
                mapper,
            );

            if fault.contains(VmFaultReason::VM_FAULT_COMPLETED) {
                return;
            }

            if unlikely(fault.contains(VmFaultReason::VM_FAULT_RETRY)) {
                flags |= FaultFlags::FAULT_FLAG_TRIED;
            } else {
                break;
            }
        }

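        // Any of these bits in the returned reason means the fault could not be
        // handled.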
        let vm_fault_error = VmFaultReason::VM_FAULT_OOM
            | VmFaultReason::VM_FAULT_SIGBUS
            | VmFaultReason::VM_FAULT_SIGSEGV
            | VmFaultReason::VM_FAULT_HWPOISON
            | VmFaultReason::VM_FAULT_HWPOISON_LARGE
            | VmFaultReason::VM_FAULT_FALLBACK;

        if unlikely(fault.intersects(vm_fault_error)) {
            panic!("fault error: {:?}", fault)
        }
    }
}