// xref: /DragonOS/kernel/src/arch/x86_64/mm/fault.rs (revision 52dc4c3ee87b31fee4f567f776fd8ad58fbfd9a5)
use core::{
    intrinsics::{likely, unlikely},
    panic,
};

use alloc::sync::Arc;
use log::error;
use x86::{bits64::rflags::RFlags, controlregs::Cr4};

use crate::{
    arch::{
        interrupt::{trap::X86PfErrorCode, TrapFrame},
        ipc::signal::{SigCode, Signal},
        mm::{MemoryManagementArch, X86_64MMArch},
        CurrentIrqArch, MMArch,
    },
    exception::InterruptArch,
    ipc::signal_types::{SigInfo, SigType},
    mm::{
        fault::{FaultFlags, PageFaultHandler, PageFaultMessage},
        ucontext::{AddressSpace, LockedVMA},
        VirtAddr, VmFaultReason, VmFlags,
    },
    process::ProcessManager,
};

use super::LockedFrameAllocator;

pub type PageMapper =
    crate::mm::page::PageMapper<crate::arch::x86_64::mm::X86_64MMArch, LockedFrameAllocator>;

impl X86_64MMArch {
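    /// Check whether the faulting access is forbidden by the protection flags
    /// of `vma`. Returns `true` if the access is an error (the fault cannot be
    /// serviced), `false` if the fault may be handled normally.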
    pub fn vma_access_error(vma: Arc<LockedVMA>, error_code: X86PfErrorCode) -> bool {
        let vm_flags = *vma.lock_irqsave().vm_flags();
        let foreign = false;
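        // A protection-key (PF_PK) or SGX fault is always reported as an
        // access error.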
        if error_code.contains(X86PfErrorCode::X86_PF_PK) {
            return true;
        }

        if unlikely(error_code.contains(X86PfErrorCode::X86_PF_SGX)) {
            return true;
        }

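        // Check whether this kind of access (write / instruction fetch) is
        // permitted on the VMA at all.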
        if !Self::vma_access_permitted(
            vma.clone(),
            error_code.contains(X86PfErrorCode::X86_PF_WRITE),
            error_code.contains(X86PfErrorCode::X86_PF_INSTR),
            foreign,
        ) {
            return true;
        }

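        // Write faults are only legal on VMAs mapped with VM_WRITE.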
        if error_code.contains(X86PfErrorCode::X86_PF_WRITE) {
            if unlikely(!vm_flags.contains(VmFlags::VM_WRITE)) {
                return true;
            }
            return false;
        }

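        // Read faults: a protection fault on a present page, or any fault on a
        // VMA with no access rights, is an error.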
        if unlikely(error_code.contains(X86PfErrorCode::X86_PF_PROT)) {
            return true;
        }

        if unlikely(!vma.is_accessible()) {
            return true;
        }
        false
    }

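    /// Print diagnostics for an unhandled page fault: NX / SMEP hints, whether
    /// the fault looks like a kernel NULL pointer dereference, and a decoded
    /// form of the hardware error code.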
    pub fn show_fault_oops(
        regs: &'static TrapFrame,
        error_code: X86PfErrorCode,
        address: VirtAddr,
    ) {
        let mapper =
            unsafe { PageMapper::current(crate::mm::PageTableKind::User, LockedFrameAllocator) };
        if let Some(entry) = mapper.get_entry(address, 0) {
            if entry.present() {
                if !entry.flags().has_execute() {
                    error!("kernel tried to execute NX-protected page - exploit attempt?");
                } else if mapper.table().phys().data() & MMArch::ENTRY_FLAG_USER != 0
                    && unsafe { x86::controlregs::cr4().contains(Cr4::CR4_ENABLE_SMEP) }
                {
                    error!("unable to execute userspace code (SMEP?)");
                }
            }
        }
        if address.data() < X86_64MMArch::PAGE_SIZE && !regs.is_from_user() {
            error!(
                "BUG: kernel NULL pointer dereference, address: {:#x}",
                address.data()
            );
        } else {
            error!(
                "BUG: unable to handle page fault for address: {:#x}",
                address.data()
            );
        }

        error!(
            "#PF: {} {} in {} mode\n",
            if error_code.contains(X86PfErrorCode::X86_PF_USER) {
                "user"
            } else {
                "supervisor"
            },
            if error_code.contains(X86PfErrorCode::X86_PF_INSTR) {
                "instruction fetch"
            } else if error_code.contains(X86PfErrorCode::X86_PF_WRITE) {
                "write access"
            } else {
                "read access"
            },
            if regs.is_from_user() {
                "user"
            } else {
                "kernel"
            }
        );
        error!(
            "#PF: error_code({:#04x}) - {}\n",
            error_code,
            if !error_code.contains(X86PfErrorCode::X86_PF_PROT) {
                "not-present page"
            } else if error_code.contains(X86PfErrorCode::X86_PF_RSVD) {
                "reserved bit violation"
            } else if error_code.contains(X86PfErrorCode::X86_PF_PK) {
                "protection keys violation"
            } else {
                "permissions violation"
            }
        );
    }

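    /// Report an unrecoverable page fault and panic. Detailed diagnostics are
    /// only printed when the trap frame indicates user mode.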
    pub fn page_fault_oops(
        regs: &'static TrapFrame,
        error_code: X86PfErrorCode,
        address: VirtAddr,
    ) {
        if regs.is_from_user() {
            Self::show_fault_oops(regs, error_code, address);
        }
        panic!()
    }

    /// Kernel-mode page fault handling
    /// ## Arguments
    ///
    /// - `regs`: trap frame of the interrupt
    /// - `error_code`: error flags
    /// - `address`: virtual address where the page fault occurred
    pub fn do_kern_addr_fault(
        _regs: &'static TrapFrame,
        error_code: X86PfErrorCode,
        address: VirtAddr,
    ) {
        panic!(
            "do_kern_addr_fault has not yet been implemented,
        fault address: {:#x},
        error_code: {:#b},
        pid: {}\n",
            address.data(),
            error_code,
            crate::process::ProcessManager::current_pid().data()
        );
        //TODO https://code.dragonos.org.cn/xref/linux-6.6.21/arch/x86/mm/fault.c#do_kern_addr_fault
    }

    /// User-mode page fault handling
    /// ## Arguments
    ///
    /// - `regs`: trap frame of the interrupt
    /// - `error_code`: error flags
    /// - `address`: virtual address where the page fault occurred
    pub unsafe fn do_user_addr_fault(
        regs: &'static TrapFrame,
        error_code: X86PfErrorCode,
        address: VirtAddr,
    ) {
        let rflags = RFlags::from_bits_truncate(regs.rflags);
        let mut flags: FaultFlags = FaultFlags::FAULT_FLAG_ALLOW_RETRY
            | FaultFlags::FAULT_FLAG_KILLABLE
            | FaultFlags::FAULT_FLAG_INTERRUPTIBLE;

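        // An instruction-fetch fault with the USER bit clear means the kernel
        // itself tried to execute from this address; this cannot be fixed up.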
        if error_code & (X86PfErrorCode::X86_PF_USER | X86PfErrorCode::X86_PF_INSTR)
            == X86PfErrorCode::X86_PF_INSTR
        {
            Self::page_fault_oops(regs, error_code, address);
        }

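        // SMAP sanity check: a supervisor-mode fault taken while EFLAGS.AC is
        // set is treated as a kernel bug.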
        let feature = x86::cpuid::CpuId::new()
            .get_extended_feature_info()
            .unwrap();
        if unlikely(
            feature.has_smap()
                && !error_code.contains(X86PfErrorCode::X86_PF_USER)
                && rflags.contains(RFlags::FLAGS_AC),
        ) {
            Self::page_fault_oops(regs, error_code, address);
        }

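        // Reserved bits set in a page-table entry mean the page tables are
        // corrupted; nothing sensible can be done except panic.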
        if unlikely(error_code.contains(X86PfErrorCode::X86_PF_RSVD)) {
            // TODO https://code.dragonos.org.cn/xref/linux-6.6.21/arch/x86/mm/fault.c#pgtable_bad
            panic!(
                "Reserved bits are never expected to be set, error_code: {:#b}, address: {:#x}",
                error_code,
                address.data()
            );
        }

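        // Re-enable interrupts if it is safe to do so: always for user-mode
        // faults (which also get FAULT_FLAG_USER), and for kernel-mode faults
        // only if interrupts were enabled when the fault occurred (IF set).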
        if regs.is_from_user() {
            unsafe { CurrentIrqArch::interrupt_enable() };
            flags |= FaultFlags::FAULT_FLAG_USER;
        } else if rflags.contains(RFlags::FLAGS_IF) {
            unsafe { CurrentIrqArch::interrupt_enable() };
        }

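        // Translate the hardware error code into generic fault flags: shadow
        // stack accesses and ordinary writes are write faults, instruction
        // fetches are marked as such.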
        if error_code.contains(X86PfErrorCode::X86_PF_SHSTK) {
            flags |= FaultFlags::FAULT_FLAG_WRITE;
        }
        if error_code.contains(X86PfErrorCode::X86_PF_WRITE) {
            flags |= FaultFlags::FAULT_FLAG_WRITE;
        }
        if error_code.contains(X86PfErrorCode::X86_PF_INSTR) {
            flags |= FaultFlags::FAULT_FLAG_INSTRUCTION;
        }

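        // Look up the VMA responsible for the faulting address in the current
        // address space and retry handle_mm_fault() until it either completes
        // or fails without requesting a retry.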
        let current_address_space: Arc<AddressSpace> = AddressSpace::current().unwrap();
        let mut space_guard = current_address_space.write_irqsave();
        let mut fault;
        loop {
            let vma = space_guard.mappings.find_nearest(address);
            // let vma = space_guard.mappings.contains(address);

            let vma = match vma {
                Some(vma) => vma,
                None => {
                    log::error!(
                        "cannot find nearest vma, error_code: {:#b}, address: {:#x}",
                        error_code,
                        address.data(),
                    );
                    let pid = ProcessManager::current_pid();
                    let mut info =
                        SigInfo::new(Signal::SIGSEGV, 0, SigCode::User, SigType::Kill(pid));
                    Signal::SIGSEGV
                        .send_signal_info(Some(&mut info), pid)
                        .expect("failed to send SIGSEGV to process");
                    return;
                }
            };
            let guard = vma.lock_irqsave();
            let region = *guard.region();
            let vm_flags = *guard.vm_flags();
            drop(guard);

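            // The nearest VMA does not cover the address: grow a VM_GROWSDOWN
            // stack mapping down to it, or deliver SIGSEGV if no mapping fits.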
            if !region.contains(address) {
                if vm_flags.contains(VmFlags::VM_GROWSDOWN) {
                    space_guard
                        .extend_stack(region.start() - address)
                        .unwrap_or_else(|_| {
                            panic!(
                                "user stack extend failed, error_code: {:#b}, address: {:#x}",
                                error_code,
                                address.data(),
                            )
                        });
                } else {
                    log::error!(
                        "No mapped vma, error_code: {:#b}, address: {:#x}",
                        error_code,
                        address.data(),
                    );
                    let pid = ProcessManager::current_pid();
                    let mut info =
                        SigInfo::new(Signal::SIGSEGV, 0, SigCode::User, SigType::Kill(pid));
                    Signal::SIGSEGV
                        .send_signal_info(Some(&mut info), pid)
                        .expect("failed to send SIGSEGV to process");
                    return;
                }
            }

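            // Panic if the VMA's protection flags forbid this access, otherwise
            // pass the fault to the generic page-fault handler.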
            if unlikely(Self::vma_access_error(vma.clone(), error_code)) {
                panic!(
                    "vma access error, error_code: {:#b}, address: {:#x}",
                    error_code,
                    address.data(),
                );
            }
            let mapper = &mut space_guard.user_mapper.utable;
            let message = PageFaultMessage::new(vma.clone(), address, flags, mapper);

            fault = PageFaultHandler::handle_mm_fault(message);

            if fault.contains(VmFaultReason::VM_FAULT_COMPLETED) {
                return;
            }

            if unlikely(fault.contains(VmFaultReason::VM_FAULT_RETRY)) {
                flags |= FaultFlags::FAULT_FLAG_TRIED;
            } else {
                break;
            }
        }

        let vm_fault_error = VmFaultReason::VM_FAULT_OOM
            | VmFaultReason::VM_FAULT_SIGBUS
            | VmFaultReason::VM_FAULT_SIGSEGV
            | VmFaultReason::VM_FAULT_HWPOISON
            | VmFaultReason::VM_FAULT_HWPOISON_LARGE
            | VmFaultReason::VM_FAULT_FALLBACK;

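        // Any error bit left over after the retry loop means the fault could
        // not be resolved.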
        if likely(!fault.intersects(vm_fault_error)) {
            return;
        }
        panic!("fault error: {:?}", fault);
    }
}