xref: /DragonOS/kernel/src/arch/x86_64/mm/fault.rs (revision a17651b14b86dd70655090381db4a2f710853aa1)
1*a17651b1SMemoryShore use core::{
2*a17651b1SMemoryShore     intrinsics::{likely, unlikely},
3*a17651b1SMemoryShore     panic,
4*a17651b1SMemoryShore };
5*a17651b1SMemoryShore 
6*a17651b1SMemoryShore use alloc::sync::Arc;
7*a17651b1SMemoryShore use x86::{bits64::rflags::RFlags, controlregs::Cr4};
8*a17651b1SMemoryShore 
9*a17651b1SMemoryShore use crate::{
10*a17651b1SMemoryShore     arch::{
11*a17651b1SMemoryShore         interrupt::{trap::X86PfErrorCode, TrapFrame},
12*a17651b1SMemoryShore         mm::{MemoryManagementArch, X86_64MMArch},
13*a17651b1SMemoryShore         CurrentIrqArch, MMArch,
14*a17651b1SMemoryShore     },
15*a17651b1SMemoryShore     exception::InterruptArch,
16*a17651b1SMemoryShore     kerror,
17*a17651b1SMemoryShore     mm::{
18*a17651b1SMemoryShore         fault::{FaultFlags, PageFaultHandler, PageFaultMessage},
19*a17651b1SMemoryShore         ucontext::{AddressSpace, LockedVMA},
20*a17651b1SMemoryShore         VirtAddr, VmFaultReason, VmFlags,
21*a17651b1SMemoryShore     },
22*a17651b1SMemoryShore };
23*a17651b1SMemoryShore 
24*a17651b1SMemoryShore use super::LockedFrameAllocator;
25*a17651b1SMemoryShore 
/// Arch-level page mapper: the generic `PageMapper` specialized for the
/// x86_64 MMU description and the global locked frame allocator.
pub type PageMapper =
    crate::mm::page::PageMapper<crate::arch::x86_64::mm::X86_64MMArch, LockedFrameAllocator>;
28*a17651b1SMemoryShore 
29*a17651b1SMemoryShore impl X86_64MMArch {
30*a17651b1SMemoryShore     pub fn vma_access_error(vma: Arc<LockedVMA>, error_code: X86PfErrorCode) -> bool {
31*a17651b1SMemoryShore         let vm_flags = *vma.lock().vm_flags();
32*a17651b1SMemoryShore         let foreign = false;
33*a17651b1SMemoryShore         if error_code.contains(X86PfErrorCode::X86_PF_PK) {
34*a17651b1SMemoryShore             return true;
35*a17651b1SMemoryShore         }
36*a17651b1SMemoryShore 
37*a17651b1SMemoryShore         if unlikely(error_code.contains(X86PfErrorCode::X86_PF_SGX)) {
38*a17651b1SMemoryShore             return true;
39*a17651b1SMemoryShore         }
40*a17651b1SMemoryShore 
41*a17651b1SMemoryShore         if !Self::vma_access_permitted(
42*a17651b1SMemoryShore             vma.clone(),
43*a17651b1SMemoryShore             error_code.contains(X86PfErrorCode::X86_PF_WRITE),
44*a17651b1SMemoryShore             error_code.contains(X86PfErrorCode::X86_PF_INSTR),
45*a17651b1SMemoryShore             foreign,
46*a17651b1SMemoryShore         ) {
47*a17651b1SMemoryShore             return true;
48*a17651b1SMemoryShore         }
49*a17651b1SMemoryShore 
50*a17651b1SMemoryShore         if error_code.contains(X86PfErrorCode::X86_PF_WRITE) {
51*a17651b1SMemoryShore             if unlikely(!vm_flags.contains(VmFlags::VM_WRITE)) {
52*a17651b1SMemoryShore                 return true;
53*a17651b1SMemoryShore             }
54*a17651b1SMemoryShore             return false;
55*a17651b1SMemoryShore         }
56*a17651b1SMemoryShore 
57*a17651b1SMemoryShore         if unlikely(error_code.contains(X86PfErrorCode::X86_PF_PROT)) {
58*a17651b1SMemoryShore             return true;
59*a17651b1SMemoryShore         }
60*a17651b1SMemoryShore 
61*a17651b1SMemoryShore         if unlikely(!vma.is_accessible()) {
62*a17651b1SMemoryShore             return true;
63*a17651b1SMemoryShore         }
64*a17651b1SMemoryShore         false
65*a17651b1SMemoryShore     }
66*a17651b1SMemoryShore 
67*a17651b1SMemoryShore     pub fn show_fault_oops(
68*a17651b1SMemoryShore         regs: &'static TrapFrame,
69*a17651b1SMemoryShore         error_code: X86PfErrorCode,
70*a17651b1SMemoryShore         address: VirtAddr,
71*a17651b1SMemoryShore     ) {
72*a17651b1SMemoryShore         let mapper =
73*a17651b1SMemoryShore             unsafe { PageMapper::current(crate::mm::PageTableKind::User, LockedFrameAllocator) };
74*a17651b1SMemoryShore         if let Some(entry) = mapper.get_entry(address, 0) {
75*a17651b1SMemoryShore             if entry.present() {
76*a17651b1SMemoryShore                 if !entry.flags().has_execute() {
77*a17651b1SMemoryShore                     kerror!("kernel tried to execute NX-protected page - exploit attempt?");
78*a17651b1SMemoryShore                 } else if mapper.table().phys().data() & MMArch::ENTRY_FLAG_USER != 0
79*a17651b1SMemoryShore                     && unsafe { x86::controlregs::cr4().contains(Cr4::CR4_ENABLE_SMEP) }
80*a17651b1SMemoryShore                 {
81*a17651b1SMemoryShore                     kerror!("unable to execute userspace code (SMEP?)");
82*a17651b1SMemoryShore                 }
83*a17651b1SMemoryShore             }
84*a17651b1SMemoryShore         }
85*a17651b1SMemoryShore         if address.data() < X86_64MMArch::PAGE_SIZE && !regs.is_from_user() {
86*a17651b1SMemoryShore             kerror!(
87*a17651b1SMemoryShore                 "BUG: kernel NULL pointer dereference, address: {:#x}",
88*a17651b1SMemoryShore                 address.data()
89*a17651b1SMemoryShore             );
90*a17651b1SMemoryShore         } else {
91*a17651b1SMemoryShore             kerror!(
92*a17651b1SMemoryShore                 "BUG: unable to handle page fault for address: {:#x}",
93*a17651b1SMemoryShore                 address.data()
94*a17651b1SMemoryShore             );
95*a17651b1SMemoryShore         }
96*a17651b1SMemoryShore 
97*a17651b1SMemoryShore         kerror!(
98*a17651b1SMemoryShore             "#PF: {} {} in {} mode\n",
99*a17651b1SMemoryShore             if error_code.contains(X86PfErrorCode::X86_PF_USER) {
100*a17651b1SMemoryShore                 "user"
101*a17651b1SMemoryShore             } else {
102*a17651b1SMemoryShore                 "supervisor"
103*a17651b1SMemoryShore             },
104*a17651b1SMemoryShore             if error_code.contains(X86PfErrorCode::X86_PF_INSTR) {
105*a17651b1SMemoryShore                 "instruction fetch"
106*a17651b1SMemoryShore             } else if error_code.contains(X86PfErrorCode::X86_PF_WRITE) {
107*a17651b1SMemoryShore                 "write access"
108*a17651b1SMemoryShore             } else {
109*a17651b1SMemoryShore                 "read access"
110*a17651b1SMemoryShore             },
111*a17651b1SMemoryShore             if regs.is_from_user() {
112*a17651b1SMemoryShore                 "user"
113*a17651b1SMemoryShore             } else {
114*a17651b1SMemoryShore                 "kernel"
115*a17651b1SMemoryShore             }
116*a17651b1SMemoryShore         );
117*a17651b1SMemoryShore         kerror!(
118*a17651b1SMemoryShore             "#PF: error_code({:#04x}) - {}\n",
119*a17651b1SMemoryShore             error_code,
120*a17651b1SMemoryShore             if !error_code.contains(X86PfErrorCode::X86_PF_PROT) {
121*a17651b1SMemoryShore                 "not-present page"
122*a17651b1SMemoryShore             } else if error_code.contains(X86PfErrorCode::X86_PF_RSVD) {
123*a17651b1SMemoryShore                 "reserved bit violation"
124*a17651b1SMemoryShore             } else if error_code.contains(X86PfErrorCode::X86_PF_PK) {
125*a17651b1SMemoryShore                 "protection keys violation"
126*a17651b1SMemoryShore             } else {
127*a17651b1SMemoryShore                 "permissions violation"
128*a17651b1SMemoryShore             }
129*a17651b1SMemoryShore         );
130*a17651b1SMemoryShore     }
131*a17651b1SMemoryShore 
132*a17651b1SMemoryShore     pub fn page_fault_oops(
133*a17651b1SMemoryShore         regs: &'static TrapFrame,
134*a17651b1SMemoryShore         error_code: X86PfErrorCode,
135*a17651b1SMemoryShore         address: VirtAddr,
136*a17651b1SMemoryShore     ) {
137*a17651b1SMemoryShore         if regs.is_from_user() {
138*a17651b1SMemoryShore             Self::show_fault_oops(regs, error_code, address);
139*a17651b1SMemoryShore         }
140*a17651b1SMemoryShore         panic!()
141*a17651b1SMemoryShore     }
142*a17651b1SMemoryShore 
143*a17651b1SMemoryShore     /// 内核态缺页异常处理
144*a17651b1SMemoryShore     /// ## 参数
145*a17651b1SMemoryShore     ///
146*a17651b1SMemoryShore     /// - `regs`: 中断栈帧
147*a17651b1SMemoryShore     /// - `error_code`: 错误标志
148*a17651b1SMemoryShore     /// - `address`: 发生缺页异常的虚拟地址
149*a17651b1SMemoryShore     pub fn do_kern_addr_fault(
150*a17651b1SMemoryShore         _regs: &'static TrapFrame,
151*a17651b1SMemoryShore         error_code: X86PfErrorCode,
152*a17651b1SMemoryShore         address: VirtAddr,
153*a17651b1SMemoryShore     ) {
154*a17651b1SMemoryShore         panic!(
155*a17651b1SMemoryShore             "do_kern_addr_fault has not yet been implemented,
156*a17651b1SMemoryShore         fault address: {:#x},
157*a17651b1SMemoryShore         error_code: {:#b},
158*a17651b1SMemoryShore         pid: {}\n",
159*a17651b1SMemoryShore             address.data(),
160*a17651b1SMemoryShore             error_code,
161*a17651b1SMemoryShore             crate::process::ProcessManager::current_pid().data()
162*a17651b1SMemoryShore         );
163*a17651b1SMemoryShore         //TODO https://code.dragonos.org.cn/xref/linux-6.6.21/arch/x86/mm/fault.c#do_kern_addr_fault
164*a17651b1SMemoryShore     }
165*a17651b1SMemoryShore 
166*a17651b1SMemoryShore     /// 用户态缺页异常处理
167*a17651b1SMemoryShore     /// ## 参数
168*a17651b1SMemoryShore     ///
169*a17651b1SMemoryShore     /// - `regs`: 中断栈帧
170*a17651b1SMemoryShore     /// - `error_code`: 错误标志
171*a17651b1SMemoryShore     /// - `address`: 发生缺页异常的虚拟地址
172*a17651b1SMemoryShore     pub unsafe fn do_user_addr_fault(
173*a17651b1SMemoryShore         regs: &'static TrapFrame,
174*a17651b1SMemoryShore         error_code: X86PfErrorCode,
175*a17651b1SMemoryShore         address: VirtAddr,
176*a17651b1SMemoryShore     ) {
177*a17651b1SMemoryShore         let rflags = RFlags::from_bits_truncate(regs.rflags);
178*a17651b1SMemoryShore         let mut flags: FaultFlags = FaultFlags::FAULT_FLAG_ALLOW_RETRY
179*a17651b1SMemoryShore             | FaultFlags::FAULT_FLAG_KILLABLE
180*a17651b1SMemoryShore             | FaultFlags::FAULT_FLAG_INTERRUPTIBLE;
181*a17651b1SMemoryShore 
182*a17651b1SMemoryShore         if error_code & (X86PfErrorCode::X86_PF_USER | X86PfErrorCode::X86_PF_INSTR)
183*a17651b1SMemoryShore             == X86PfErrorCode::X86_PF_INSTR
184*a17651b1SMemoryShore         {
185*a17651b1SMemoryShore             Self::page_fault_oops(regs, error_code, address);
186*a17651b1SMemoryShore         }
187*a17651b1SMemoryShore 
188*a17651b1SMemoryShore         let feature = x86::cpuid::CpuId::new()
189*a17651b1SMemoryShore             .get_extended_feature_info()
190*a17651b1SMemoryShore             .unwrap();
191*a17651b1SMemoryShore         if unlikely(
192*a17651b1SMemoryShore             feature.has_smap()
193*a17651b1SMemoryShore                 && !error_code.contains(X86PfErrorCode::X86_PF_USER)
194*a17651b1SMemoryShore                 && rflags.contains(RFlags::FLAGS_AC),
195*a17651b1SMemoryShore         ) {
196*a17651b1SMemoryShore             Self::page_fault_oops(regs, error_code, address);
197*a17651b1SMemoryShore         }
198*a17651b1SMemoryShore 
199*a17651b1SMemoryShore         if unlikely(error_code.contains(X86PfErrorCode::X86_PF_RSVD)) {
200*a17651b1SMemoryShore             // TODO https://code.dragonos.org.cn/xref/linux-6.6.21/arch/x86/mm/fault.c#pgtable_bad
201*a17651b1SMemoryShore             panic!(
202*a17651b1SMemoryShore                 "Reserved bits are never expected to be set, error_code: {:#b}, address: {:#x}",
203*a17651b1SMemoryShore                 error_code,
204*a17651b1SMemoryShore                 address.data()
205*a17651b1SMemoryShore             );
206*a17651b1SMemoryShore         }
207*a17651b1SMemoryShore 
208*a17651b1SMemoryShore         if regs.is_from_user() {
209*a17651b1SMemoryShore             unsafe { CurrentIrqArch::interrupt_enable() };
210*a17651b1SMemoryShore             flags |= FaultFlags::FAULT_FLAG_USER;
211*a17651b1SMemoryShore         } else if rflags.contains(RFlags::FLAGS_IF) {
212*a17651b1SMemoryShore             unsafe { CurrentIrqArch::interrupt_enable() };
213*a17651b1SMemoryShore         }
214*a17651b1SMemoryShore 
215*a17651b1SMemoryShore         if error_code.contains(X86PfErrorCode::X86_PF_SHSTK) {
216*a17651b1SMemoryShore             flags |= FaultFlags::FAULT_FLAG_WRITE;
217*a17651b1SMemoryShore         }
218*a17651b1SMemoryShore         if error_code.contains(X86PfErrorCode::X86_PF_WRITE) {
219*a17651b1SMemoryShore             flags |= FaultFlags::FAULT_FLAG_WRITE;
220*a17651b1SMemoryShore         }
221*a17651b1SMemoryShore         if error_code.contains(X86PfErrorCode::X86_PF_INSTR) {
222*a17651b1SMemoryShore             flags |= FaultFlags::FAULT_FLAG_INSTRUCTION;
223*a17651b1SMemoryShore         }
224*a17651b1SMemoryShore 
225*a17651b1SMemoryShore         let current_address_space: Arc<AddressSpace> = AddressSpace::current().unwrap();
226*a17651b1SMemoryShore         let mut space_guard = current_address_space.write();
227*a17651b1SMemoryShore         let mut fault;
228*a17651b1SMemoryShore         loop {
229*a17651b1SMemoryShore             let vma = space_guard.mappings.find_nearest(address);
230*a17651b1SMemoryShore             // let vma = space_guard.mappings.contains(address);
231*a17651b1SMemoryShore 
232*a17651b1SMemoryShore             let vma = vma.unwrap_or_else(|| {
233*a17651b1SMemoryShore                 panic!(
234*a17651b1SMemoryShore                     "can not find nearest vma, error_code: {:#b}, address: {:#x}",
235*a17651b1SMemoryShore                     error_code,
236*a17651b1SMemoryShore                     address.data(),
237*a17651b1SMemoryShore                 )
238*a17651b1SMemoryShore             });
239*a17651b1SMemoryShore             let guard = vma.lock();
240*a17651b1SMemoryShore             let region = *guard.region();
241*a17651b1SMemoryShore             let vm_flags = *guard.vm_flags();
242*a17651b1SMemoryShore             drop(guard);
243*a17651b1SMemoryShore 
244*a17651b1SMemoryShore             if !region.contains(address) {
245*a17651b1SMemoryShore                 if vm_flags.contains(VmFlags::VM_GROWSDOWN) {
246*a17651b1SMemoryShore                     space_guard
247*a17651b1SMemoryShore                         .extend_stack(region.start() - address)
248*a17651b1SMemoryShore                         .unwrap_or_else(|_| {
249*a17651b1SMemoryShore                             panic!(
250*a17651b1SMemoryShore                                 "user stack extend failed, error_code: {:#b}, address: {:#x}",
251*a17651b1SMemoryShore                                 error_code,
252*a17651b1SMemoryShore                                 address.data(),
253*a17651b1SMemoryShore                             )
254*a17651b1SMemoryShore                         });
255*a17651b1SMemoryShore                 } else {
256*a17651b1SMemoryShore                     panic!(
257*a17651b1SMemoryShore                         "No mapped vma, error_code: {:#b}, address: {:#x}",
258*a17651b1SMemoryShore                         error_code,
259*a17651b1SMemoryShore                         address.data(),
260*a17651b1SMemoryShore                     )
261*a17651b1SMemoryShore                 }
262*a17651b1SMemoryShore             }
263*a17651b1SMemoryShore 
264*a17651b1SMemoryShore             if unlikely(Self::vma_access_error(vma.clone(), error_code)) {
265*a17651b1SMemoryShore                 panic!(
266*a17651b1SMemoryShore                     "vma access error, error_code: {:#b}, address: {:#x}",
267*a17651b1SMemoryShore                     error_code,
268*a17651b1SMemoryShore                     address.data(),
269*a17651b1SMemoryShore                 );
270*a17651b1SMemoryShore             }
271*a17651b1SMemoryShore             let mapper = &mut space_guard.user_mapper.utable;
272*a17651b1SMemoryShore 
273*a17651b1SMemoryShore             fault = PageFaultHandler::handle_mm_fault(
274*a17651b1SMemoryShore                 PageFaultMessage::new(vma.clone(), address, flags),
275*a17651b1SMemoryShore                 mapper,
276*a17651b1SMemoryShore             );
277*a17651b1SMemoryShore 
278*a17651b1SMemoryShore             if fault.contains(VmFaultReason::VM_FAULT_COMPLETED) {
279*a17651b1SMemoryShore                 return;
280*a17651b1SMemoryShore             }
281*a17651b1SMemoryShore 
282*a17651b1SMemoryShore             if unlikely(fault.contains(VmFaultReason::VM_FAULT_RETRY)) {
283*a17651b1SMemoryShore                 flags |= FaultFlags::FAULT_FLAG_TRIED;
284*a17651b1SMemoryShore             } else {
285*a17651b1SMemoryShore                 break;
286*a17651b1SMemoryShore             }
287*a17651b1SMemoryShore         }
288*a17651b1SMemoryShore 
289*a17651b1SMemoryShore         let vm_fault_error = VmFaultReason::VM_FAULT_OOM
290*a17651b1SMemoryShore             | VmFaultReason::VM_FAULT_SIGBUS
291*a17651b1SMemoryShore             | VmFaultReason::VM_FAULT_SIGSEGV
292*a17651b1SMemoryShore             | VmFaultReason::VM_FAULT_HWPOISON
293*a17651b1SMemoryShore             | VmFaultReason::VM_FAULT_HWPOISON_LARGE
294*a17651b1SMemoryShore             | VmFaultReason::VM_FAULT_FALLBACK;
295*a17651b1SMemoryShore 
296*a17651b1SMemoryShore         if likely(!fault.contains(vm_fault_error)) {
297*a17651b1SMemoryShore             panic!("fault error: {:?}", fault)
298*a17651b1SMemoryShore         }
299*a17651b1SMemoryShore     }
300*a17651b1SMemoryShore }
301