xref: /DragonOS/kernel/src/exception/handle.rs (revision f79998f626801329580c782fd05e36cb2027f474)
use core::{intrinsics::unlikely, ops::BitAnd};

use alloc::sync::Arc;
use log::{debug, error, warn};
use system_error::SystemError;

use crate::{
    arch::{interrupt::TrapFrame, CurrentIrqArch},
    exception::{irqchip::IrqChipFlags, irqdesc::InnerIrqDesc},
    libs::{once::Once, spinlock::SpinLockGuard},
    process::{ProcessFlags, ProcessManager},
    smp::core::smp_get_processor_id,
};

use super::{
    irqchip::IrqChip,
    irqdata::{IrqData, IrqHandlerData, IrqStatus},
    irqdesc::{
        InnerIrqAction, IrqDesc, IrqDescState, IrqFlowHandler, IrqReturn, ThreadedHandlerFlags,
    },
    manage::{irq_manager, IrqManager},
    InterruptArch, IrqNumber,
};

/// Returns the flow handler for bad (spurious or unhandled) IRQs.
#[inline(always)]
pub fn bad_irq_handler() -> &'static dyn IrqFlowHandler {
    &HandleBadIrq
}

/// Returns the flow handler for fast-EOI interrupts.
#[inline(always)]
pub fn fast_eoi_irq_handler() -> &'static dyn IrqFlowHandler {
    &FastEOIIrqHandler
}

/// Returns the flow handler for edge-triggered interrupts.
#[inline(always)]
#[allow(dead_code)]
pub fn edge_irq_handler() -> &'static dyn IrqFlowHandler {
    &EdgeIrqHandler
}

/// handle spurious and unhandled irqs
#[derive(Debug)]
struct HandleBadIrq;

impl IrqFlowHandler for HandleBadIrq {
    /// Reference: https://code.dragonos.org.cn/xref/linux-6.1.9/kernel/irq/handle.c?fi=handle_bad_irq#33
    fn handle(&self, irq_desc: &Arc<IrqDesc>, _trap_frame: &mut TrapFrame) {
        // todo: print_irq_desc
        // todo: bump the kstat counters
        CurrentIrqArch::ack_bad_irq(irq_desc.irq());
    }
}

#[derive(Debug)]
struct FastEOIIrqHandler;

impl IrqFlowHandler for FastEOIIrqHandler {
    /// https://code.dragonos.org.cn/xref/linux-6.1.9/kernel/irq/chip.c?r=&mo=17578&fi=689#689
    fn handle(&self, irq_desc: &Arc<IrqDesc>, _trap_frame: &mut TrapFrame) {
        let chip = irq_desc.irq_data().chip_info_read_irqsave().chip();

        let mut desc_inner = irq_desc.inner();
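        // Exit path: issue the EOI to the chip unless the chip only wants an EOI
        // for interrupts that were actually handled (IRQCHIP_EOI_IF_HANDLED).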
        let out = |din: SpinLockGuard<InnerIrqDesc>| {
            if !chip.flags().contains(IrqChipFlags::IRQCHIP_EOI_IF_HANDLED) {
                chip.irq_eoi(din.irq_data());
            }
        };
        if !irq_may_run(&desc_inner) {
            out(desc_inner);
            return;
        }

        desc_inner
            .internal_state_mut()
            .remove(IrqDescState::IRQS_REPLAY | IrqDescState::IRQS_WAITING);

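        // Nothing to do, or the interrupt is currently disabled: mark it pending
        // and keep it masked so that it can be replayed later.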
        if desc_inner.actions().is_empty() || desc_inner.common_data().disabled() {
            desc_inner
                .internal_state_mut()
                .insert(IrqDescState::IRQS_PENDING);
            mask_irq(desc_inner.irq_data());
            out(desc_inner);
            return;
        }

        desc_inner = handle_irq_event(irq_desc, desc_inner);
        cond_unmask_eoi_irq(&desc_inner, &chip);

        return;
    }
}

#[derive(Debug)]
struct EdgeIrqHandler;

impl IrqFlowHandler for EdgeIrqHandler {
    // https://code.dragonos.org.cn/xref/linux-6.1.9/kernel/irq/chip.c?fi=handle_edge_irq#775
    fn handle(&self, irq_desc: &Arc<IrqDesc>, _trap_frame: &mut TrapFrame) {
        let mut desc_inner_guard: SpinLockGuard<'_, InnerIrqDesc> = irq_desc.inner();
        if !irq_may_run(&desc_inner_guard) {
            // debug!("!irq_may_run");
            desc_inner_guard
                .internal_state_mut()
                .insert(IrqDescState::IRQS_PENDING);
            mask_ack_irq(desc_inner_guard.irq_data());
            return;
        }

        if desc_inner_guard.common_data().disabled() {
            // debug!("desc_inner_guard.common_data().disabled()");
            desc_inner_guard
                .internal_state_mut()
                .insert(IrqDescState::IRQS_PENDING);
            mask_ack_irq(desc_inner_guard.irq_data());
            return;
        }

        let irq_data = desc_inner_guard.irq_data().clone();

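        // Ack the edge right away so that a new edge arriving while the handlers
        // run is latched by the irq chip and replayed via IRQS_PENDING in the
        // loop below, instead of being lost.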
        irq_data.chip_info_read_irqsave().chip().irq_ack(&irq_data);

        loop {
            if unlikely(desc_inner_guard.actions().is_empty()) {
                debug!("no action for irq {}", irq_data.irq().data());
                irq_manager().mask_irq(&irq_data);
                return;
            }

            // While we were handling one interrupt, another one may have arrived
            // and been masked. Re-enable it if it was not disabled in the meantime.
            if desc_inner_guard
                .internal_state()
                .contains(IrqDescState::IRQS_PENDING)
            {
                let status = desc_inner_guard.common_data().status();
                if !status.disabled() && status.masked() {
                    // debug!("re-enable irq");
                    irq_manager().unmask_irq(&desc_inner_guard);
                }
            }

            // debug!("handle_irq_event");

            desc_inner_guard = handle_irq_event(irq_desc, desc_inner_guard);

            if !desc_inner_guard
                .internal_state()
                .contains(IrqDescState::IRQS_PENDING)
                || desc_inner_guard.common_data().disabled()
            {
                break;
            }
        }
    }
}

/// Determine whether the interrupt may run right now.
fn irq_may_run(desc_inner_guard: &SpinLockGuard<'_, InnerIrqDesc>) -> bool {
    let mask = IrqStatus::IRQD_IRQ_INPROGRESS | IrqStatus::IRQD_WAKEUP_ARMED;
    let status = desc_inner_guard.common_data().status();

    // The interrupt may run if it is neither in progress nor armed as a wakeup source.
    if status.bitand(mask).is_empty() {
        return true;
    }

    // todo: check whether another processor is currently polling this interrupt
    return false;
}

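/// Mask and acknowledge the interrupt, preferring the chip's combined
/// mask_ack callback when it provides one.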
pub(super) fn mask_ack_irq(irq_data: &Arc<IrqData>) {
    let chip = irq_data.chip_info_read_irqsave().chip();
    if chip.can_mask_ack() {
        chip.irq_mask_ack(irq_data);
        irq_data.common_data().set_masked();
    } else {
        irq_manager().mask_irq(irq_data);
        chip.irq_ack(irq_data);
    }
}

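/// Mask the interrupt at the chip level and record IRQD_IRQ_MASKED if the
/// chip accepted the request.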
pub(super) fn mask_irq(irq_data: &Arc<IrqData>) {
    if irq_data.common_data().masked() {
        return;
    }

    let chip = irq_data.chip_info_read_irqsave().chip();
    if chip.irq_mask(irq_data).is_ok() {
        irq_data.irqd_set(IrqStatus::IRQD_IRQ_MASKED);
    }
}

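/// Unmask a previously masked interrupt and clear IRQD_IRQ_MASKED if the
/// chip accepted the request.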
pub(super) fn unmask_irq(irq_data: &Arc<IrqData>) {
    if !irq_data.common_data().masked() {
        return;
    }

    let chip = irq_data.chip_info_read_irqsave().chip();

    if chip.irq_unmask(irq_data).is_ok() {
        irq_data.irqd_clear(IrqStatus::IRQD_IRQ_MASKED);
    }
}

impl IrqManager {
    pub(super) fn do_irq_wake_thread(
        &self,
        desc: &Arc<IrqDesc>,
        action_inner: &mut SpinLockGuard<'_, InnerIrqAction>,
    ) {
        let thread = action_inner.thread();

        if thread.is_none() {
            return;
        }

        let thread = thread.unwrap();
        if thread.flags().contains(ProcessFlags::EXITING) {
            return;
        }

        // If the handler thread is already running, there is no need to wake it.
        if action_inner
            .thread_flags_mut()
            .test_and_set_bit(ThreadedHandlerFlags::IRQTF_RUNTHREAD)
        {
            return;
        }

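        // Account for the newly activated threaded handler before waking the thread.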
        desc.inc_threads_active();

        ProcessManager::wakeup(&thread).ok();
    }
}

fn handle_irq_event<'a>(
    irq_desc: &'a Arc<IrqDesc>,
    mut desc_inner_guard: SpinLockGuard<'_, InnerIrqDesc>,
) -> SpinLockGuard<'a, InnerIrqDesc> {
    desc_inner_guard
        .internal_state_mut()
        .remove(IrqDescState::IRQS_PENDING);
    desc_inner_guard.common_data().set_inprogress();

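    // Release the descriptor lock while the handlers run; IRQD_IRQ_INPROGRESS
    // (checked by irq_may_run) prevents this IRQ from being re-entered meanwhile.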
    drop(desc_inner_guard);

    let _r = do_handle_irq_event(irq_desc);

    let desc_inner_guard = irq_desc.inner();
    desc_inner_guard.common_data().clear_inprogress();

    return desc_inner_guard;
}

/// Handle the interrupt event: invoke every primary handler registered on this descriptor.
///
/// https://code.dragonos.org.cn/xref/linux-6.1.9/kernel/irq/handle.c?fi=handle_irq_event#139
#[inline(never)]
fn do_handle_irq_event(desc: &Arc<IrqDesc>) -> Result<(), SystemError> {
    let desc_inner_guard = desc.inner();
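    // Clone the irq data and the action list up front so the handlers below can
    // be invoked without holding the descriptor lock.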
    let irq_data = desc_inner_guard.irq_data().clone();
    let actions = desc_inner_guard.actions().clone();
    drop(desc_inner_guard);

    let irq = irq_data.irq();
    let mut r = Ok(IrqReturn::NotHandled);

    for action in actions {
        let mut action_inner: SpinLockGuard<'_, InnerIrqAction> = action.inner();
        // debug!("do_handle_irq_event: action: {:?}", action_inner.name());
        let dynamic_data = action_inner
            .dev_id()
            .clone()
            .map(|d| d as Arc<dyn IrqHandlerData>);
        r = action_inner
            .handler()
            .unwrap()
            .handle(irq, None, dynamic_data);

        if let Ok(IrqReturn::WakeThread) = r {
            if unlikely(action_inner.thread_fn().is_none()) {
                warn_no_thread(irq, &mut action_inner);
            } else {
                irq_manager().do_irq_wake_thread(desc, &mut action_inner);
            }
        };
    }

    return r.map(|_| ());
}

/// Reference: https://code.dragonos.org.cn/xref/linux-6.1.9/kernel/irq/chip.c?r=&mo=17578&fi=659
fn cond_unmask_eoi_irq(
    desc_inner_guard: &SpinLockGuard<'_, InnerIrqDesc>,
    chip: &Arc<dyn IrqChip>,
) {
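    // Interrupts that are not marked oneshot can be EOI'd right away.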
    if !desc_inner_guard
        .internal_state()
        .contains(IrqDescState::IRQS_ONESHOT)
    {
        chip.irq_eoi(desc_inner_guard.irq_data());
        return;
    }

    /*
     * We need to unmask in the following cases:
     * - Oneshot irq which did not wake the thread (caused by a
     *   spurious interrupt or a primary handler handling it
     *   completely).
     */

    if !desc_inner_guard.common_data().disabled()
        && desc_inner_guard.common_data().masked()
        && desc_inner_guard.threads_oneshot() == 0
    {
        debug!(
            "eoi unmask irq {}",
            desc_inner_guard.irq_data().irq().data()
        );
        chip.irq_eoi(desc_inner_guard.irq_data());
        unmask_irq(desc_inner_guard.irq_data());
    } else if !chip.flags().contains(IrqChipFlags::IRQCHIP_EOI_THREADED) {
        debug!("eoi irq {}", desc_inner_guard.irq_data().irq().data());
        chip.irq_eoi(desc_inner_guard.irq_data());
    } else {
        // The chip requested a threaded EOI (IRQCHIP_EOI_THREADED), so skip it here.
        warn!(
            "irq {}: skip eoi here, chip requests threaded eoi (IRQCHIP_EOI_THREADED)",
            desc_inner_guard.irq_data().irq().data()
        );
    }
}

fn warn_no_thread(irq: IrqNumber, action_inner: &mut SpinLockGuard<'_, InnerIrqAction>) {
    // Warn only once per action.
    if action_inner
        .thread_flags_mut()
        .test_and_set_bit(ThreadedHandlerFlags::IRQTF_WARNED)
    {
        return;
    }

    warn!(
        "irq {}, device {} returned IRQ_WAKE_THREAD, but no threaded handler",
        irq.data(),
        action_inner.name()
    );
}

/// `handle_percpu_devid_irq` - per-CPU local interrupt handler with per-CPU device ids
///
/// * `desc`: the interrupt descriptor for this interrupt
///
/// Handles per-CPU interrupts on SMP machines without any locking requirements.
/// Same as Linux's `handle_percpu_irq()`, with the following addition:
///
/// `action->percpu_dev_id` is a pointer to per-CPU variables which contain the
/// real device id of the CPU on which this handler is invoked.
#[derive(Debug)]
pub struct PerCpuDevIdIrqHandler;

impl IrqFlowHandler for PerCpuDevIdIrqHandler {
    fn handle(&self, irq_desc: &Arc<IrqDesc>, _trap_frame: &mut TrapFrame) {
        let desc_inner_guard = irq_desc.inner();
        let irq_data = desc_inner_guard.irq_data().clone();
        let chip = irq_data.chip_info_read().chip();

        chip.irq_ack(&irq_data);

        let irq = irq_data.irq();

        let action = desc_inner_guard.actions().first().cloned();

        drop(desc_inner_guard);

        if let Some(action) = action {
            let action_inner = action.inner();
            let per_cpu_devid = action_inner.per_cpu_dev_id().cloned();

            let handler = action_inner.handler().unwrap();
            drop(action_inner);

            let _r = handler.handle(
                irq,
                None,
                per_cpu_devid.map(|d| d as Arc<dyn IrqHandlerData>),
            );
        } else {
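            // No action registered: this is a spurious per-CPU interrupt.
            // Disable it on this CPU if it was enabled there, and report it once.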
            let cpu = smp_get_processor_id();

            let enabled = irq_desc
                .inner()
                .percpu_enabled()
                .as_ref()
                .unwrap()
                .get(cpu)
                .unwrap_or(false);

            if enabled {
                irq_manager().irq_percpu_disable(irq_desc, &irq_data, &chip, cpu);
            }
            static ONCE: Once = Once::new();

            ONCE.call_once(|| {
                error!(
                    "Spurious percpu irq {} on cpu {:?}, enabled: {}",
                    irq.data(),
                    cpu,
                    enabled
                );
            });
        }

        chip.irq_eoi(&irq_data);
    }
}