use core::{intrinsics::unlikely, ops::BitAnd};

use alloc::sync::Arc;
use system_error::SystemError;

use crate::{
    arch::{interrupt::TrapFrame, CurrentIrqArch},
    exception::{irqchip::IrqChipFlags, irqdesc::InnerIrqDesc},
    libs::{once::Once, spinlock::SpinLockGuard},
    process::{ProcessFlags, ProcessManager},
    smp::core::smp_get_processor_id,
};

use super::{
    irqchip::IrqChip,
    irqdata::{IrqData, IrqHandlerData, IrqStatus},
    irqdesc::{
        InnerIrqAction, IrqDesc, IrqDescState, IrqFlowHandler, IrqReturn, ThreadedHandlerFlags,
    },
    manage::{irq_manager, IrqManager},
    InterruptArch, IrqNumber,
};

/// Returns the flow handler used for bad (spurious/unhandled) interrupts.
#[inline(always)]
pub fn bad_irq_handler() -> &'static dyn IrqFlowHandler {
    &HandleBadIrq
}

/// Returns the flow handler for interrupts that use the fast-EOI flow.
#[inline(always)]
pub fn fast_eoi_irq_handler() -> &'static dyn IrqFlowHandler {
    &FastEOIIrqHandler
}

/// Returns the flow handler for edge-triggered interrupts.
#[inline(always)]
#[allow(dead_code)]
pub fn edge_irq_handler() -> &'static dyn IrqFlowHandler {
    &EdgeIrqHandler
}

/// Handles spurious and unhandled irqs.
#[derive(Debug)]
struct HandleBadIrq;

impl IrqFlowHandler for HandleBadIrq {
    /// Reference: https://code.dragonos.org.cn/xref/linux-6.1.9/kernel/irq/handle.c?fi=handle_bad_irq#33
    fn handle(&self, irq_desc: &Arc<IrqDesc>, _trap_frame: &mut TrapFrame) {
        // todo: print_irq_desc
        // todo: bump the kstat counter
        CurrentIrqArch::ack_bad_irq(irq_desc.irq());
    }
}

#[derive(Debug)]
struct FastEOIIrqHandler;

impl IrqFlowHandler for FastEOIIrqHandler {
    /// https://code.dragonos.org.cn/xref/linux-6.1.9/kernel/irq/chip.c?r=&mo=17578&fi=689#689
    fn handle(&self, irq_desc: &Arc<IrqDesc>, _trap_frame: &mut TrapFrame) {
        let chip = irq_desc.irq_data().chip_info_read_irqsave().chip();

        let mut desc_inner = irq_desc.inner();
        // Common exit path: send the EOI unless the chip only wants it for
        // handled interrupts.
        let out = |din: SpinLockGuard<InnerIrqDesc>| {
            if !chip.flags().contains(IrqChipFlags::IRQCHIP_EOI_IF_HANDLED) {
                chip.irq_eoi(din.irq_data());
            }
        };
        if !irq_may_run(&desc_inner) {
            out(desc_inner);
            return;
        }

        desc_inner
            .internal_state_mut()
            .remove(IrqDescState::IRQS_REPLAY | IrqDescState::IRQS_WAITING);

        // No action installed or the interrupt is disabled: mark it pending,
        // mask it and leave.
        if desc_inner.actions().is_empty() || desc_inner.common_data().disabled() {
            desc_inner
                .internal_state_mut()
                .insert(IrqDescState::IRQS_PENDING);
            mask_irq(desc_inner.irq_data());
            out(desc_inner);
            return;
        }

        desc_inner = handle_irq_event(irq_desc, desc_inner);
        cond_unmask_eoi_irq(&desc_inner, &chip);
    }
}

#[derive(Debug)]
struct EdgeIrqHandler;

impl IrqFlowHandler for EdgeIrqHandler {
    // https://code.dragonos.org.cn/xref/linux-6.1.9/kernel/irq/chip.c?fi=handle_edge_irq#775
    fn handle(&self, irq_desc: &Arc<IrqDesc>, _trap_frame: &mut TrapFrame) {
        let mut desc_inner_guard: SpinLockGuard<'_, InnerIrqDesc> = irq_desc.inner();
        if !irq_may_run(&desc_inner_guard) {
            // kdebug!("!irq_may_run");
            desc_inner_guard
                .internal_state_mut()
                .insert(IrqDescState::IRQS_PENDING);
            mask_ack_irq(desc_inner_guard.irq_data());
            return;
        }

        if desc_inner_guard.common_data().disabled() {
            // kdebug!("desc_inner_guard.common_data().disabled()");
            desc_inner_guard
                .internal_state_mut()
                .insert(IrqDescState::IRQS_PENDING);
            mask_ack_irq(desc_inner_guard.irq_data());
            return;
        }

        let irq_data = desc_inner_guard.irq_data().clone();

        irq_data.chip_info_read_irqsave().chip().irq_ack(&irq_data);

        loop {
            if unlikely(desc_inner_guard.actions().is_empty()) {
                kdebug!("no action for irq {}", irq_data.irq().data());
                irq_manager().mask_irq(&irq_data);
                return;
            }

            // While we were handling the previous delivery, another edge may
            // have arrived and been masked. If it was not disabled in the
            // meantime, re-enable it before handling it.
            if desc_inner_guard
                .internal_state()
                .contains(IrqDescState::IRQS_PENDING)
            {
                let status = desc_inner_guard.common_data().status();
                if !status.disabled() && status.masked() {
                    // kdebug!("re-enable irq");
                    irq_manager().unmask_irq(&desc_inner_guard);
                }
            }

            // kdebug!("handle_irq_event");

            desc_inner_guard = handle_irq_event(irq_desc, desc_inner_guard);

            if !desc_inner_guard
                .internal_state()
                .contains(IrqDescState::IRQS_PENDING)
                || desc_inner_guard.common_data().disabled()
            {
                break;
            }
        }
    }
}
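// Illustrative sketch only: a minimal, standalone model of the edge-handler
// replay loop above, using plain booleans instead of the kernel's IrqDesc
// types. It shows why the loop re-runs while a replayed edge is pending and
// re-unmasks a line that was masked (but not disabled) during the previous
// run. All names below are hypothetical; the module is gated out of kernel
// builds by `#[cfg(test)]` and assumes a host-side test harness.
#[cfg(test)]
mod edge_flow_model {
    /// Stand-in for the descriptor bits the loop cares about.
    #[derive(Default)]
    struct ModelDesc {
        pending: bool,  // IRQS_PENDING: another edge arrived while handling
        disabled: bool, // IRQD_IRQ_DISABLED
        masked: bool,   // IRQD_IRQ_MASKED
        runs: usize,    // how many times the handler body executed
    }

    /// Mirrors the `loop { .. }` in `EdgeIrqHandler::handle` above.
    fn run_edge_loop(desc: &mut ModelDesc, mut deliver_extra_edges: usize) {
        loop {
            // A masked-but-enabled line is re-opened before handling again.
            if desc.pending && !desc.disabled && desc.masked {
                desc.masked = false;
            }

            // handle_irq_event(): clear IRQS_PENDING and run the actions.
            desc.pending = false;
            desc.runs += 1;

            // Model an edge that fires while the handler body is running.
            if deliver_extra_edges > 0 {
                deliver_extra_edges -= 1;
                desc.pending = true;
            }

            if !desc.pending || desc.disabled {
                break;
            }
        }
    }

    #[test]
    fn replayed_edge_triggers_second_run() {
        let mut desc = ModelDesc::default();
        run_edge_loop(&mut desc, 1);
        assert_eq!(desc.runs, 2);
        assert!(!desc.pending);
    }
}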
/// Checks whether the interrupt may run right now.
fn irq_may_run(desc_inner_guard: &SpinLockGuard<'_, InnerIrqDesc>) -> bool {
    let mask = IrqStatus::IRQD_IRQ_INPROGRESS | IrqStatus::IRQD_WAKEUP_ARMED;
    let status = desc_inner_guard.common_data().status();

    // The interrupt may run if it is neither in progress nor wakeup-armed.
    if status.bitand(mask).is_empty() {
        return true;
    }

    // todo: check whether another processor is currently polling this interrupt
    return false;
}

pub(super) fn mask_ack_irq(irq_data: &Arc<IrqData>) {
    let chip = irq_data.chip_info_read_irqsave().chip();
    if chip.can_mask_ack() {
        // The chip can mask and ack in a single operation.
        chip.irq_mask_ack(irq_data);
        irq_data.common_data().set_masked();
    } else {
        irq_manager().mask_irq(irq_data);
        chip.irq_ack(irq_data);
    }
}

pub(super) fn mask_irq(irq_data: &Arc<IrqData>) {
    if irq_data.common_data().masked() {
        return;
    }

    let chip = irq_data.chip_info_read_irqsave().chip();
    if chip.irq_mask(irq_data).is_ok() {
        irq_data.irqd_set(IrqStatus::IRQD_IRQ_MASKED);
    }
}

pub(super) fn unmask_irq(irq_data: &Arc<IrqData>) {
    if !irq_data.common_data().masked() {
        return;
    }

    let chip = irq_data.chip_info_read_irqsave().chip();

    if chip.irq_unmask(irq_data).is_ok() {
        irq_data.irqd_clear(IrqStatus::IRQD_IRQ_MASKED);
    }
}

impl IrqManager {
    pub(super) fn do_irq_wake_thread(
        &self,
        desc: &Arc<IrqDesc>,
        action_inner: &mut SpinLockGuard<'_, InnerIrqAction>,
    ) {
        let thread = action_inner.thread();

        if thread.is_none() {
            return;
        }

        let thread = thread.unwrap();
        if thread.flags().contains(ProcessFlags::EXITING) {
            return;
        }

        // If the thread is already running, there is no need to wake it up.
        if action_inner
            .thread_flags_mut()
            .test_and_set_bit(ThreadedHandlerFlags::IRQTF_RUNTHREAD)
        {
            return;
        }

        desc.inc_threads_active();

        ProcessManager::wakeup(&thread).ok();
    }
}

fn handle_irq_event<'a>(
    irq_desc: &'a Arc<IrqDesc>,
    mut desc_inner_guard: SpinLockGuard<'_, InnerIrqDesc>,
) -> SpinLockGuard<'a, InnerIrqDesc> {
    desc_inner_guard
        .internal_state_mut()
        .remove(IrqDescState::IRQS_PENDING);
    desc_inner_guard.common_data().set_inprogress();

    // Drop the descriptor lock while the actions run.
    drop(desc_inner_guard);

    let _r = do_handle_irq_event(irq_desc);

    let desc_inner_guard = irq_desc.inner();
    desc_inner_guard.common_data().clear_inprogress();

    return desc_inner_guard;
}
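// Illustrative sketch only (hypothetical names, not the kernel API): how the
// INPROGRESS bit set by `handle_irq_event` interacts with `irq_may_run`.
// While one CPU runs the actions with the descriptor lock dropped, a second
// delivery of the same irq sees INPROGRESS and backs off instead of
// re-entering the handlers. Gated out of kernel builds by `#[cfg(test)]`.
#[cfg(test)]
mod inprogress_model {
    const INPROGRESS: u32 = 1 << 0;
    const WAKEUP_ARMED: u32 = 1 << 1;

    /// Mirror of the bit test in `irq_may_run`, on a plain status word.
    fn may_run(status: u32) -> bool {
        status & (INPROGRESS | WAKEUP_ARMED) == 0
    }

    #[test]
    fn second_delivery_backs_off_while_in_progress() {
        let mut status = 0u32;
        assert!(may_run(status));

        // handle_irq_event(): set INPROGRESS, drop the lock, run the actions...
        status |= INPROGRESS;
        // ...a nested delivery on another CPU now refuses to run.
        assert!(!may_run(status));

        // ...actions done, lock retaken, INPROGRESS cleared.
        status &= !INPROGRESS;
        assert!(may_run(status));
    }
}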
/// Handles the interrupt event by invoking every installed action.
///
/// https://code.dragonos.org.cn/xref/linux-6.1.9/kernel/irq/handle.c?fi=handle_irq_event#139
#[inline(never)]
fn do_handle_irq_event(desc: &Arc<IrqDesc>) -> Result<(), SystemError> {
    let desc_inner_guard = desc.inner();
    let irq_data = desc_inner_guard.irq_data().clone();
    let actions = desc_inner_guard.actions().clone();
    drop(desc_inner_guard);

    let irq = irq_data.irq();
    let mut r = Ok(IrqReturn::NotHandled);

    for action in actions {
        let mut action_inner: SpinLockGuard<'_, InnerIrqAction> = action.inner();
        // kdebug!("do_handle_irq_event: action: {:?}", action_inner.name());
        let dynamic_data = action_inner
            .dev_id()
            .clone()
            .map(|d| d as Arc<dyn IrqHandlerData>);
        r = action_inner
            .handler()
            .unwrap()
            .handle(irq, None, dynamic_data);

        if let Ok(IrqReturn::WakeThread) = r {
            if unlikely(action_inner.thread_fn().is_none()) {
                warn_no_thread(irq, &mut action_inner);
            } else {
                irq_manager().do_irq_wake_thread(desc, &mut action_inner);
            }
        };
    }

    return r.map(|_| ());
}

/// Reference: https://code.dragonos.org.cn/xref/linux-6.1.9/kernel/irq/chip.c?r=&mo=17578&fi=659
fn cond_unmask_eoi_irq(
    desc_inner_guard: &SpinLockGuard<'_, InnerIrqDesc>,
    chip: &Arc<dyn IrqChip>,
) {
    if !desc_inner_guard
        .internal_state()
        .contains(IrqDescState::IRQS_ONESHOT)
    {
        chip.irq_eoi(desc_inner_guard.irq_data());
        return;
    }

    /*
     * We need to unmask in the following cases:
     * - Oneshot irq which did not wake the thread (caused by a
     *   spurious interrupt or a primary handler handling it
     *   completely).
     */

    if !desc_inner_guard.common_data().disabled()
        && desc_inner_guard.common_data().masked()
        && desc_inner_guard.threads_oneshot() == 0
    {
        kdebug!(
            "eoi unmask irq {}",
            desc_inner_guard.irq_data().irq().data()
        );
        chip.irq_eoi(desc_inner_guard.irq_data());
        unmask_irq(desc_inner_guard.irq_data());
    } else if !chip.flags().contains(IrqChipFlags::IRQCHIP_EOI_THREADED) {
        kdebug!("eoi irq {}", desc_inner_guard.irq_data().irq().data());
        chip.irq_eoi(desc_inner_guard.irq_data());
    } else {
        kwarn!(
            "irq {}: EOI deferred to the threaded handler",
            desc_inner_guard.irq_data().irq().data()
        );
    }
}

fn warn_no_thread(irq: IrqNumber, action_inner: &mut SpinLockGuard<'_, InnerIrqAction>) {
    // Warn only once per action.
    if action_inner
        .thread_flags_mut()
        .test_and_set_bit(ThreadedHandlerFlags::IRQTF_WARNED)
    {
        return;
    }

    kwarn!(
        "irq {}, device {} returned IRQ_WAKE_THREAD, but no threaded handler",
        irq.data(),
        action_inner.name()
    );
}
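// Illustrative decision table for the oneshot EOI path in
// `cond_unmask_eoi_irq` above. The helper and enum below are hypothetical
// (not part of the irq subsystem) and exist only to make the three outcomes
// explicit: EOI and unmask, EOI only, or leave the EOI to the threaded
// handler. Gated out of kernel builds by `#[cfg(test)]`.
#[cfg(test)]
mod oneshot_eoi_model {
    #[derive(Debug, PartialEq, Eq)]
    enum EoiAction {
        EoiAndUnmask,
        EoiOnly,
        LeaveToThread,
    }

    /// Same branch structure as the oneshot part of `cond_unmask_eoi_irq`.
    fn decide(disabled: bool, masked: bool, threads_oneshot: u64, eoi_threaded: bool) -> EoiAction {
        if !disabled && masked && threads_oneshot == 0 {
            // Oneshot irq that did not wake a thread: safe to unmask now.
            EoiAction::EoiAndUnmask
        } else if !eoi_threaded {
            // The chip cannot defer the EOI to the thread, so send it here.
            EoiAction::EoiOnly
        } else {
            // The threaded handler is expected to issue the EOI when done.
            EoiAction::LeaveToThread
        }
    }

    #[test]
    fn oneshot_eoi_outcomes() {
        assert_eq!(decide(false, true, 0, true), EoiAction::EoiAndUnmask);
        assert_eq!(decide(false, true, 1, false), EoiAction::EoiOnly);
        assert_eq!(decide(false, true, 1, true), EoiAction::LeaveToThread);
    }
}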
/// `handle_percpu_devid_irq` - per-CPU local interrupt handler with a per-CPU device id
///
/// * `desc`: the interrupt descriptor for this irq
///
/// Per-CPU interrupts on SMP machines without any locking requirements. Same
/// as Linux's `handle_percpu_irq()`, with the following extra:
///
/// `action->percpu_dev_id` is a pointer to per-cpu variables which contain
/// the real device id for the cpu on which this handler is called.
#[derive(Debug)]
pub struct PerCpuDevIdIrqHandler;

impl IrqFlowHandler for PerCpuDevIdIrqHandler {
    fn handle(&self, irq_desc: &Arc<IrqDesc>, _trap_frame: &mut TrapFrame) {
        let desc_inner_guard = irq_desc.inner();
        let irq_data = desc_inner_guard.irq_data().clone();
        let chip = irq_data.chip_info_read().chip();

        chip.irq_ack(&irq_data);

        let irq = irq_data.irq();

        let action = desc_inner_guard.actions().first().cloned();

        drop(desc_inner_guard);

        if let Some(action) = action {
            let action_inner = action.inner();
            let per_cpu_devid = action_inner.per_cpu_dev_id().cloned();

            let handler = action_inner.handler().unwrap();
            drop(action_inner);

            let _r = handler.handle(
                irq,
                None,
                per_cpu_devid.map(|d| d as Arc<dyn IrqHandlerData>),
            );
        } else {
            let cpu = smp_get_processor_id();

            let enabled = irq_desc
                .inner()
                .percpu_enabled()
                .as_ref()
                .unwrap()
                .get(cpu)
                .unwrap_or(false);

            if enabled {
                irq_manager().irq_percpu_disable(irq_desc, &irq_data, &chip, cpu);
            }

            static ONCE: Once = Once::new();

            ONCE.call_once(|| {
                kerror!(
                    "Spurious percpu irq {} on cpu {:?}, enabled: {}",
                    irq.data(),
                    cpu,
                    enabled
                );
            });
        }

        chip.irq_eoi(&irq_data);
    }
}
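// Illustrative sketch only (hypothetical types, not the kernel's real per-cpu
// machinery): what `percpu_dev_id` buys the handler above. Each CPU indexes
// its own slot, so the same action can carry a distinct device id per CPU
// without locking, and a CPU with no slot falls into the "Spurious percpu
// irq" path. Gated out of kernel builds by `#[cfg(test)]`.
#[cfg(test)]
mod percpu_devid_model {
    /// Pick the device id registered for `cpu`, if any.
    fn dev_id_for_cpu<'a>(table: &'a [Option<&'a str>], cpu: usize) -> Option<&'a str> {
        table.get(cpu).copied().flatten()
    }

    #[test]
    fn each_cpu_sees_its_own_dev_id() {
        let table = [Some("timer-cpu0"), Some("timer-cpu1"), None];
        assert_eq!(dev_id_for_cpu(&table, 0), Some("timer-cpu0"));
        assert_eq!(dev_id_for_cpu(&table, 1), Some("timer-cpu1"));
        // A CPU that never set the irq up takes the spurious path above.
        assert_eq!(dev_id_for_cpu(&table, 2), None);
    }
}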