/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>

	.text
	.align	32

	/* CPU mondo (cross-call) vector.  Dequeue one entry from the
	 * per-cpu CPU mondo queue and jump straight to the handler PC
	 * stored in it.  Only the global registers are available here.
	 */
sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop				/* branch delay slot */

	/* Get &trap_block[smp_processor_id()] into %g4.
	 * The scratchpad register holds &trap_block[].fault_info,
	 * so back up by that member's offset to reach the base.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get CPU mondo queue base phys address into %g7.  */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5		/* %g5 = high half (MMU context arg) */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3		/* zero-extend low 32 bits (handler PC) */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	/* We consumed 0x10 of the 0x40-byte entry above; advance the
	 * remaining 0x30 so %g2 points at the next queue entry.
	 */
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer; the queue mask wraps the offset
	 * back to zero at the end of the ring.
	 */
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync			/* publish head update before dispatch */

	/* Tail-call the cross-call handler; no return address is kept.  */
	jmpl	%g3, %g0
	 nop				/* branch delay slot */

sun4v_cpu_mondo_queue_empty:
	retry

	/* Device mondo vector.  Dequeue one entry from the per-cpu
	 * device mondo queue, chain its ivector bucket onto the per-cpu
	 * irq work list, and raise the device-interrupt softint.
	 */
sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop				/* branch delay slot */

	/* Get &trap_block[smp_processor_id()] into %g4.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5.  */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2		/* advance head past this 64-byte entry */

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Currently we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer, this frees up some registers.  */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2		/* wrap offset with queue size mask */

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync			/* publish head update */

	/* %g1 = physical address of this cpu's irq work list head.  */
	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)

	/* For VIRQs, cookie is encoded as ~bucket_phys_addr; a negative
	 * IVEC means "cookie", so invert it to recover the bucket PA.
	 * (xnor with %g0 is bitwise NOT; it sits in the delay slot.)
	 */
	brlz,pt %g3, 1f
	 xnor	%g3, %g0, %g4

	/* Otherwise get __pa(&ivector_table[IVEC]) into %g4;
	 * each table entry is 16 bytes, hence the shift by 4.
	 */
	sethi	%hi(ivector_table_pa), %g4
	ldx	[%g4 + %lo(ivector_table_pa)], %g4
	sllx	%g3, 4, %g3
	add	%g4, %g3, %g4

	/* Push the bucket onto the irq work list (physical stores):
	 * bucket->chain = old list head; list head = bucket.
	 */
1:	ldx	[%g1], %g2
	stxa	%g2, [%g4] ASI_PHYS_USE_EC
	stx	%g4, [%g1]

	/* Signal the interrupt by setting (1 << pil) in %softint.  */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry

	/* Resumable error vector.  Copy the 64-byte error report into
	 * the per-cpu kernel buffer, then call into C to log it.
	 */
sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop				/* branch delay slot */

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full.
	 * (A zero first word marks a free kernel-buffer slot.)
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop				/* branch delay slot */

	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer,
	 * eight 8-byte words at matching offsets.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer (mask wraps the offset).  */
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync			/* publish head update */

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4		/* %g4 = entry offset, for the C handler */
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7		/* etrap needs %g7 = caller's PC */
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1		/* arg1 = saved entry offset */

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7		/* etrap needs %g7 = caller's PC */
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop

	/* Non-resumable error vector.  Structurally identical to the
	 * resumable handler above, but uses the NONRESUM queue/buffer
	 * and the non-resumable C logging routines.
	 */
sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop				/* branch delay slot */

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full.  */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop				/* branch delay slot */

	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer,
	 * eight 8-byte words at matching offsets.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer (mask wraps the offset).  */
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync			/* publish head update */

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4		/* %g4 = entry offset, for the C handler */
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7		/* etrap needs %g7 = caller's PC */
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1		/* arg1 = saved entry offset */

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7		/* etrap needs %g7 = caller's PC */
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop