1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10 /* This is the top level IOC4 device driver. It does very little, farming
11 * out actual tasks to the various slave IOC4 drivers (serial, keyboard/mouse,
12 * and real-time interrupt).
13 */
14
15 #include <linux/config.h>
16 #include <asm/sn/types.h>
17 #include <asm/sn/sgi.h>
18 #include <asm/sn/invent.h>
19 #include <asm/sn/iograph.h>
20 #include <asm/atomic.h>
21 #include <asm/sn/pci/pci_defs.h>
22 #include <asm/sn/pci/pciio.h>
23 #include <linux/pci.h>
24 #include <asm/sn/ioc4.h>
25 #include <asm/sn/pci/pci_bus_cvlink.h>
26
27 /* #define DEBUG_INTERRUPTS */
28 #define SUPPORT_ATOMICS
29
30 #ifdef SUPPORT_ATOMICS
31
32 /*
33 * support routines for local atomic operations.
34 */
35
36 static spinlock_t local_lock;
37
38 static inline unsigned int
atomicSetInt(atomic_t * a,unsigned int b)39 atomicSetInt(atomic_t *a, unsigned int b)
40 {
41 unsigned long s;
42 unsigned int ret, new;
43
44 spin_lock_irqsave(&local_lock, s);
45 new = ret = atomic_read(a);
46 new |= b;
47 atomic_set(a, new);
48 spin_unlock_irqrestore(&local_lock, s);
49
50 return ret;
51 }
52
53 static unsigned int
atomicClearInt(atomic_t * a,unsigned int b)54 atomicClearInt(atomic_t *a, unsigned int b)
55 {
56 unsigned long s;
57 unsigned int ret, new;
58
59 spin_lock_irqsave(&local_lock, s);
60 new = ret = atomic_read(a);
61 new &= ~b;
62 atomic_set(a, new);
63 spin_unlock_irqrestore(&local_lock, s);
64
65 return ret;
66 }
67
68 #else
69
70 #define atomicAddInt(a,b) *(a) += ((unsigned int)(b))
71
/* Non-atomic fallback: OR @b into *@a and return the prior value. */
static inline unsigned int
atomicSetInt(unsigned int *a, unsigned int b)
{
	unsigned int previous = *a;

	*a = previous | b;
	return previous;
}
80
81 #define atomicSetUint64(a,b) *(a) |= ((unsigned long long )(b))
82
/* Non-atomic fallback: clear the @b bits in *@a and return the prior value. */
static inline unsigned int
atomicClearInt(unsigned int *a, unsigned int b)
{
	unsigned int previous = *a;

	*a = previous & ~b;
	return previous;
}
91
92 #define atomicClearUint64(a,b) *(a) &= ~((unsigned long long)(b))
93 #endif /* SUPPORT_ATOMICS */
94
95
/* PCI match table: any IOC4 (fixed vendor/device IDs, any subsystem).
 * The all-zero entry terminates the table. */
static const struct pci_device_id __devinitdata ioc4_s_id_table[] =
{
	{ IOC4_VENDOR_ID_NUM, IOC4_DEVICE_ID_NUM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0 }
};
102
103 int __devinit ioc4_attach(struct pci_dev *, const struct pci_device_id *);
104
105 struct pci_driver ioc4_s_driver =
106 {
107 name : "IOC4 Serial",
108 id_table: ioc4_s_id_table,
109 probe: ioc4_attach,
110 };
111
112 int __init
ioc4_serial_detect(void)113 ioc4_serial_detect(void)
114 {
115 int rc;
116
117 rc = pci_register_driver(&ioc4_s_driver);
118 return 0;
119 }
120 module_init(ioc4_serial_detect);
121
122
123 /*
124 * Some external functions we still need.
125 */
126 extern int ioc4_serial_attach(vertex_hdl_t conn, void *mem);
127 extern cpuid_t cpuvertex_to_cpuid(vertex_hdl_t vhdl);
128
129
130 /*
131 * per-IOC4 data structure
132 */
/*
 * per-IOC4 data structure: one instance per IOC4 chip.  Stored on the
 * "ioc4" hwgraph vertex via ioc4_soft_set()/ioc4_soft_get() (below).
 */
typedef struct ioc4_soft_s {
	vertex_hdl_t is_ioc4_vhdl;	/* the "ioc4" hwgraph vertex */
	vertex_hdl_t is_conn_vhdl;	/* PCI connect-point vertex */

	struct pci_dev *is_pci_dev;	/* underlying PCI device */
	ioc4_mem_t *is_ioc4_mem;	/* mapped IOC4 register set */

	/* Each interrupt type has an entry in the array */
	struct ioc4_intr_type {

		/*
		 * Each in-use entry in this array contains at least
		 * one nonzero bit in sd_bits; no two entries in this
		 * array have overlapping sd_bits values.
		 */
#define MAX_IOC4_INTR_ENTS (8 * sizeof(ioc4reg_t))
		struct ioc4_intr_info {
			ioc4reg_t sd_bits;		/* IR bits owned by this entry */
			ioc4_intr_func_f *sd_intr;	/* sub-driver handler to call */
			intr_arg_t sd_info;		/* opaque arg passed to sd_intr */
			vertex_hdl_t sd_vhdl;		/* owning vertex (diagnostics) */
			struct ioc4_soft_s *sd_soft;	/* back-pointer to this soft */
		} is_intr_info[MAX_IOC4_INTR_ENTS];

		/* Number of entries active in the above array */
		atomic_t is_num_intrs;
		atomic_t is_intr_bits_busy;	/* Bits assigned */
		atomic_t is_intr_ents_free;	/* Free active entries mask */
	} is_intr_type[ioc4_num_intr_types];

	/* is_ir_lock must be held while
	 * modifying sio_ie values, so
	 * we can be sure that sio_ie is
	 * not changing when we read it
	 * along with sio_ir.
	 */
	spinlock_t is_ir_lock;	/* SIO_IE[SC] mod lock */
} ioc4_soft_t;
171
/* Stash/fetch the per-IOC4 soft struct in the hwgraph vertex fastinfo. */
#define ioc4_soft_set(v,i) hwgraph_fastinfo_set((v), (arbitrary_info_t)(i))
#define ioc4_soft_get(v) ((ioc4_soft_t *)hwgraph_fastinfo_get(v))
174
175
176 /* =====================================================================
177 * Function Table of Contents
178 */
179
180
181 /* The IOC4 hardware provides no atomic way to determine if interrupts
182 * are pending since two reads are required to do so. The handler must
183 * read the SIO_IR and the SIO_IES, and take the logical and of the
184 * two. When this value is zero, all interrupts have been serviced and
185 * the handler may return.
186 *
187 * This has the unfortunate "hole" that, if some other CPU or
188 * some other thread or some higher level interrupt manages to
189 * modify SIO_IE between our reads of SIO_IR and SIO_IE, we may
190 * think we have observed SIO_IR&SIO_IE==0 when in fact this
191 * condition never really occurred.
192 *
193 * To solve this, we use a simple spinlock that must be held
194 * whenever modifying SIO_IE; holding this lock while observing
195 * both SIO_IR and SIO_IE guarantees that we do not falsely
196 * conclude that no enabled interrupts are pending.
197 */
198
199 void
ioc4_write_ireg(void * ioc4_soft,ioc4reg_t val,int which,ioc4_intr_type_t type)200 ioc4_write_ireg(void *ioc4_soft, ioc4reg_t val, int which, ioc4_intr_type_t type)
201 {
202 ioc4_mem_t *mem = ((ioc4_soft_t *) ioc4_soft)->is_ioc4_mem;
203 spinlock_t *lp = &((ioc4_soft_t *) ioc4_soft)->is_ir_lock;
204 unsigned long s;
205
206
207 spin_lock_irqsave(lp, s);
208
209 switch (type) {
210 case ioc4_sio_intr_type:
211 switch (which) {
212 case IOC4_W_IES:
213 mem->sio_ies_ro = val;
214 break;
215
216 case IOC4_W_IEC:
217 mem->sio_iec_ro = val;
218 break;
219 }
220 break;
221
222 case ioc4_other_intr_type:
223 switch (which) {
224 case IOC4_W_IES:
225 mem->other_ies_ro = val;
226 break;
227
228 case IOC4_W_IEC:
229 mem->other_iec_ro = val;
230 break;
231 }
232 break;
233
234 case ioc4_num_intr_types:
235 break;
236 }
237 spin_unlock_irqrestore(lp, s);
238 }
239
240
/*
 * Return the set of pending, enabled interrupts of the given type
 * (IR masked with IES).  Both registers are read under is_ir_lock so a
 * concurrent SIO_IE modification cannot make us falsely observe
 * IR & IES == 0 (see the block comment above ioc4_write_ireg).
 */
static inline ioc4reg_t
ioc4_pending_intrs(ioc4_soft_t * ioc4_soft, ioc4_intr_type_t type)
{
	ioc4_mem_t *mem = ioc4_soft->is_ioc4_mem;
	spinlock_t *lp = &ioc4_soft->is_ir_lock;
	unsigned long s;
	ioc4reg_t intrs = (ioc4reg_t)0;

	ASSERT((type == ioc4_sio_intr_type) || (type == ioc4_other_intr_type));

	spin_lock_irqsave(lp, s);

	switch (type) {
	case ioc4_sio_intr_type:
		intrs = mem->sio_ir & mem->sio_ies_ro;
		break;

	case ioc4_other_intr_type:
		intrs = mem->other_ir & mem->other_ies_ro;

		/* Don't process any ATA interrupts, leave them for the ATA driver */
		intrs &= ~(IOC4_OTHER_IR_ATA_INT | IOC4_OTHER_IR_ATA_MEMERR);
		break;

	case ioc4_num_intr_types:
		/* not a real type; returns 0 */
		break;
	}

	spin_unlock_irqrestore(lp, s);
	return intrs;
}
272
273
/*
 * PCI probe routine for the IOC4: enable the device, map its register
 * set, create the "ioc4" hwgraph vertex, allocate and populate the
 * soft struct, reset/mask the chip's interrupts, hook up the top-level
 * interrupt handler, and attach the serial sub-driver.
 *
 * NOTE(review): the hardware-init sequence below is order-dependent
 * (enable -> map -> quiesce interrupts -> connect handler -> subdevs);
 * do not reorder.  Also note the error paths after request_region()
 * do not release the region — acceptable only because the deliberate
 * -1 return (see bottom) keeps the device on the PCI list anyway.
 */
int __devinit
ioc4_attach(struct pci_dev *pci_handle, const struct pci_device_id *pci_id)
{
	ioc4_mem_t *mem;
	/*REFERENCED*/
	graph_error_t rc;
	vertex_hdl_t ioc4_vhdl;
	ioc4_soft_t *soft;
	vertex_hdl_t conn_vhdl = PCIDEV_VERTEX(pci_handle);
	int tmp;	/* NOTE(review): passed to pci_read_config_dword — arguably should be u32 */
	extern void ioc4_ss_connect_interrupt(int, void *, void *);
	extern void ioc4_intr(int, void *, struct pt_regs *);

	if ( pci_enable_device(pci_handle) ) {
		printk("ioc4_attach: Failed to enable device with pci_dev 0x%p... returning\n", (void *)pci_handle);
		return(-1);
	}

	pci_set_master(pci_handle);
	/* IOC4 registers are little-endian; set up byte-swapping for DMA */
	snia_pciio_endian_set(pci_handle, PCIDMA_ENDIAN_LITTLE, PCIDMA_ENDIAN_BIG);

	/*
	 * Get PIO mappings through our "primary"
	 * connection point to the IOC4's CFG and
	 * MEM spaces.
	 */

	/*
	 * Map in the ioc4 memory - we'll do config accesses thru the pci_????() interfaces.
	 *
	 * NOTE(review): pci_resource_start() yields a resource address that is
	 * used directly as a CPU pointer here — this assumes the SN platform's
	 * direct PIO mapping; confirm before reusing on other platforms.
	 */

	mem = (ioc4_mem_t *)pci_resource_start(pci_handle, 0);
	if ( !mem ) {
		printk(KERN_ALERT "%p/" EDGE_LBL_IOC4
			": unable to get PIO mapping for my MEM space\n", (void *)pci_handle);
		return -1;
	}

	/* Claim the register window so no other driver grabs it */
	if ( !request_region((unsigned long)mem, sizeof(*mem), "sioc4_mem")) {
		printk(KERN_ALERT
			"%p/" EDGE_LBL_IOC4
			": unable to get request region for my MEM space\n",
			(void *)pci_handle);
		return -1;
	}

	/*
	 * Create the "ioc4" vertex which hangs off of
	 * the connect points.
	 * This code is slightly paranoid.
	 */
	rc = hwgraph_path_add(conn_vhdl, EDGE_LBL_IOC4, &ioc4_vhdl);
	ASSERT(rc == GRAPH_SUCCESS);

	/*
	 * Allocate the soft structure, fill it in a bit,
	 * and attach it to the ioc4 vertex.
	 */
	NEW(soft);

	spin_lock_init(&soft->is_ir_lock);
	soft->is_ioc4_vhdl = ioc4_vhdl;
	soft->is_conn_vhdl = conn_vhdl;
	soft->is_ioc4_mem = mem;
	soft->is_pci_dev = pci_handle;

	ioc4_soft_set(ioc4_vhdl, soft);

	/* Init the IOC4 */

	/* SN boot PROMs allocate the PCI
	 * space and set up the pci_addr fields.
	 * Other systems need to set the base address.
	 * This is handled automatically if the PCI infrastructure
	 * is used.
	 *
	 * No need to set the latency timer since the PCI
	 * infrastructure sets it to 1 us.
	 */

	/* Read-modify-write the command/status register to enable
	 * bus mastering, memory space, and error reporting. */
	pci_read_config_dword(pci_handle, IOC4_PCI_SCR, &tmp);

	pci_write_config_dword(pci_handle, IOC4_PCI_SCR,
			tmp | PCI_CMD_BUS_MASTER | PCI_CMD_MEM_SPACE |
			PCI_CMD_PAR_ERR_RESP | PCI_CMD_SERR_ENABLE);

	PCI_OUTW(&mem->sio_cr, (0xf << IOC4_SIO_CR_CMD_PULSE_SHIFT));

	/* Enable serial port mode select generic PIO pins as outputs */
	PCI_OUTW(&mem->gpcr_s, IOC4_GPCR_UART0_MODESEL | IOC4_GPCR_UART1_MODESEL);

	/* Clear and disable all interrupts */
	IOC4_WRITE_IEC(soft, ~0, ioc4_sio_intr_type);
	PCI_OUTW(&mem->sio_ir, ~0);

	IOC4_WRITE_IEC(soft, ~0, ioc4_other_intr_type);
	PCI_OUTW(&mem->other_ir, ~0);

	/*
	 * Alloc the IOC4 intr before attaching the subdevs, so the
	 * cpu handling the IOC4 intr is known (for setmustrun on
	 * the ioc4 ithreads).
	 */

	/* attach interrupt handler */

	ioc4_ss_connect_interrupt(pci_handle->irq, (void *)ioc4_intr, (void *)soft);

	/* =============================================================
	 *			Attach Sub-devices
	 *
	 * NB: As subdevs start calling pciio_driver_register(),
	 * we can stop explicitly calling subdev drivers.
	 *
	 * The drivers attached here have not been converted
	 * to stand on their own.  However, they *do* know
	 * to call ioc4_subdev_enabled() to decide whether
	 * to actually attach themselves.
	 *
	 * It would be nice if we could convert these
	 * few remaining drivers over so they would
	 * register as proper PCI device drivers ...
	 */

	ioc4_serial_attach(conn_vhdl, (void *)soft->is_ioc4_mem);	/* DMA serial ports */

	/* Normally we'd return 0 - but we need to get the ide driver init'd too.
	 * Returning an error will keep the IOC4 on the pci list */
	return -1;
}
404
405
406 /*
407 * ioc4_intr_connect:
408 * Arrange for interrupts for a sub-device
409 * to be delivered to the right bit of
410 * code with the right parameter.
411 *
412 * XXX- returning an error instead of panicing
413 * might be a good idea (think bugs in loadable
414 * ioc4 sub-devices).
415 */
416
417
418
419 void
ioc4_intr_connect(vertex_hdl_t conn_vhdl,ioc4_intr_type_t type,ioc4reg_t intrbits,ioc4_intr_func_f * intr,intr_arg_t info,vertex_hdl_t owner_vhdl,vertex_hdl_t intr_dev_vhdl)420 ioc4_intr_connect(vertex_hdl_t conn_vhdl,
421 ioc4_intr_type_t type,
422 ioc4reg_t intrbits,
423 ioc4_intr_func_f *intr,
424 intr_arg_t info,
425 vertex_hdl_t owner_vhdl,
426 vertex_hdl_t intr_dev_vhdl)
427 {
428 graph_error_t rc;
429 vertex_hdl_t ioc4_vhdl;
430 ioc4_soft_t *soft;
431 ioc4reg_t old, bits;
432 int i;
433
434 ASSERT((type == ioc4_sio_intr_type) || (type == ioc4_other_intr_type));
435
436 rc = hwgraph_traverse(conn_vhdl, EDGE_LBL_IOC4, &ioc4_vhdl);
437 if (rc != GRAPH_SUCCESS) {
438 printk(KERN_ALERT "ioc4_intr_connect(%p): ioc4_attach not yet called", (void *)owner_vhdl);
439 return;
440 }
441
442 soft = ioc4_soft_get(ioc4_vhdl);
443 ASSERT(soft != NULL);
444
445 /*
446 * Try to allocate a slot in the array
447 * that has been marked free; if there
448 * are none, extend the high water mark.
449 */
450 while (1) {
451 bits = atomic_read(&soft->is_intr_type[type].is_intr_ents_free);
452 if (bits == 0) {
453 i = atomic_inc(&soft->is_intr_type[type].is_num_intrs) - 1;
454 ASSERT(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0));
455 break;
456 }
457 bits &= ~(bits - 1); /* keep only the ls bit */
458 old = atomicClearInt(&soft->is_intr_type[type].is_intr_ents_free, bits);
459 if (bits & old) {
460 ioc4reg_t shf;
461
462 i = 31;
463 if ((shf = (bits >> 16)))
464 bits = shf;
465 else
466 i -= 16;
467 if ((shf = (bits >> 8)))
468 bits = shf;
469 else
470 i -= 8;
471 if ((shf = (bits >> 4)))
472 bits = shf;
473 else
474 i -= 4;
475 if ((shf = (bits >> 2)))
476 bits = shf;
477 else
478 i -= 2;
479 if ((shf = (bits >> 1)))
480 bits = shf;
481 else
482 i -= 1;
483 ASSERT(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0));
484 break;
485 }
486 }
487
488 soft->is_intr_type[type].is_intr_info[i].sd_bits = intrbits;
489 soft->is_intr_type[type].is_intr_info[i].sd_intr = intr;
490 soft->is_intr_type[type].is_intr_info[i].sd_info = info;
491 soft->is_intr_type[type].is_intr_info[i].sd_vhdl = owner_vhdl;
492 soft->is_intr_type[type].is_intr_info[i].sd_soft = soft;
493
494 /* Make sure there are no bitmask overlaps */
495 {
496 ioc4reg_t old;
497
498 old = atomicSetInt(&soft->is_intr_type[type].is_intr_bits_busy, intrbits);
499 if (old & intrbits) {
500 printk("%p: trying to share ioc4 intr bits 0x%X\n",
501 (void *)owner_vhdl, old & intrbits);
502
503 #if DEBUG && IOC4_DEBUG
504 {
505 int x;
506
507 for (x = 0; x < i; x++)
508 if (intrbits & soft->is_intr_type[type].is_intr_info[x].sd_bits) {
509 printk("%p: ioc4 intr bits 0x%X already call "
510 "0x%X(0x%X, ...)\n",
511 (void *)soft->is_intr_type[type].is_intr_info[x].sd_vhdl,
512 soft->is_intr_type[type].is_intr_info[i].sd_bits,
513 soft->is_intr_type[type].is_intr_info[i].sd_intr,
514 soft->is_intr_type[type].is_intr_info[i].sd_info);
515 }
516 }
517 #endif
518 panic("ioc4_intr_connect: no IOC4 interrupt source sharing allowed");
519 }
520 }
521 }
522
523 /*
524 * ioc4_intr_disconnect:
525 * Turn off interrupt request service for a
526 * specific service function and argument.
527 * Scans the array for connections to the specified
528 * function with the specified info and owner; turns off
529 * the bits specified in intrbits. If this results in
530 * an empty entry, logs it in the free entry map.
531 */
532 void
ioc4_intr_disconnect(vertex_hdl_t conn_vhdl,ioc4_intr_type_t type,ioc4reg_t intrbits,ioc4_intr_func_f * intr,intr_arg_t info,vertex_hdl_t owner_vhdl)533 ioc4_intr_disconnect(vertex_hdl_t conn_vhdl,
534 ioc4_intr_type_t type,
535 ioc4reg_t intrbits,
536 ioc4_intr_func_f *intr,
537 intr_arg_t info,
538 vertex_hdl_t owner_vhdl)
539 {
540 graph_error_t rc;
541 vertex_hdl_t ioc4_vhdl;
542 ioc4_soft_t *soft;
543 ioc4reg_t bits;
544 int i, num_intrs;
545
546 ASSERT((type == ioc4_sio_intr_type) || (type == ioc4_other_intr_type));
547
548 rc = hwgraph_traverse(conn_vhdl, EDGE_LBL_IOC4, &ioc4_vhdl);
549 if (rc != GRAPH_SUCCESS) {
550 printk(KERN_ALERT "%p: ioc4_intr_disconnect: ioc4_attach not yet called", (void *)owner_vhdl);
551 return;
552 }
553
554 soft = ioc4_soft_get(ioc4_vhdl);
555 ASSERT(soft != NULL);
556
557 num_intrs = (int)atomic_read(&soft->is_intr_type[type].is_num_intrs);
558 for (i = 0; i < num_intrs; ++i) {
559 if ((soft->is_intr_type[type].is_intr_info[i].sd_intr == intr) &&
560 (soft->is_intr_type[type].is_intr_info[i].sd_info == info) &&
561 (soft->is_intr_type[type].is_intr_info[i].sd_vhdl == owner_vhdl) &&
562 (bits = soft->is_intr_type[type].is_intr_info[i].sd_bits & intrbits)) {
563 soft->is_intr_type[type].is_intr_info[i].sd_bits &= ~bits;
564 atomicClearInt(&soft->is_intr_type[type].is_intr_bits_busy, bits);
565 if (!(soft->is_intr_type[type].is_intr_info[i].sd_bits)) {
566 soft->is_intr_type[type].is_intr_info[i].sd_intr = NULL;
567 soft->is_intr_type[type].is_intr_info[i].sd_info = NULL;
568 soft->is_intr_type[type].is_intr_info[i].sd_vhdl = GRAPH_VERTEX_NONE;
569 atomicSetInt(&soft->is_intr_type[type].is_intr_ents_free, 1 << i);
570 }
571 }
572 }
573 }
574
575 /* Top level IOC4 interrupt handler. Farms out the interrupt to
576 * the various IOC4 device drivers.
577 */
578
/*
 * Top-level IOC4 interrupt handler.  For each interrupt type, reads
 * the pending-and-enabled set (ioc4_pending_intrs), then dispatches
 * each subset of bits to the sub-driver that registered them via
 * ioc4_intr_connect.  Owned bits are disabled (IEC) before the
 * sub-driver handler runs; any bits left over are logged as unknown.
 */
void
ioc4_intr(int irq, void *arg, struct pt_regs *regs)
{
	ioc4_soft_t *soft;
	ioc4reg_t this_ir;	/* still-unclaimed pending bits */
	ioc4reg_t this_mir;	/* bits matched by the current entry */
	int x, num_intrs = 0;
	ioc4_intr_type_t t;

	soft = (ioc4_soft_t *)arg;

	if (!soft)
		return;		/* Polled but no console ioc4 registered */

	for (t = ioc4_first_intr_type; t < ioc4_num_intr_types; t++) {
		num_intrs = (int)atomic_read(&soft->is_intr_type[t].is_num_intrs);

		this_mir = this_ir = ioc4_pending_intrs(soft, t);
#ifdef DEBUG_INTERRUPTS
		printk("%s : %d : this_mir 0x%x num_intrs %d\n", __FUNCTION__, __LINE__, this_mir, num_intrs);
#endif

		/* Farm out the interrupt to the various drivers depending on
		 * which interrupt bits are set.
		 */
		for (x = 0; x < num_intrs; x++) {
			struct ioc4_intr_info *ii = &soft->is_intr_type[t].is_intr_info[x];
			if ((this_mir = this_ir & ii->sd_bits)) {
				/* Disable owned interrupts, and call the interrupt handler */
				IOC4_WRITE_IEC(soft, ii->sd_bits, t);
				ii->sd_intr(ii->sd_info, this_mir);
				this_ir &= ~this_mir;
			}
		}

		/* Anything still set was pending+enabled but claimed by nobody */
		if (this_ir)
			printk(KERN_ALERT "unknown IOC4 %s interrupt 0x%x, sio_ir = 0x%x, sio_ies = 0x%x, other_ir = 0x%x, other_ies = 0x%x\n",
			       (t == ioc4_sio_intr_type) ? "sio" : "other",
			       this_ir,
			       soft->is_ioc4_mem->sio_ir,
			       soft->is_ioc4_mem->sio_ies_ro,
			       soft->is_ioc4_mem->other_ir,
			       soft->is_ioc4_mem->other_ies_ro);
	}
#ifdef DEBUG_INTERRUPTS
	{
		ioc4_mem_t *mem = soft->is_ioc4_mem;
		spinlock_t *lp = &soft->is_ir_lock;
		unsigned long s;

		spin_lock_irqsave(lp, s);
		printk("%s : %d : sio_ir 0x%x sio_ies_ro 0x%x other_ir 0x%x other_ies_ro 0x%x mask 0x%x\n",
		       __FUNCTION__, __LINE__,
		       mem->sio_ir,
		       mem->sio_ies_ro,
		       mem->other_ir,
		       mem->other_ies_ro,
		       IOC4_OTHER_IR_ATA_INT | IOC4_OTHER_IR_ATA_MEMERR);

		spin_unlock_irqrestore(lp, s);
	}
#endif
}
642