1 /*
2  *  drivers/s390/s390io.c
3  *   S/390 common I/O routines
4  *   $Revision: 1.247.4.4 $
5  *
6  *  S390 version
7  *    Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
8  *                             IBM Corporation
9  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
10  *               Cornelia Huck (cohuck@de.ibm.com)
11  *    ChangeLog: 01/07/2001 Blacklist cleanup (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
12  *               01/04/2001 Holger Smolinski (smolinsk@de.ibm.com)
13  *                          Fixed lost interrupts and do_adapter_IO
14  *               xx/xx/xxxx nnn          multiple changes not reflected
15  *               03/12/2001 Ingo Adlung  blacklist= - changed to cio_ignore=
16  *               03/14/2001 Ingo Adlung  disable interrupts before start_IO
17  *                                        in Path Group processing
18  *                                       decrease retry2 on busy while
19  *                                        disabling sync_isc; reset isc_cnt
20  *                                        on io error during sync_isc enablement
21  *               05/09/2001 Cornelia Huck added exploitation of debug feature
22  *               05/16/2001 Cornelia Huck added /proc/deviceinfo/<devno>/
23  *               05/22/2001 Cornelia Huck added /proc/cio_ignore
24  *                                        un-ignore blacklisted devices by piping
25  *                                        to /proc/cio_ignore
26  *               xx/xx/xxxx some bugfixes & cleanups
27  *               08/02/2001 Cornelia Huck not already known devices can be blacklisted
28  *                                        by piping to /proc/cio_ignore
29  *               09/xx/2001 couple more fixes
30  *               10/15/2001 Cornelia Huck xsch - internal only for now
31  *               10/29/2001 Cornelia Huck Blacklisting reworked again
32  *               10/29/2001 Cornelia Huck improved utilization of debug feature
33  *               10/29/2001 Cornelia Huck more work on cancel_IO - use the flag
34  *                                        DOIO_CANCEL_ON_TIMEOUT in do_IO to get
35  *                                        io cancelled
36  *               11/15/2001 Cornelia Huck proper behaviour with procfs off
37  *               12/10/2001 Cornelia Huck added private_data + functions to
38  *                                        ioinfo_t
39  *               11-12/2001 Cornelia Huck various cleanups
40  *               01/09/2002 Cornelia Huck PGID fixes
41  *                                        process css machine checks
42  *               01/10/2002 Cornelia Huck added /proc/chpids
43  *               04/10/2002 Cornelia Huck fixed reaction on css machine checks
44  *               04/23/2002 Cornelia Huck fixed console isc (un)setting
45  *               06/06/2002 Cornelia Huck added detection of locked devices
46  */
47 
48 #include <linux/module.h>
49 #include <linux/config.h>
50 #include <linux/errno.h>
51 #include <linux/kernel_stat.h>
52 #include <linux/signal.h>
53 #include <linux/sched.h>
54 #include <linux/interrupt.h>
55 #include <linux/slab.h>
56 #include <linux/string.h>
57 #include <linux/smp.h>
58 #include <linux/threads.h>
59 #include <linux/smp_lock.h>
60 #include <linux/init.h>
61 #include <linux/bootmem.h>
62 #include <linux/ctype.h>
63 #ifdef CONFIG_PROC_FS
64 #include <linux/proc_fs.h>
65 #endif
66 #include <asm/system.h>
67 #include <asm/io.h>
68 #include <asm/irq.h>
69 #include <asm/bitops.h>
70 #include <asm/smp.h>
71 #include <asm/pgtable.h>
72 #include <asm/delay.h>
73 #include <asm/processor.h>
74 #include <asm/lowcore.h>
75 #include <asm/idals.h>
76 #include <asm/uaccess.h>
77 #include <asm/cpcmd.h>
78 
79 #include <asm/s390io.h>
80 #include <asm/s390dyn.h>
81 #include <asm/s390mach.h>
82 #include <asm/debug.h>
83 #include <asm/queue.h>
84 
#ifndef TRUE
#define TRUE  1
#define FALSE 0
#endif

/*
 * Validate a subchannel number before use: return -ENODEV from the
 * enclosing function if it exceeds highest_subchannel, is negative,
 * has no ioinfo area allocated, or is not an I/O subchannel (st != 0).
 * Only usable inside functions returning int.
 */
#define SANITY_CHECK(irq) do { \
if (irq > highest_subchannel || irq < 0) \
		return (-ENODEV); \
	if (ioinfo[irq] == INVALID_STORAGE_AREA) \
		return (-ENODEV); \
        if (ioinfo[irq]->st) \
                return -ENODEV; \
	} while(0)

/* Log a fixed text string to the CIO trace debug area (if initialized). */
#define CIO_TRACE_EVENT(imp, txt) do { \
	if (cio_debug_initialized) \
		debug_text_event(cio_debug_trace_id, \
				 imp, \
				 txt); \
        }while (0)

/* Log a printf-style message to the CIO message debug area. */
#define CIO_MSG_EVENT(imp, args...) do { \
        if (cio_debug_initialized) \
                debug_sprintf_event(cio_debug_msg_id, \
                                    imp, \
                                    ##args); \
        } while (0)

/* Log a printf-style message to the CIO channel-report (CRW) debug area. */
#define CIO_CRW_EVENT(imp, args...) do { \
        if (cio_debug_initialized) \
                debug_sprintf_event(cio_debug_crw_id, \
                                    imp, \
                                    ##args); \
        } while (0)

/* Dump raw binary data into the CIO trace debug area. */
#define CIO_HEX_EVENT(imp, args...) do { \
	if (cio_debug_initialized) \
                debug_event(cio_debug_trace_id, imp, ##args); \
        } while (0)

/* Local debug switches; CONFIG_DEBUG_IO is forced off here. */
#undef  CONFIG_DEBUG_IO
#define CONFIG_DEBUG_CRW
#define CONFIG_DEBUG_CHSC
unsigned int highest_subchannel;	/* highest valid subchannel number found */
ioinfo_t *ioinfo_head = NULL;
ioinfo_t *ioinfo_tail = NULL;
/* Per-subchannel state, indexed by irq (= subchannel number). */
ioinfo_t *ioinfo[__MAX_SUBCHANNELS] = {
	[0 ... (__MAX_SUBCHANNELS - 1)] = INVALID_STORAGE_AREA
};

#ifdef CONFIG_CHSC
/* Channel path id bitmaps: 4 * 64 = 256 bits, one per possible chpid. */
__u64 chpids[4] = {0,0,0,0};
__u64 chpids_logical[4] = {-1,-1,-1,-1};
__u64 chpids_known[4] = {0,0,0,0};
#endif /* CONFIG_CHSC */

static atomic_t sync_isc = ATOMIC_INIT (-1);
static int sync_isc_cnt = 0;	/* synchronous irq processing lock */

static spinlock_t adapter_lock = SPIN_LOCK_UNLOCKED;	/* adapter interrupt lock */
static int cons_dev = -1;	/* identify console device */
static int init_IRQ_complete = 0;
static int cio_show_msg = 0;	/* set by "cio_msg=yes" boot parameter */
static schib_t *p_init_schib = NULL;
static irb_t *p_init_irb = NULL;
static __u64 irq_IPL_TOD;
static adapter_int_handler_t adapter_handler = NULL;	/* single registered adapter irq handler */
static pgid_t * global_pgid;

/* for use of debug feature */
debug_info_t *cio_debug_msg_id = NULL;
debug_info_t *cio_debug_trace_id = NULL;
debug_info_t *cio_debug_crw_id = NULL;
int cio_debug_initialized = 0;

#ifdef CONFIG_CHSC
int cio_chsc_desc_avail = 0;
int cio_chsc_err_msg = 0;
#endif
165 
/* Forward declarations of internal helpers and exported entry points. */
static void init_IRQ_handler (int irq, void *dev_id, struct pt_regs *regs);
static void s390_process_subchannels (void);
static void s390_device_recognition_all (void);
static void s390_device_recognition_irq (int irq);
#ifdef CONFIG_PROC_FS
static void s390_redo_validation (void);
#endif
static int s390_validate_subchannel (int irq, int enable);
static int s390_SenseID (int irq, senseid_t * sid, __u8 lpm);
static int s390_SetPGID (int irq, __u8 lpm);
static int s390_SensePGID (int irq, __u8 lpm, pgid_t * pgid);
static int s390_process_IRQ (unsigned int irq);
static int enable_subchannel (unsigned int irq);
static int disable_subchannel (unsigned int irq);
int cancel_IO (int irq);
int s390_start_IO (int irq, ccw1_t * cpa, unsigned long user_intparm,
		   __u8 lpm, unsigned long flag);

#ifdef CONFIG_PROC_FS
static int chan_proc_init (void);
#endif

static inline void do_adapter_IO (__u32 intparm);

static void s390_schedule_path_verification(unsigned long irq);
int s390_DevicePathVerification (int irq, __u8 domask);
int s390_register_adapter_interrupt (adapter_int_handler_t handler);
int s390_unregister_adapter_interrupt (adapter_int_handler_t handler);

extern int do_none (unsigned int irq, int cpu, struct pt_regs *regs);
extern int enable_none (unsigned int irq);
extern int disable_none (unsigned int irq);

asmlinkage void do_IRQ (struct pt_regs regs);

#ifdef CONFIG_CHSC
/* Pre-allocated CHSC request areas and their serializing locks. */
static chsc_area_t *chsc_area_ssd = NULL;
static chsc_area_t *chsc_area_sei = NULL;
static spinlock_t chsc_lock_ssd = SPIN_LOCK_UNLOCKED;
static spinlock_t chsc_lock_sei = SPIN_LOCK_UNLOCKED;
static int chsc_get_sch_descriptions( void );
int s390_vary_chpid( __u8 chpid, int on );
#endif

#ifdef CONFIG_PROC_FS
#define MAX_CIO_PROCFS_ENTRIES 0x300
/* magic number; we want to have some room to spare */

int cio_procfs_device_create (int devno);
int cio_procfs_device_remove (int devno);
int cio_procfs_device_purge (void);
#endif

int cio_notoper_msg = 1;

#ifdef CONFIG_PROC_FS
int cio_proc_devinfo = 0;	/* switch off the /proc/deviceinfo/ stuff by default
				   until problems are dealt with */
#endif

unsigned long s390_irq_count[NR_CPUS];	/* trace how many irqs have occured per cpu... */
int cio_count_irqs = 1;		/* toggle use here... */

int cio_sid_with_pgid = 0;     /* if we need a PGID for SenseID, switch this on */
230 
231 /*
232  * "Blacklisting" of certain devices:
233  * Device numbers given in the commandline as cio_ignore=... won't be known to Linux
234  * These can be single devices or ranges of devices
235  *
236  * 10/23/01 reworked to get rid of lists
237  */
238 
/* Blacklist bitmap: one bit per device number 0x0000-0xffff (2048*32 bits). */
static u32 bl_dev[2048];

static spinlock_t blacklist_lock = SPIN_LOCK_UNLOCKED;
static int highest_ignored = 0;	/* highest devno currently blacklisted */
static int nr_ignored = 0;	/* number of blacklisted devnos */
244 
245 /*
246  * Function: blacklist_range_add
247  * Blacklist the devices from-to
248  */
249 
250 static inline void
blacklist_range_add(int from,int to,int locked)251 blacklist_range_add (int from, int to, int locked)
252 {
253 
254 	unsigned long flags;
255 	int i;
256 
257 	if ((to && (from > to))
258 	    || (to<0) || (to > 0xffff)
259 	    || (from<0) || (from > 0xffff))
260 		return;
261 
262 	if (!locked)
263 		spin_lock_irqsave (&blacklist_lock, flags);
264 
265 	if (!to)
266 		to = from;
267 	for (i = from; i <= to; i++) {
268 		if (!test_and_set_bit (i, &bl_dev))
269 			nr_ignored++;
270 	}
271 
272 	if (to >= highest_ignored)
273 		highest_ignored = to;
274 
275 	if (!locked)
276 		spin_unlock_irqrestore (&blacklist_lock, flags);
277 }
278 
279 /*
280  * Function: blacklist_range_remove
281  * Removes a range from the blacklist chain
282  */
283 
284 static inline void
blacklist_range_remove(int from,int to)285 blacklist_range_remove (int from, int to)
286 {
287 	long flags;
288 	int i;
289 
290 	if ((to && (from > to))
291 	    || (to<0) || (to > 0xffff)
292 	    || (from<0) || (from > 0xffff))
293 		return;
294 
295 	spin_lock_irqsave (&blacklist_lock, flags);
296 
297 	for (i = from; i <= to; i++) {
298 		if (test_and_clear_bit (i, &bl_dev))
299 			nr_ignored--;
300 	}
301 
302 	if (to == highest_ignored)
303 		for (highest_ignored = from; (highest_ignored > 0)
304 		     && (!test_bit (highest_ignored, &bl_dev));
305 		     highest_ignored--) ;
306 
307 	spin_unlock_irqrestore (&blacklist_lock, flags);
308 }
309 
310 /* Parsing the commandline for blacklist parameters */
311 
312 /*
313  * Variable to hold the blacklisted devices given by the parameter line
314  * cio_ignore=...
315  */
316 char *blacklist[256] = { NULL, };
317 
318 /*
319  * Get the cio_ignore=... items from the parameter line
320  */
321 
322 static void
blacklist_split_parm_string(char * str)323 blacklist_split_parm_string (char *str)
324 {
325 	char *tmp = str;
326 	int count = 0;
327 	do {
328 		char *end;
329 		int len;
330 		end = strchr (tmp, ',');
331 		if (end == NULL) {
332 			len = strlen (tmp) + 1;
333 		} else {
334 			len = (long) end - (long) tmp + 1;
335 			*end = '\0';
336 			end++;
337 		}
338 		blacklist[count] = alloc_bootmem (len * sizeof (char));
339 		if (blacklist == NULL) {
340 			printk (KERN_WARNING
341 				"can't store cio_ignore= parameter no %d\n",
342 				count + 1);
343 			break;
344 		}
345 		memset (blacklist[count], 0, len * sizeof (char));
346 		memcpy (blacklist[count], tmp, len * sizeof (char));
347 		count++;
348 		tmp = end;
349 	} while (tmp != NULL && *tmp != '\0');
350 }
351 
352 /*
353  * The blacklist parameters as one concatenated string
354  */
355 
356 static char blacklist_parm_string[1024] __initdata = { 0, };
357 
358 /*
359  * function: blacklist_strtoul
360  * Strip leading '0x' and interpret the values as Hex
361  */
/*
 * Parse a device number, tolerating an optional "0x" (or lone "0")
 * prefix; everything is interpreted as hexadecimal.
 */
static inline int
blacklist_strtoul (char *str, char **stra)
{
	if (str[0] == '0') {
		str++;
		if (str[0] == 'x')
			str++;
	}
	return simple_strtoul (str, stra, 16);	/* interpret anything as hex */
}
372 
373 /*
374  * Function: blacklist_parse
375  * Parse the parameters given to cio_ignore=...
376  * Add the blacklisted devices to the blacklist chain
377  */
378 
/*
 * Walk the NULL-terminated blacklist[] item array; each item is either
 * a single device number or a "from-to" range, and is added to the
 * blacklist bitmap.
 */
static inline void
blacklist_parse (char **str)
{
	while (*str) {
		char *cursor = *str;
		int from, to;

		from = blacklist_strtoul (cursor, &cursor);
		to = 0;
		if (*cursor == '-') {
			cursor++;
			to = blacklist_strtoul (cursor, &cursor);
		}

		blacklist_range_add (from, to, 0);
#ifdef CONFIG_DEBUG_IO
		printk (KERN_INFO "Blacklisted range from %X to %X\n", from,
			to);
#endif
		str++;
	}
}
403 
404 /*
405  * Initialisation of blacklist
406  */
407 
408 void __init
blacklist_init(void)409 blacklist_init (void)
410 {
411 #ifdef CONFIG_DEBUG_IO
412 	printk (KERN_DEBUG "Reading blacklist...\n");
413 #endif
414 	CIO_MSG_EVENT(6, "Reading blacklist\n");
415 
416 	blacklist_split_parm_string (blacklist_parm_string);
417 	blacklist_parse (blacklist);
418 }
419 
420 /*
421  * Get all the blacklist parameters from parameter line
422  */
423 
424 void __init
blacklist_setup(char * str,int * ints)425 blacklist_setup (char *str, int *ints)
426 {
427 	int len = strlen (blacklist_parm_string);
428 	if (len != 0) {
429 		strcat (blacklist_parm_string, ",");
430 	}
431 	strcat (blacklist_parm_string, str);
432 }
433 
434 int __init
blacklist_call_setup(char * str)435 blacklist_call_setup (char *str)
436 {
437 	int dummy;
438 #ifdef CONFIG_DEBUG_IO
439 	printk (KERN_DEBUG "Reading blacklist parameters...\n");
440 #endif
441 	CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
442 
443 	blacklist_setup (str, &dummy);
444 
445 	/* Blacklist ranges must be ready when device recognition starts */
446 	blacklist_init ();
447 
448 	return 1;
449 }
450 
451 __setup ("cio_ignore=", blacklist_call_setup);
452 
453 /* Checking if devices are blacklisted */
454 
455 /*
456  * Function: is_blacklisted
457  * Returns 1 if the given devicenumber can be found in the blacklist, otherwise 0.
458  */
459 
460 static inline int
is_blacklisted(int devno)461 is_blacklisted (int devno)
462 {
463 	long flags;
464 	int retval = 0;
465 
466 	spin_lock_irqsave (&blacklist_lock, flags);
467 
468 	if (test_bit (devno, &bl_dev))
469 		retval = 1;
470 
471 	spin_unlock_irqrestore (&blacklist_lock, flags);
472 	return retval;
473 }
474 
475 /*
476  * Function: blacklist_free_all_ranges
477  * set all blacklisted devices free...
478  */
479 
480 void
blacklist_free_all_ranges(void)481 blacklist_free_all_ranges (void)
482 {
483 	unsigned long flags;
484 	int i;
485 
486 	spin_lock_irqsave (&blacklist_lock, flags);
487 
488 	for (i = 0; i <= highest_ignored; i++)
489 		clear_bit (i, &bl_dev);
490 	highest_ignored = 0;
491 	nr_ignored = 0;
492 
493 	spin_unlock_irqrestore (&blacklist_lock, flags);
494 }
495 
496 #ifdef CONFIG_PROC_FS
497 /*
498  * Function: blacklist_parse_proc_parameters
499  * parse the stuff which is piped to /proc/cio_ignore
500  */
/*
 * Parse commands piped to /proc/cio_ignore.
 * Supported syntax:
 *   "free all"                        - clear the whole blacklist
 *   "free <range>[,<range>...]"      - un-blacklist device ranges
 *   "add <range>[,<range>...]"       - blacklist device ranges, unless a
 *                                      device in the range is already known
 * where <range> is "<devno>" or "<devno>-<devno>" in hex.
 * After freeing, subchannel validation is redone so the devices appear.
 */
void
blacklist_parse_proc_parameters (char *buf)
{
	int i;
	int from = 0;
	int to = 0;
	long flags;
	int err = 0;

	if (strstr (buf, "free ")) {
		/* skip the "free " keyword (5 characters) */
		for (i = 0; i < 5; i++) {
			buf++;
		}
		if (strstr (buf, "all")) {
			blacklist_free_all_ranges ();
			s390_redo_validation ();
		} else {
			/* parse comma-separated hex ranges until EOL */
			while (*buf != 0 && *buf != '\n') {
				if (!isxdigit(*buf)) {
					printk(KERN_WARNING "%s: error parsing "
					       "\"%s\"\n", __FUNCTION__, buf);
					return;
				}

				from = blacklist_strtoul (buf, &buf);
				to = (*buf == '-') ?
					blacklist_strtoul (buf+1, &buf) : from;

				blacklist_range_remove (from, to);

				if (*buf == ',')
					buf++;
			}
			s390_redo_validation();
		}
	} else if (strstr (buf, "add ")) {
		/* skip the "add " keyword (4 characters) */
		for (i = 0; i < 4; i++) {
			buf++;
		}
		while (*buf != 0 && *buf != '\n') {
			if (!isxdigit(*buf)) {
				printk(KERN_WARNING "%s: error parsing "
				       "\"%s\"\n", __FUNCTION__, buf);
				return;
			}

			from = blacklist_strtoul (buf, &buf);
			to = (*buf == '-') ?
				blacklist_strtoul (buf+1, &buf) : from;

			spin_lock_irqsave (&blacklist_lock, flags);

			/*
			 * Don't allow for already known devices to be
			 * blacklisted
			 * The criterion is a bit dumb, devices which once were
			 * there but are already gone are also caught...
			 */

			err = 0;
			for (i = 0; i <= highest_subchannel; i++) {
				if (ioinfo[i] != INVALID_STORAGE_AREA) {
					if (!ioinfo[i]->st)
						if ((ioinfo[i]->schib.pmcw.dev >= from)
						    && (ioinfo[i]->schib.pmcw.dev <=
							to)) {
							printk (KERN_WARNING
								"cio_ignore: Won't blacklist "
								"already known devices, "
								"skipping range %x to %x\n",
								from, to);
							err = 1;
							break;
						}
				}
			}

			/* lock already held, hence locked = 1 */
			if (!err)
				blacklist_range_add (from, to, 1);

			spin_unlock_irqrestore (&blacklist_lock, flags);
			if (*buf == ',')
				buf++;
		}

	} else {
		printk (KERN_WARNING
			"cio_ignore: Parse error; "
			"try using 'free all|<devno-range>,<devno-range>,...'\n");
		printk (KERN_WARNING
			"or 'add <devno-range>,<devno-range>,...'\n");
	}
}
594 #endif
595 /* End of blacklist handling */
596 
597 void s390_displayhex (char *str, void *ptr, s32 cnt);
598 
599 void
s390_displayhex(char * str,void * ptr,s32 cnt)600 s390_displayhex (char *str, void *ptr, s32 cnt)
601 {
602 	s32 cnt1, cnt2, maxcnt2;
603 	u32 *currptr = (__u32 *) ptr;
604 
605 	printk ("\n%s\n", str);
606 
607 	for (cnt1 = 0; cnt1 < cnt; cnt1 += 16) {
608 		printk ("%08lX ", (unsigned long) currptr);
609 		maxcnt2 = cnt - cnt1;
610 		if (maxcnt2 > 16)
611 			maxcnt2 = 16;
612 		for (cnt2 = 0; cnt2 < maxcnt2; cnt2 += 4)
613 			printk ("%08X ", *currptr++);
614 		printk ("\n");
615 	}
616 }
617 
618 static int __init
cio_setup(char * parm)619 cio_setup (char *parm)
620 {
621 	if (!strcmp (parm, "yes")) {
622 		cio_show_msg = 1;
623 	} else if (!strcmp (parm, "no")) {
624 		cio_show_msg = 0;
625 	} else {
626 		printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'",
627 			parm);
628 
629 	}
630 
631 	return 1;
632 }
633 
634 __setup ("cio_msg=", cio_setup);
635 
636 static int __init
cio_notoper_setup(char * parm)637 cio_notoper_setup (char *parm)
638 {
639 	if (!strcmp (parm, "yes")) {
640 		cio_notoper_msg = 1;
641 	} else if (!strcmp (parm, "no")) {
642 		cio_notoper_msg = 0;
643 	} else {
644 		printk (KERN_ERR
645 			"cio_notoper_setup: "
646 			"invalid cio_notoper_msg parameter '%s'", parm);
647 	}
648 
649 	return 1;
650 }
651 
652 __setup ("cio_notoper_msg=", cio_notoper_setup);
653 
#ifdef CONFIG_PROC_FS
/* __setup handler for "cio_proc_devinfo=yes|no": toggle /proc/deviceinfo. */
static int __init
cio_proc_devinfo_setup (char *parm)
{
	if (strcmp (parm, "yes") == 0)
		cio_proc_devinfo = 1;
	else if (strcmp (parm, "no") == 0)
		cio_proc_devinfo = 0;
	else
		printk (KERN_ERR
			"cio_proc_devinfo_setup: invalid parameter '%s'\n",
			parm);

	return 1;
}

__setup ("cio_proc_devinfo=", cio_proc_devinfo_setup);
#endif
673 
674 static int __init
cio_pgid_setup(char * parm)675 cio_pgid_setup (char *parm)
676 {
677 	if (!strcmp (parm, "yes")) {
678 		cio_sid_with_pgid = 1;
679 	} else if (!strcmp (parm, "no")) {
680 		cio_sid_with_pgid = 0;
681 	} else {
682 		printk (KERN_ERR
683 			"cio_pgid_setup : invalid cio_msg parameter '%s'",
684 			parm);
685 
686 	}
687 
688 	return 1;
689 }
690 
691 __setup ("cio_sid_with_pgid=", cio_pgid_setup);
692 
693 /*
694  * register for adapter interrupts
695  *
696  * With HiperSockets the zSeries architecture provides for
697  *  means of adapter interrups, pseudo I/O interrupts that are
698  *  not tied to an I/O subchannel, but to an adapter. However,
699  *  it doesn't disclose the info how to enable/disable them, but
700  *  to recognize them only. Perhaps we should consider them
701  *  being shared interrupts, and thus build a linked list
702  *  of adapter handlers ... to be evaluated ...
703  */
704 int
s390_register_adapter_interrupt(adapter_int_handler_t handler)705 s390_register_adapter_interrupt (adapter_int_handler_t handler)
706 {
707 	int ret = 0;
708 	char dbf_txt[15];
709 
710 	CIO_TRACE_EVENT (4, "rgaint");
711 
712 	spin_lock (&adapter_lock);
713 
714 	if (handler == NULL)
715 		ret = -EINVAL;
716 	else if (adapter_handler)
717 		ret = -EBUSY;
718 	else
719 		adapter_handler = handler;
720 
721 	spin_unlock (&adapter_lock);
722 
723 	sprintf (dbf_txt, "ret:%d", ret);
724 	CIO_TRACE_EVENT (4, dbf_txt);
725 
726 	return (ret);
727 }
728 
729 int
s390_unregister_adapter_interrupt(adapter_int_handler_t handler)730 s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
731 {
732 	int ret = 0;
733 	char dbf_txt[15];
734 
735 	CIO_TRACE_EVENT (4, "urgaint");
736 
737 	spin_lock (&adapter_lock);
738 
739 	if (handler == NULL)
740 		ret = -EINVAL;
741 	else if (handler != adapter_handler)
742 		ret = -EINVAL;
743 	else
744 		adapter_handler = NULL;
745 
746 	spin_unlock (&adapter_lock);
747 
748 	sprintf (dbf_txt, "ret:%d", ret);
749 	CIO_TRACE_EVENT (4, dbf_txt);
750 
751 	return (ret);
752 }
753 
754 static inline void
do_adapter_IO(__u32 intparm)755 do_adapter_IO (__u32 intparm)
756 {
757 	CIO_TRACE_EVENT (4, "doaio");
758 
759 	spin_lock (&adapter_lock);
760 
761 	if (adapter_handler)
762 		(*adapter_handler) (intparm);
763 
764 	spin_unlock (&adapter_lock);
765 
766 	return;
767 }
768 
769 void s390_free_irq (unsigned int irq, void *dev_id);
770 
771 /*
772  * Note : internal use of irqflags SA_PROBE for NOT path grouping
773  *
774  */
/*
 * Request ownership of a subchannel and enable it.
 * io_handler is called for interrupts on the subchannel; dev_id must be
 * non-NULL and identifies the owner (checked again in s390_free_irq).
 * irqflags: SA_FORCE   - take the device even if "unfriendly" (locked by
 *                        another system),
 *           SA_DOPATHGROUP - establish a path group,
 *           SA_PROBE   - internal: sense only, don't path-group.
 * Returns 0 on success, -EINVAL/-ENODEV for bad arguments/subchannel,
 * -EBUSY if already owned, -EUSERS if the device is locked elsewhere.
 */
int
s390_request_irq_special (int irq,
			  io_handler_func_t io_handler,
			  not_oper_handler_func_t not_oper_handler,
			  unsigned long irqflags,
			  const char *devname, void *dev_id)
{
	int retval = 0;
	unsigned long flags;
	char dbf_txt[15];
	int retry;

	if (irq >= __MAX_SUBCHANNELS)
		return -EINVAL;

	if (!io_handler || !dev_id)
		return -EINVAL;

	if (ioinfo[irq] == INVALID_STORAGE_AREA)
		return -ENODEV;

	if (ioinfo[irq]->st)
		return -ENODEV;

	sprintf (dbf_txt, "reqsp%x", irq);
	CIO_TRACE_EVENT (4, dbf_txt);

	/*
	 * The following block of code has to be executed atomically
	 */
	s390irq_spin_lock_irqsave (irq, flags);

	if (ioinfo[irq]->ui.flags.unfriendly &&
	    !(irqflags & SA_FORCE)) {
		/* device locked by someone else, caller didn't force */
		retval = -EUSERS;

	} else if (!ioinfo[irq]->ui.flags.ready) {
		retry = 5;

		ioinfo[irq]->irq_desc.handler = io_handler;
		ioinfo[irq]->irq_desc.name = devname;
		ioinfo[irq]->irq_desc.dev_id = dev_id;
		ioinfo[irq]->ui.flags.ready = 1;

		/* enable and verify via stsch that it really got enabled */
		do {
			retval = enable_subchannel (irq);
			if (retval) {
				ioinfo[irq]->ui.flags.ready = 0;
				break;
			}

			stsch (irq, &ioinfo[irq]->schib);
			if (ioinfo[irq]->schib.pmcw.ena)
				retry = 0;
			else
				retry--;

		} while (retry);
	} else {
		/*
		 *  interrupt already owned, and shared interrupts
		 *   aren't supported on S/390.
		 */
		retval = -EBUSY;

	}

	s390irq_spin_unlock_irqrestore (irq, flags);

	if (retval == 0) {
		if (irqflags & SA_DOPATHGROUP) {
			ioinfo[irq]->ui.flags.pgid_supp = 1;
			ioinfo[irq]->ui.flags.notacccap = 1;
		}
		if ((irqflags & SA_DOPATHGROUP) &&
		    (!ioinfo[irq]->ui.flags.pgid ||
		     irqflags & SA_PROBE)) {
			pgid_t pgid;
			int i, mask;
			/*
			 * Do an initial SensePGID to find out if device
			 * is locked by someone else.
			 */
			memcpy(&pgid, global_pgid, sizeof(pgid_t));

			/* try each path in the operational path mask */
			retval = -EAGAIN;
			for (i=0; i<8 && retval==-EAGAIN; i++) {

				mask = (0x80 >> i) & ioinfo[irq]->opm;

				if (!mask)
					continue;

				retval = s390_SensePGID(irq, mask, &pgid);

				if (retval == -EOPNOTSUPP)
					/* Doesn't prevent us from proceeding */
					retval = 0;
			}

		}
		if (!(irqflags & SA_PROBE) &&
		    (irqflags & SA_DOPATHGROUP) &&
		    (!ioinfo[irq]->ui.flags.unfriendly))
			s390_DevicePathVerification (irq, 0);

		if (ioinfo[irq]->ui.flags.unfriendly &&
		    !(irqflags & SA_FORCE)) {
			/*
			 * We found out during path verification that the
			 * device is locked by someone else and we have to
			 * let the device driver know.
			 */
			retval = -EUSERS;
			free_irq(irq, dev_id);
		} else {
			ioinfo[irq]->ui.flags.newreq = 1;
			ioinfo[irq]->nopfunc = not_oper_handler;
		}
	}

	if (cio_debug_initialized)
		debug_int_event (cio_debug_trace_id, 4, retval);

	return retval;
}
901 
902 int
s390_request_irq(unsigned int irq,void (* handler)(int,void *,struct pt_regs *),unsigned long irqflags,const char * devname,void * dev_id)903 s390_request_irq (unsigned int irq,
904 		  void (*handler) (int, void *, struct pt_regs *),
905 		  unsigned long irqflags, const char *devname, void *dev_id)
906 {
907 	int ret;
908 
909 	ret = s390_request_irq_special (irq,
910 					(io_handler_func_t) handler,
911 					NULL, irqflags, devname, dev_id);
912 
913 	if (ret == 0) {
914 		ioinfo[irq]->ui.flags.newreq = 0;
915 
916 	}
917 	return (ret);
918 }
919 
/*
 * Release a subchannel previously acquired with s390_request_irq(_special).
 * dev_id must match the one passed at request time. Disables the
 * subchannel; if it is busy, outstanding I/O is terminated with halt_IO,
 * then clear_IO, dropping the lock around the udelay waits.
 */
void
s390_free_irq (unsigned int irq, void *dev_id)
{
	unsigned long flags;
	int ret;

	char dbf_txt[15];

	if (irq >= __MAX_SUBCHANNELS || ioinfo[irq] == INVALID_STORAGE_AREA)
		return;

	if (ioinfo[irq]->st)
		return;

	sprintf (dbf_txt, "free%x", irq);
	CIO_TRACE_EVENT (2, dbf_txt);

	s390irq_spin_lock_irqsave (irq, flags);

#ifdef  CONFIG_KERNEL_DEBUG
	if (irq != cons_dev)
		printk (KERN_DEBUG "Trying to free IRQ%d\n", irq);
#endif
	CIO_MSG_EVENT(2, "Trying to free IRQ %d\n", irq);

	/*
	 * disable the device and reset all IRQ info if
	 *  the IRQ is actually owned by the handler ...
	 */
	if (ioinfo[irq]->ui.flags.ready) {
		if (dev_id == ioinfo[irq]->irq_desc.dev_id) {
			/* start deregister */
			ioinfo[irq]->ui.flags.unready = 1;

			ret = disable_subchannel (irq);

			if (ret == -EBUSY) {

				/*
				 * kill it !
				 * We try to terminate the I/O by halt_IO first,
				 * then clear_IO.
				 * Because the device may be gone (machine
				 * check handling), we can't use sync I/O.
				 */

				/* intparm 0xC8C1D3E3 is EBCDIC "HALT" */
				halt_IO (irq, 0xC8C1D3E3, 0);
				s390irq_spin_unlock_irqrestore (irq, flags);
				udelay (200000);	/* 200 ms */
				s390irq_spin_lock_irqsave (irq, flags);

				ret = disable_subchannel (irq);

				if (ret == -EBUSY) {

					/* intparm 0x40C3D3D9 is EBCDIC " CLR" */
					clear_IO (irq, 0x40C3D3D9, 0);
					s390irq_spin_unlock_irqrestore (irq,
									flags);
					udelay (1000000);	/* 1000 ms */
					s390irq_spin_lock_irqsave (irq, flags);

					/* give it a very last try ... */
					disable_subchannel (irq);

					if (ioinfo[irq]->ui.flags.busy) {
						printk (KERN_CRIT
							"free_irq(%04X) "
							"- device %04X busy, retry "
							"count exceeded\n", irq,
							ioinfo[irq]->devstat.
							devno);
						CIO_MSG_EVENT( 0,
							       "free_irq(%04X) - "
							       "device %04X busy, "
							       "retry count exceeded\n",
							       irq,
							       ioinfo[irq]->
							       devstat.devno);

					}
				}
			}

			ioinfo[irq]->ui.flags.ready = 0;
			ioinfo[irq]->ui.flags.unready = 0;	/* deregister ended */

			ioinfo[irq]->nopfunc = NULL;

			s390irq_spin_unlock_irqrestore (irq, flags);
		} else {
			s390irq_spin_unlock_irqrestore (irq, flags);

			printk (KERN_ERR "free_irq(%04X) : error, "
				"dev_id does not match !\n", irq);
			CIO_MSG_EVENT( 0,
				       "free_irq(%04X) : error, "
				       "dev_id does not match !\n",
				       irq);

		}
	} else {
		s390irq_spin_unlock_irqrestore (irq, flags);

		printk (KERN_ERR "free_irq(%04X) : error, "
			"no action block ... !\n", irq);
		CIO_MSG_EVENT(0,
			      "free_irq(%04X) : error, "
			      "no action block ... !\n", irq);

	}
}
1031 
1032 /*
1033  * Enable IRQ by modifying the subchannel
1034  */
/*
 * Enable the subchannel: set pmcw.ena (and the interruption subclass:
 * isc 7 for the console, isc 3 for all other devices) and drive msch,
 * retrying up to 5 times on status-pending or busy conditions.
 * Returns 0 on success, -ENODEV/-EIO/-EBUSY on failure.
 */
static int
enable_subchannel (unsigned int irq)
{
	int ret = 0;
	int ccode;
	int retry = 5;
	char dbf_txt[15];

	SANITY_CHECK (irq);

	sprintf (dbf_txt, "ensch%x", irq);
	CIO_TRACE_EVENT (2, dbf_txt);

	/*
	 * If a previous disable request is pending we reset it. However, this
	 *  status implies that the device may (still) be not-operational.
	 */
	if (ioinfo[irq]->ui.flags.d_disable) {
		ioinfo[irq]->ui.flags.d_disable = 0;
		ret = 0;
	} else {
		ccode = stsch (irq, &(ioinfo[irq]->schib));

		if (ccode) {
			ret = -ENODEV;
		} else {
			ioinfo[irq]->schib.pmcw.ena = 1;

			/* console gets its own interruption subclass */
			if (irq == cons_dev) {
				ioinfo[irq]->schib.pmcw.isc = 7;
			} else {
				ioinfo[irq]->schib.pmcw.isc = 3;

			}

			do {
				ccode = msch (irq, &(ioinfo[irq]->schib));

				switch (ccode) {
				case 0:	/* ok */
					ret = 0;
					retry = 0;
					break;

				case 1:	/* status pending */

					/* process the pending status, then retry */
					ioinfo[irq]->ui.flags.s_pend = 1;
					s390_process_IRQ (irq);
					ioinfo[irq]->ui.flags.s_pend = 0;

					ret = -EIO;
					/*
					 * might be overwritten on re-driving
					 * the msch()
					 */
					retry--;
					break;

				case 2:	/* busy */
					udelay (100);	/* allow for recovery */
					ret = -EBUSY;
					retry--;
					break;

				case 3:	/* not oper */
					ioinfo[irq]->ui.flags.oper = 0;
					retry = 0;
					ret = -ENODEV;
					break;
				}

			} while (retry);

		}
	}

	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);

	return (ret);
}
1116 
1117 /*
1118  * Disable IRQ by modifying the subchannel
1119  */
/*
 * disable_subchannel - disable a subchannel by clearing the enable bit
 *                      in its PMCW via msch().
 *
 * Returns 0 on success (including the delayed-disable case), -EBUSY if
 *  an I/O request is still in flight or the subchannel reports busy,
 *  -EIO if msch() keeps presenting pending status after all retries.
 */
static int
disable_subchannel (unsigned int irq)
{
	int cc;			/* condition code */
	int ret = 0;		/* function return value */
	int retry = 5;		/* msch() retry budget */
	char dbf_txt[15];	/* scratch buffer for trace events */

	SANITY_CHECK (irq);

	sprintf (dbf_txt, "dissch%x", irq);
	CIO_TRACE_EVENT (2, dbf_txt);

	if (ioinfo[irq]->ui.flags.busy) {
		/*
		 * the disable function must not be called while there are
		 *  requests pending for completion !
		 */
		ret = -EBUSY;
	} else {

		/*
		 * If device isn't operational we have to perform delayed
		 *  disabling when the next interrupt occurs - unless the
		 *  irq is re-requested prior to the interrupt to occur.
		 */
		cc = stsch (irq, &(ioinfo[irq]->schib));

		if (cc == 3) {
			/* not operational: mark for delayed disable */
			ioinfo[irq]->ui.flags.oper = 0;
			ioinfo[irq]->ui.flags.d_disable = 1;

			ret = 0;
		} else {	/* cc == 0 */

			/* clear the enable bit and push the change with msch() */
			ioinfo[irq]->schib.pmcw.ena = 0;

			do {
				cc = msch (irq, &(ioinfo[irq]->schib));

				switch (cc) {
				case 0:	/* ok */
					retry = 0;
					ret = 0;
					break;

				case 1:	/* status pending */
					/*
					 * Drain the pending status inline;
					 *  s_pend tells the IRQ handler this is
					 *  synchronous cleanup, not normal I/O.
					 */
					ioinfo[irq]->ui.flags.s_pend = 1;
					s390_process_IRQ (irq);
					ioinfo[irq]->ui.flags.s_pend = 0;

					ret = -EIO;
					/*
					 * might be overwritten on re-driving
					 * the msch() call
					 */
					retry--;
					break;

				case 2:	/* busy; this should not happen! */
					printk (KERN_CRIT
						"disable_subchannel(%04X) "
						"- unexpected busy condition for "
						"device %04X received !\n", irq,
						ioinfo[irq]->devstat.devno);
					CIO_MSG_EVENT(0,
						      "disable_subchannel(%04X) "
						      "- unexpected busy condition "
						      "for device %04X received !\n",
						      irq,
						      ioinfo[irq]->devstat.
						      devno);
					retry = 0;
					ret = -EBUSY;
					break;

				case 3:	/* not oper */
					/*
					 * should hardly occur ?!
					 */
					ioinfo[irq]->ui.flags.oper = 0;
					ioinfo[irq]->ui.flags.d_disable = 1;
					retry = 0;

					ret = 0;
					/*
					 * if the device has gone, we don't need
					 * to disable it anymore !
					 */
					break;

				}

			} while (retry);

		}
	}

	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);

	return (ret);
}
1223 
/*
 * s390_init_IRQ - one-time initialization of the common I/O layer.
 *
 * Runs with I/O interrupts disabled: scans all subchannels, builds the
 *  global path group ID and starts device recognition. Must be called
 *  during boot (uses bootmem allocations).
 */
void
s390_init_IRQ (void)
{
	unsigned long flags;	/* PSW flags */
	long cr6 __attribute__ ((aligned (8)));
	cpuid_t cpuid;

	/* record the IPL timestamp (TOD clock) - also used in the path group ID */
	asm volatile ("STCK %0":"=m" (irq_IPL_TOD));

	/* scratch schib/irb used during initial subchannel probing */
	p_init_schib = alloc_bootmem_low (sizeof (schib_t));
	p_init_irb = alloc_bootmem_low (sizeof (irb_t));

	/*
	 * As we don't know about the calling environment
	 *  we assure running disabled. Before leaving the
	 *  function we re-establish the old environment.
	 *
	 * Note : as we don't need a system wide lock, therefore
	 *        we shouldn't use cli(), but __cli() as this
	 *        affects the current CPU only.
	 */
	__save_flags (flags);
	__cli ();

	/*
	 * disable all interrupts
	 */
	cr6 = 0;
	__ctl_load (cr6, 6, 6);

	s390_process_subchannels ();

	if (cio_count_irqs) {
		int i;
		for (i = 0; i < NR_CPUS; i++)
			s390_irq_count[i] = 0;
	}


	/*
	 * Let's build our path group ID here.
	 */

	global_pgid = (pgid_t *)alloc_bootmem(sizeof(pgid_t));

	cpuid = *(cpuid_t*) __LC_CPUID;

	if (MACHINE_NEW_STIDP)
		global_pgid->cpu_addr = 0x8000;
	else {
#ifdef CONFIG_SMP
		global_pgid->cpu_addr = hard_smp_processor_id();
#else
		global_pgid->cpu_addr = 0;
#endif
	}
	global_pgid->cpu_id = cpuid.ident;
	global_pgid->cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	global_pgid->tod_high = *(__u32 *) & irq_IPL_TOD;


	/*
	 * enable default I/O-interrupt subclass 3
	 */
	cr6 = 0x10000000;
	__ctl_load (cr6, 6, 6);

	s390_device_recognition_all ();

	init_IRQ_complete = 1;

	__restore_flags (flags);

	return;
}
1299 
1300 /*
1301  * dummy handler, used during init_IRQ() processing for compatibility only
1302  */
/*
 * init_IRQ_handler - dummy interrupt handler, used during init_IRQ()
 *  processing for compatibility only; intentionally ignores all
 *  interrupts presented while the real handlers are not yet in place.
 */
void
init_IRQ_handler (int irq, void *dev_id, struct pt_regs *regs)
{
	/* this is a dummy handler only ... */
}
1308 
1309 int
s390_start_IO(int irq,ccw1_t * cpa,unsigned long user_intparm,__u8 lpm,unsigned long flag)1310 s390_start_IO (int irq,		/* IRQ */
1311 	       ccw1_t * cpa,	/* logical channel prog addr */
1312 	       unsigned long user_intparm,	/* interruption parameter */
1313 	       __u8 lpm,	/* logical path mask */
1314 	       unsigned long flag)
1315 {				/* flags */
1316 	int ccode;
1317 	int ret = 0;
1318 	char dbf_txt[15];
1319 
1320 	SANITY_CHECK (irq);
1321 
1322 	/*
1323 	 * The flag usage is mutal exclusive ...
1324 	 */
1325 	if ((flag & DOIO_EARLY_NOTIFICATION)
1326 	    && (flag & DOIO_REPORT_ALL)) {
1327 		return (-EINVAL);
1328 
1329 	}
1330 
1331 	sprintf (dbf_txt, "stIO%x", irq);
1332 	CIO_TRACE_EVENT (4, dbf_txt);
1333 
1334 	/*
1335 	 * setup ORB
1336 	 */
1337 	ioinfo[irq]->orb.intparm = (__u32) (long) &ioinfo[irq]->u_intparm;
1338 	ioinfo[irq]->orb.fmt = 1;
1339 
1340 	ioinfo[irq]->orb.pfch = !(flag & DOIO_DENY_PREFETCH);
1341 	ioinfo[irq]->orb.spnd = (flag & DOIO_ALLOW_SUSPEND ? TRUE : FALSE);
1342 	ioinfo[irq]->orb.ssic = ((flag & DOIO_ALLOW_SUSPEND)
1343 				 && (flag & DOIO_SUPPRESS_INTER));
1344 
1345 	if (flag & DOIO_VALID_LPM) {
1346 		ioinfo[irq]->orb.lpm = lpm;
1347 	} else {
1348 		ioinfo[irq]->orb.lpm = ioinfo[irq]->opm;
1349 
1350 	}
1351 
1352 #ifdef CONFIG_ARCH_S390X
1353 	/*
1354 	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
1355 	 */
1356 	ioinfo[irq]->orb.c64 = 1;
1357 	ioinfo[irq]->orb.i2k = 0;
1358 #endif
1359 
1360 	ioinfo[irq]->orb.cpa = (__u32) virt_to_phys (cpa);
1361 
1362 	/*
1363 	 * If sync processing was requested we lock the sync ISC, modify the
1364 	 *  device to present interrupts for this ISC only and switch the
1365 	 *  CPU to handle this ISC + the console ISC exclusively.
1366 	 */
1367 	if (flag & DOIO_WAIT_FOR_INTERRUPT) {
1368 		ret = enable_cpu_sync_isc (irq);
1369 
1370 		if (ret) {
1371 			return (ret);
1372 		}
1373 
1374 	}
1375 
1376 	if (flag & DOIO_DONT_CALL_INTHDLR) {
1377 		ioinfo[irq]->ui.flags.repnone = 1;
1378 
1379 	}
1380 
1381 	/*
1382 	 * Issue "Start subchannel" and process condition code
1383 	 */
1384 	if (flag & DOIO_USE_DIAG98) {
1385 		ioinfo[irq]->orb.key = get_storage_key() >> 4;
1386 		ioinfo[irq]->orb.cpa =
1387 			(__u32) pfix_get_addr((void *)ioinfo[irq]->orb.cpa);
1388 		ccode = diag98 (irq, &(ioinfo[irq]->orb));
1389 	} else {
1390 		ccode = ssch (irq, &(ioinfo[irq]->orb));
1391 	}
1392 
1393 	sprintf (dbf_txt, "ccode:%d", ccode);
1394 	CIO_TRACE_EVENT (4, dbf_txt);
1395 
1396 	switch (ccode) {
1397 	case 0:
1398 
1399 		if (!ioinfo[irq]->ui.flags.w4sense) {
1400 			/*
1401 			 * init the device driver specific devstat irb area
1402 			 *
1403 			 * Note : don�t clear saved irb info in case of sense !
1404 			 */
1405 			memset (&((devstat_t *) ioinfo[irq]->irq_desc.dev_id)->
1406 				ii.irb, '\0', sizeof (irb_t));
1407 		}
1408 
1409 		memset (&ioinfo[irq]->devstat.ii.irb, '\0', sizeof (irb_t));
1410 
1411 		/*
1412 		 * initialize device status information
1413 		 */
1414 		ioinfo[irq]->ui.flags.busy = 1;
1415 		ioinfo[irq]->ui.flags.doio = 1;
1416 
1417 		ioinfo[irq]->u_intparm = user_intparm;
1418 		ioinfo[irq]->devstat.cstat = 0;
1419 		ioinfo[irq]->devstat.dstat = 0;
1420 		ioinfo[irq]->devstat.lpum = 0;
1421 		ioinfo[irq]->devstat.flag = DEVSTAT_START_FUNCTION;
1422 		ioinfo[irq]->devstat.scnt = 0;
1423 
1424 		ioinfo[irq]->ui.flags.fast = 0;
1425 		ioinfo[irq]->ui.flags.repall = 0;
1426 
1427 		/*
1428 		 * Check for either early (FAST) notification requests
1429 		 *  or if we are to return all interrupt info.
1430 		 * Default is to call IRQ handler at secondary status only
1431 		 */
1432 		if (flag & DOIO_EARLY_NOTIFICATION) {
1433 			ioinfo[irq]->ui.flags.fast = 1;
1434 		} else if (flag & DOIO_REPORT_ALL) {
1435 			ioinfo[irq]->ui.flags.repall = 1;
1436 
1437 		}
1438 
1439 		/*
1440 		 * If synchronous I/O processing is requested, we have
1441 		 *  to wait for the corresponding interrupt to occur by
1442 		 *  polling the interrupt condition. However, as multiple
1443 		 *  interrupts may be outstanding, we must not just wait
1444 		 *  for the first interrupt, but must poll until ours
1445 		 *  pops up.
1446 		 */
1447 		if (flag & DOIO_WAIT_FOR_INTERRUPT) {
1448 			unsigned long psw_mask;
1449 			int ccode;
1450 			uint64_t time_start;
1451 			uint64_t time_curr;
1452 
1453 			int ready = 0;
1454 			int io_sub = -1;
1455 			int do_retry = 1;
1456 
1457 			/*
1458 			 * We shouldn't perform a TPI loop, waiting for an
1459 			 *  interrupt to occur, but should load a WAIT PSW
1460 			 *  instead. Otherwise we may keep the channel subsystem
1461 			 *  busy, not able to present the interrupt. When our
1462 			 *  sync. interrupt arrived we reset the I/O old PSW to
1463 			 *  its original value.
1464 			 */
1465 
1466 			ccode = iac ();
1467 
1468 			switch (ccode) {
1469 			case 0:	/* primary-space */
1470 				psw_mask = _IO_PSW_MASK
1471 				    | _PSW_PRIM_SPACE_MODE | _PSW_IO_WAIT;
1472 				break;
1473 			case 1:	/* secondary-space */
1474 				psw_mask = _IO_PSW_MASK
1475 				    | _PSW_SEC_SPACE_MODE | _PSW_IO_WAIT;
1476 				break;
1477 			case 2:	/* access-register */
1478 				psw_mask = _IO_PSW_MASK
1479 				    | _PSW_ACC_REG_MODE | _PSW_IO_WAIT;
1480 				break;
1481 			case 3:	/* home-space */
1482 				psw_mask = _IO_PSW_MASK
1483 				    | _PSW_HOME_SPACE_MODE | _PSW_IO_WAIT;
1484 				break;
1485 			default:
1486 				panic ("start_IO() : unexpected "
1487 				       "address-space-control %d\n", ccode);
1488 				break;
1489 			}
1490 
1491 			/*
1492 			 * Martin didn't like modifying the new PSW, now we take
1493 			 *  a fast exit in do_IRQ() instead
1494 			 */
1495 			*(__u32 *) __LC_SYNC_IO_WORD = 1;
1496 
1497 			asm volatile ("STCK %0":"=m" (time_start));
1498 
1499 			time_start = time_start >> 32;
1500 
1501 			do {
1502 				if (flag & DOIO_TIMEOUT) {
1503 					tpi_info_t tpi_info = { 0, };
1504 
1505 					do {
1506 						if (tpi (&tpi_info) == 1) {
1507 							io_sub = tpi_info.irq;
1508 							break;
1509 						} else {
1510 							udelay (100);	/* usecs */
1511 							asm volatile
1512 							 ("STCK %0":"=m"
1513 							  (time_curr));
1514 
1515 							if (((time_curr >> 32) -
1516 							     time_start) >= 3)
1517 								do_retry = 0;
1518 
1519 						}
1520 
1521 					} while (do_retry);
1522 				} else {
1523 					__load_psw_mask (psw_mask);
1524 
1525 					io_sub =
1526 					    (__u32) *
1527 					    (__u16 *) __LC_SUBCHANNEL_NR;
1528 
1529 				}
1530 
1531 				if (do_retry)
1532 					ready = s390_process_IRQ (io_sub);
1533 
1534 				/*
1535 				 * surrender when retry count's exceeded ...
1536 				 */
1537 			} while (!((io_sub == irq)
1538 				   && (ready == 1))
1539 				 && do_retry);
1540 
1541 			*(__u32 *) __LC_SYNC_IO_WORD = 0;
1542 
1543 			if (!do_retry)
1544 				ret = -ETIMEDOUT;
1545 
1546 		}
1547 
1548 		break;
1549 
1550 	case 1:		/* status pending */
1551 
1552 		/*
1553 		 * Don't do an inline processing of pending interrupt conditions
1554 		 * while doing async. I/O. The interrupt will pop up when we are
1555 		 * enabled again and the I/O can be retried.
1556 		 */
1557 		if (!ioinfo[irq]->ui.flags.syncio) {
1558 			ret = -EBUSY;
1559 			break;
1560 		}
1561 
1562 		ioinfo[irq]->devstat.flag = DEVSTAT_START_FUNCTION
1563 		    | DEVSTAT_STATUS_PENDING;
1564 
1565 		/*
1566 		 * initialize the device driver specific devstat irb area
1567 		 */
1568 		memset (&((devstat_t *) ioinfo[irq]->irq_desc.dev_id)->ii.irb,
1569 			'\0', sizeof (irb_t));
1570 
1571 		/*
1572 		 * Let the common interrupt handler process the pending status.
1573 		 *  However, we must avoid calling the user action handler, as
1574 		 *  it won't be prepared to handle a pending status during
1575 		 *  do_IO() processing inline. This also implies that process_IRQ
1576 		 *  must terminate synchronously - especially if device sensing
1577 		 *  is required.
1578 		 */
1579 		ioinfo[irq]->ui.flags.s_pend = 1;
1580 		ioinfo[irq]->ui.flags.busy = 1;
1581 		ioinfo[irq]->ui.flags.doio = 1;
1582 
1583 		s390_process_IRQ (irq);
1584 
1585 		ioinfo[irq]->ui.flags.s_pend = 0;
1586 		ioinfo[irq]->ui.flags.busy = 0;
1587 		ioinfo[irq]->ui.flags.doio = 0;
1588 
1589 		ioinfo[irq]->ui.flags.repall = 0;
1590 		ioinfo[irq]->ui.flags.w4final = 0;
1591 
1592 		ioinfo[irq]->devstat.flag |= DEVSTAT_FINAL_STATUS;
1593 
1594 		/*
1595 		 * In multipath mode a condition code 3 implies the last path
1596 		 *  has gone, except we have previously restricted the I/O to
1597 		 *  a particular path. A condition code 1 (0 won't occur)
1598 		 *  results in return code EIO as well as 3 with another path
1599 		 *  than the one used (i.e. path available mask is non-zero).
1600 		 */
1601 		if (ioinfo[irq]->devstat.ii.irb.scsw.cc == 3) {
1602 
1603 			if (ioinfo[irq]->opm == 0) {
1604 				ret = -ENODEV;
1605 				ioinfo[irq]->ui.flags.oper = 0;
1606 			} else {
1607 				ret = -EIO;
1608 
1609 			}
1610 
1611 			ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
1612 
1613 #ifdef CONFIG_DEBUG_IO
1614 			{
1615 				char buffer[80];
1616 
1617 				stsch (irq, &(ioinfo[irq]->schib));
1618 
1619 				sprintf (buffer,
1620 					 "s390_start_IO(%04X) - irb for "
1621 					 "device %04X, after status pending\n",
1622 					 irq, ioinfo[irq]->devstat.devno);
1623 
1624 				s390_displayhex (buffer,
1625 						 &(ioinfo[irq]->devstat.ii.irb),
1626 						 sizeof (irb_t));
1627 
1628 				sprintf (buffer,
1629 					 "s390_start_IO(%04X) - schib for "
1630 					 "device %04X, after status pending\n",
1631 					 irq, ioinfo[irq]->devstat.devno);
1632 
1633 				s390_displayhex (buffer,
1634 						 &(ioinfo[irq]->schib),
1635 						 sizeof (schib_t));
1636 
1637 				if (ioinfo[irq]->devstat.
1638 				    flag & DEVSTAT_FLAG_SENSE_AVAIL) {
1639 					sprintf (buffer,
1640 						 "s390_start_IO(%04X) "
1641 						 "- sense data for device %04X,"
1642 						 " after status pending\n",
1643 						 irq,
1644 						 ioinfo[irq]->devstat.devno);
1645 
1646 					s390_displayhex (buffer,
1647 							 ioinfo[irq]->irq_desc.
1648 							 dev_id->ii.sense.data,
1649 							 ioinfo[irq]->irq_desc.
1650 							 dev_id->rescnt);
1651 
1652 				}
1653 			}
1654 #endif
1655 			if (cio_debug_initialized) {
1656 				stsch (irq, &(ioinfo[irq]->schib));
1657 
1658 				sprintf(dbf_txt, "sp%x", irq);
1659 				CIO_TRACE_EVENT(2, dbf_txt);
1660 				CIO_TRACE_EVENT(2, "irb:");
1661 				CIO_HEX_EVENT(2, &(ioinfo[irq]->devstat.ii.irb),
1662 					      sizeof (irb_t));
1663 				CIO_TRACE_EVENT(2, "schib:");
1664 				CIO_HEX_EVENT(2, &(ioinfo[irq]->schib),
1665 					      sizeof (schib_t));
1666 
1667 				if (ioinfo[irq]->devstat.
1668 				    flag & DEVSTAT_FLAG_SENSE_AVAIL) {
1669 					CIO_TRACE_EVENT(2, "sense:");
1670 					CIO_HEX_EVENT(2, ioinfo[irq]->irq_desc.
1671 						      dev_id->ii.sense.data,
1672 						      ioinfo[irq]->irq_desc.
1673 						      dev_id->rescnt);
1674 
1675 				}
1676 			}
1677 		} else {
1678 			ret = -EIO;
1679 			ioinfo[irq]->devstat.flag &= ~DEVSTAT_NOT_OPER;
1680 			ioinfo[irq]->ui.flags.oper = 1;
1681 
1682 		}
1683 
1684 		break;
1685 
1686 	case 2:		/* busy */
1687 
1688 		ret = -EBUSY;
1689 		break;
1690 
1691 	default:		/* device/path not operational */
1692 
1693 		if (flag & DOIO_VALID_LPM) {
1694 			ioinfo[irq]->opm &= ~lpm;
1695 		} else {
1696 			ioinfo[irq]->opm = 0;
1697 
1698 		}
1699 
1700 		if (ioinfo[irq]->opm == 0) {
1701 			ioinfo[irq]->ui.flags.oper = 0;
1702 			ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
1703 
1704 		}
1705 
1706 		ret = -ENODEV;
1707 
1708 		memcpy (ioinfo[irq]->irq_desc.dev_id,
1709 			&(ioinfo[irq]->devstat), sizeof (devstat_t));
1710 
1711 #ifdef CONFIG_DEBUG_IO
1712 
1713 		stsch (irq, &(ioinfo[irq]->schib));
1714 
1715 		sprintf (buffer, "s390_start_IO(%04X) - schib for "
1716 			 "device %04X, after 'not oper' status\n",
1717 			 irq, ioinfo[irq]->devstat.devno);
1718 
1719 		s390_displayhex (buffer,
1720 				 &(ioinfo[irq]->schib), sizeof (schib_t));
1721 #endif
1722 		if (cio_debug_initialized) {
1723 			stsch (irq, &(ioinfo[irq]->schib));
1724 			sprintf(dbf_txt, "no%x", irq);
1725 			CIO_TRACE_EVENT(2, dbf_txt);
1726 			CIO_HEX_EVENT(2, &(ioinfo[irq]->schib),
1727 				      sizeof (schib_t));
1728 		}
1729 
1730 		break;
1731 
1732 	}
1733 
1734 	if (flag & DOIO_WAIT_FOR_INTERRUPT) {
1735 		disable_cpu_sync_isc (irq);
1736 
1737 	}
1738 
1739 	if (flag & DOIO_DONT_CALL_INTHDLR) {
1740 		ioinfo[irq]->ui.flags.repnone = 0;
1741 
1742 	}
1743 
1744 	return (ret);
1745 }
1746 
1747 int
do_IO(int irq,ccw1_t * cpa,unsigned long user_intparm,__u8 lpm,unsigned long flag)1748 do_IO (int irq,			/* IRQ */
1749        ccw1_t * cpa,		/* channel program address */
1750        unsigned long user_intparm,	/* interruption parameter */
1751        __u8 lpm,		/* logical path mask */
1752        unsigned long flag)
1753 {				/* flags : see above */
1754 	int ret = 0;
1755 	char dbf_txt[15];
1756 
1757 	SANITY_CHECK (irq);
1758 
1759 	/* handler registered ? or free_irq() in process already ? */
1760 	if (!ioinfo[irq]->ui.flags.ready || ioinfo[irq]->ui.flags.unready) {
1761 		return (-ENODEV);
1762 
1763 	}
1764 
1765 	sprintf (dbf_txt, "doIO%x", irq);
1766 	CIO_TRACE_EVENT (4, dbf_txt);
1767 
1768 	if (ioinfo[irq]->ui.flags.noio)
1769 		return -EBUSY;
1770 
1771 	/*
1772 	 * Note: We ignore the device operational status - if not operational,
1773 	 *        the SSCH will lead to an -ENODEV condition ...
1774 	 */
1775 	if (!ioinfo[irq]->ui.flags.busy) {	/* last I/O completed ? */
1776 		ret = s390_start_IO (irq, cpa, user_intparm, lpm, flag);
1777 	} else {
1778 		ret = -EBUSY;
1779 
1780 	}
1781 
1782 	return (ret);
1783 
1784 }
1785 
1786 /*
1787  * resume suspended I/O operation
1788  */
1789 int
resume_IO(int irq)1790 resume_IO (int irq)
1791 {
1792 	int ret = 0;
1793 	char dbf_txt[15];
1794 
1795 	SANITY_CHECK (irq);
1796 
1797 	sprintf (dbf_txt, "resIO%x", irq);
1798 	CIO_TRACE_EVENT (4, dbf_txt);
1799 
1800 	/*
1801 	 * We allow for 'resume' requests only for active I/O operations
1802 	 */
1803 	if (ioinfo[irq]->ui.flags.busy) {
1804 		int ccode;
1805 
1806 		ccode = rsch (irq);
1807 
1808 		sprintf (dbf_txt, "ccode:%d", ccode);
1809 		CIO_TRACE_EVENT (4, dbf_txt);
1810 
1811 		switch (ccode) {
1812 		case 0:
1813 			break;
1814 
1815 		case 1:
1816 			ret = -EBUSY;
1817 			break;
1818 
1819 		case 2:
1820 			ret = -EINVAL;
1821 			break;
1822 
1823 		case 3:
1824 			/*
1825 			 * useless to wait for request completion
1826 			 *  as device is no longer operational !
1827 			 */
1828 			ioinfo[irq]->ui.flags.oper = 0;
1829 			ioinfo[irq]->ui.flags.busy = 0;
1830 			ret = -ENODEV;
1831 			break;
1832 
1833 		}
1834 
1835 	} else {
1836 		ret = -ENOTCONN;
1837 
1838 	}
1839 
1840 	return (ret);
1841 }
1842 
1843 /*
1844  * Note: The "intparm" parameter is not used by the halt_IO() function
1845  *       itself, as no ORB is built for the HSCH instruction. However,
1846  *       it allows the device interrupt handler to associate the upcoming
1847  *       interrupt with the halt_IO() request.
1848  */
/*
 * halt_IO - issue "Halt Subchannel" (HSCH) and process its condition
 *  code, optionally waiting synchronously for the resulting interrupt.
 *
 * Returns 0 on success (or when a pending sense op makes the request
 *  a no-op), -ENODEV if no handler is registered or the device is not
 *  operational, -EBUSY on busy conditions, -EIO after inline handling
 *  of a pending status.
 */
int
halt_IO (int irq, unsigned long user_intparm, unsigned long flag)
{				/* possible DOIO_WAIT_FOR_INTERRUPT */
	int ret;
	int ccode;
	char dbf_txt[15];

	SANITY_CHECK (irq);

	if (ioinfo[irq]->ui.flags.noio)
		return -EBUSY;

	/*
	 * we only allow for halt_IO if the device has an I/O handler associated
	 */
	if (!ioinfo[irq]->ui.flags.ready) {
		return -ENODEV;
	}
	/*
	 * we ignore the halt_io() request if ending_status was received but
	 *  a SENSE operation is waiting for completion.
	 */
	if (ioinfo[irq]->ui.flags.w4sense) {
		return 0;
	}
	sprintf (dbf_txt, "haltIO%x", irq);
	CIO_TRACE_EVENT (2, dbf_txt);

	/*
	 * If sync processing was requested we lock the sync ISC,
	 *  modify the device to present interrupts for this ISC only
	 *  and switch the CPU to handle this ISC + the console ISC
	 *  exclusively.
	 */
	if (flag & DOIO_WAIT_FOR_INTERRUPT) {
		ret = enable_cpu_sync_isc (irq);

		if (ret)
			return (ret);
	}

	/*
	 * Issue "Halt subchannel" and process condition code
	 */
	ccode = hsch (irq);

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);

	switch (ccode) {
	case 0:

		ioinfo[irq]->ui.flags.haltio = 1;

		/* if no start function is in flight, this halt owns the devstat */
		if (!ioinfo[irq]->ui.flags.doio) {
			ioinfo[irq]->ui.flags.busy = 1;
			ioinfo[irq]->u_intparm = user_intparm;
			ioinfo[irq]->devstat.cstat = 0;
			ioinfo[irq]->devstat.dstat = 0;
			ioinfo[irq]->devstat.lpum = 0;
			ioinfo[irq]->devstat.flag = DEVSTAT_HALT_FUNCTION;
			ioinfo[irq]->devstat.scnt = 0;

		} else {
			ioinfo[irq]->devstat.flag |= DEVSTAT_HALT_FUNCTION;

		}

		/*
		 * If synchronous I/O processing is requested, we have
		 *  to wait for the corresponding interrupt to occur by
		 *  polling the interrupt condition. However, as multiple
		 *  interrupts may be outstanding, we must not just wait
		 *  for the first interrupt, but must poll until ours
		 *  pops up.
		 */
		if (flag & DOIO_WAIT_FOR_INTERRUPT) {
			int io_sub;
			__u32 io_parm;	/* NOTE(review): read but apparently unused below */
			unsigned long psw_mask;
			int ccode;	/* intentionally shadows the outer ccode */

			int ready = 0;

			/*
			 * We shouldn't perform a TPI loop, waiting for
			 *  an interrupt to occur, but should load a
			 *  WAIT PSW instead. Otherwise we may keep the
			 *  channel subsystem busy, not able to present
			 *  the interrupt. When our sync. interrupt
			 *  arrived we reset the I/O old PSW to its
			 *  original value.
			 */

			ccode = iac ();

			/* pick a wait PSW matching the current address-space mode */
			switch (ccode) {
			case 0:	/* primary-space */
				psw_mask = _IO_PSW_MASK
				    | _PSW_PRIM_SPACE_MODE | _PSW_IO_WAIT;
				break;
			case 1:	/* secondary-space */
				psw_mask = _IO_PSW_MASK
				    | _PSW_SEC_SPACE_MODE | _PSW_IO_WAIT;
				break;
			case 2:	/* access-register */
				psw_mask = _IO_PSW_MASK
				    | _PSW_ACC_REG_MODE | _PSW_IO_WAIT;
				break;
			case 3:	/* home-space */
				psw_mask = _IO_PSW_MASK
				    | _PSW_HOME_SPACE_MODE | _PSW_IO_WAIT;
				break;
			default:
				panic ("halt_IO() : unexpected "
				       "address-space-control %d\n", ccode);
				break;
			}

			/*
			 * Martin didn't like modifying the new PSW, now we take
			 *  a fast exit in do_IRQ() instead
			 */
			*(__u32 *) __LC_SYNC_IO_WORD = 1;

			do {
				__load_psw_mask (psw_mask);

				io_parm = *(__u32 *) __LC_IO_INT_PARM;
				io_sub = (__u32) * (__u16 *) __LC_SUBCHANNEL_NR;

				ready = s390_process_IRQ (io_sub);

			} while (!((io_sub == irq) && (ready == 1)));

			*(__u32 *) __LC_SYNC_IO_WORD = 0;

		}

		ret = 0;
		break;

	case 1:		/* status pending */

		/*
		 * Don't do an inline processing of pending interrupt conditions
		 * while doing async. I/O. The interrupt will pop up when we are
		 * enabled again and the I/O can be retried.
		 */
		if (!ioinfo[irq]->ui.flags.syncio) {
			ret = -EBUSY;
			break;
		}

		ioinfo[irq]->devstat.flag |= DEVSTAT_STATUS_PENDING;

		/*
		 * initialize the device driver specific devstat irb area
		 */
		memset (&ioinfo[irq]->irq_desc.dev_id->ii.irb,
			'\0', sizeof (irb_t));

		/*
		 * Let the common interrupt handler process the pending
		 *  status. However, we must avoid calling the user
		 *  action handler, as it won't be prepared to handle
		 *  a pending status during do_IO() processing inline.
		 *  This also implies that s390_process_IRQ must
		 *  terminate synchronously - especially if device
		 *  sensing is required.
		 */
		ioinfo[irq]->ui.flags.s_pend = 1;
		ioinfo[irq]->ui.flags.busy = 1;
		ioinfo[irq]->ui.flags.doio = 1;

		s390_process_IRQ (irq);

		ioinfo[irq]->ui.flags.s_pend = 0;
		ioinfo[irq]->ui.flags.busy = 0;
		ioinfo[irq]->ui.flags.doio = 0;
		ioinfo[irq]->ui.flags.repall = 0;
		ioinfo[irq]->ui.flags.w4final = 0;

		ioinfo[irq]->devstat.flag |= DEVSTAT_FINAL_STATUS;

		/*
		 * In multipath mode a condition code 3 implies the last
		 *  path has gone, except we have previously restricted
		 *  the I/O to a particular path. A condition code 1
		 *  (0 won't occur) results in return code EIO as well
		 *  as 3 with another path than the one used (i.e. path
		 *  available mask is non-zero).
		 */
		if (ioinfo[irq]->devstat.ii.irb.scsw.cc == 3) {
			ret = -ENODEV;
			ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
			ioinfo[irq]->ui.flags.oper = 0;
		} else {
			ret = -EIO;
			ioinfo[irq]->devstat.flag &= ~DEVSTAT_NOT_OPER;
			ioinfo[irq]->ui.flags.oper = 1;

		}

		break;

	case 2:		/* busy */

		ret = -EBUSY;
		break;

	default:		/* device not operational */

		ret = -ENODEV;
		break;

	}

	/* release the sync ISC again if we claimed it above */
	if (flag & DOIO_WAIT_FOR_INTERRUPT) {
		disable_cpu_sync_isc (irq);

	}

	return (ret);
}
2074 
2075 /*
2076  * Note: The "intparm" parameter is not used by the clear_IO() function
2077  *       itself, as no ORB is built for the CSCH instruction. However,
2078  *       it allows the device interrupt handler to associate the upcoming
2079  *       interrupt with the clear_IO() request.
2080  */
/*
 * clear_IO - issue "Clear Subchannel" (CSCH) and process its condition
 *  code, optionally waiting synchronously for the resulting interrupt.
 *
 * Returns 0 on success (or when a pending sense op makes the request
 *  a no-op), -ENODEV if no handler is registered or the device is not
 *  operational, -EBUSY while I/O is blocked via the noio flag.
 * CSCH cannot return cc 1 or 2; those paths are asserted with BUG().
 */
int
clear_IO (int irq, unsigned long user_intparm, unsigned long flag)
{				/* possible DOIO_WAIT_FOR_INTERRUPT */
	int ret = 0;
	int ccode;
	char dbf_txt[15];

	SANITY_CHECK (irq);

	/* NOTE(review): likely redundant after SANITY_CHECK - verify */
	if (ioinfo[irq] == INVALID_STORAGE_AREA)
		return (-ENODEV);

	if (ioinfo[irq]->ui.flags.noio)
		return -EBUSY;
	/*
	 * we only allow for clear_IO if the device has an I/O handler associated
	 */
	if (!ioinfo[irq]->ui.flags.ready)
		return -ENODEV;
	/*
	 * we ignore the clear_io() request if ending_status was received but
	 *  a SENSE operation is waiting for completion.
	 */
	if (ioinfo[irq]->ui.flags.w4sense)
		return 0;

	sprintf (dbf_txt, "clearIO%x", irq);
	CIO_TRACE_EVENT (2, dbf_txt);

	/*
	 * If sync processing was requested we lock the sync ISC,
	 *  modify the device to present interrupts for this ISC only
	 *  and switch the CPU to handle this ISC + the console ISC
	 *  exclusively.
	 */
	if (flag & DOIO_WAIT_FOR_INTERRUPT) {
		ret = enable_cpu_sync_isc (irq);

		if (ret)
			return (ret);
	}

	/*
	 * Issue "Clear subchannel" and process condition code
	 */
	ccode = csch (irq);

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);

	switch (ccode) {
	case 0:

		ioinfo[irq]->ui.flags.haltio = 1;

		/* if no start function is in flight, this clear owns the devstat */
		if (!ioinfo[irq]->ui.flags.doio) {
			ioinfo[irq]->ui.flags.busy = 1;
			ioinfo[irq]->u_intparm = user_intparm;
			ioinfo[irq]->devstat.cstat = 0;
			ioinfo[irq]->devstat.dstat = 0;
			ioinfo[irq]->devstat.lpum = 0;
			ioinfo[irq]->devstat.flag = DEVSTAT_CLEAR_FUNCTION;
			ioinfo[irq]->devstat.scnt = 0;

		} else {
			ioinfo[irq]->devstat.flag |= DEVSTAT_CLEAR_FUNCTION;

		}

		/*
		 * If synchronous I/O processing is requested, we have
		 *  to wait for the corresponding interrupt to occur by
		 *  polling the interrupt condition. However, as multiple
		 *  interrupts may be outstanding, we must not just wait
		 *  for the first interrupt, but must poll until ours
		 *  pops up.
		 */
		if (flag & DOIO_WAIT_FOR_INTERRUPT) {
			int io_sub;
			__u32 io_parm;	/* NOTE(review): read but apparently unused below */
			unsigned long psw_mask;
			int ccode;	/* intentionally shadows the outer ccode */

			int ready = 0;

			/*
			 * We shouldn't perform a TPI loop, waiting for
			 *  an interrupt to occur, but should load a
			 *  WAIT PSW instead. Otherwise we may keep the
			 *  channel subsystem busy, not able to present
			 *  the interrupt. When our sync. interrupt
			 *  arrived we reset the I/O old PSW to its
			 *  original value.
			 */

			ccode = iac ();

			/* pick a wait PSW matching the current address-space mode */
			switch (ccode) {
			case 0:	/* primary-space */
				psw_mask = _IO_PSW_MASK
				    | _PSW_PRIM_SPACE_MODE | _PSW_IO_WAIT;
				break;
			case 1:	/* secondary-space */
				psw_mask = _IO_PSW_MASK
				    | _PSW_SEC_SPACE_MODE | _PSW_IO_WAIT;
				break;
			case 2:	/* access-register */
				psw_mask = _IO_PSW_MASK
				    | _PSW_ACC_REG_MODE | _PSW_IO_WAIT;
				break;
			case 3:	/* home-space */
				psw_mask = _IO_PSW_MASK
				    | _PSW_HOME_SPACE_MODE | _PSW_IO_WAIT;
				break;
			default:
				panic ("clear_IO() : unexpected "
				       "address-space-control %d\n", ccode);
				break;
			}

			/*
			 * Martin didn't like modifying the new PSW, now we take
			 *  a fast exit in do_IRQ() instead
			 */
			*(__u32 *) __LC_SYNC_IO_WORD = 1;

			do {
				__load_psw_mask (psw_mask);

				io_parm = *(__u32 *) __LC_IO_INT_PARM;
				io_sub = (__u32) * (__u16 *) __LC_SUBCHANNEL_NR;

				ready = s390_process_IRQ (io_sub);

			} while (!((io_sub == irq) && (ready == 1)));

			*(__u32 *) __LC_SYNC_IO_WORD = 0;

		}

		ret = 0;
		break;

	case 1:		/* no status pending for csh */
		BUG ();
		break;

	case 2:		/* no busy for csh */
		BUG ();
		break;

	default:		/* device not operational */

		ret = -ENODEV;
		break;

	}

	/* release the sync ISC again if we claimed it above */
	if (flag & DOIO_WAIT_FOR_INTERRUPT) {
		disable_cpu_sync_isc (irq);

	}

	return (ret);
}
2246 
2247 /*
2248  * Function: cancel_IO
2249  * Issues a "Cancel Subchannel" on the specified subchannel
2250  * Note: We don't need any fancy intparms and flags here
2251  *       since xsch is executed synchronously.
2252  * Only for common I/O internal use as for now.
2253  */
2254 int
cancel_IO(int irq)2255 cancel_IO (int irq)
2256 {
2257 
2258 	int ccode;
2259 	char dbf_txt[15];
2260 	int ret = 0;
2261 
2262 	SANITY_CHECK (irq);
2263 
2264 	sprintf (dbf_txt, "cancelIO%x", irq);
2265 	CIO_TRACE_EVENT (2, dbf_txt);
2266 
2267 	ccode = xsch (irq);
2268 
2269 	sprintf (dbf_txt, "ccode:%d", ccode);
2270 	CIO_TRACE_EVENT (2, dbf_txt);
2271 
2272 	switch (ccode) {
2273 
2274 	case 0:		/* success */
2275 		ret = 0;
2276 		break;
2277 
2278 	case 1:		/* status pending */
2279 
2280 		ret = -EBUSY;
2281 		break;
2282 
2283 	case 2:		/* not applicable */
2284 		ret = -EINVAL;
2285 		break;
2286 
2287 	default:		/* not oper */
2288 		ret = -ENODEV;
2289 	}
2290 
2291 	return ret;
2292 }
2293 
2294 /*
2295  * do_IRQ() handles all normal I/O device IRQ's (the special
2296  *          SMP cross-CPU interrupts have their own specific
2297  *          handlers).
2298  *
2299  */
asmlinkage void
do_IRQ (struct pt_regs regs)
{
	/*
	 * Get interrupt info from lowcore
	 */
	volatile tpi_info_t *tpi_info = (tpi_info_t *) (__LC_SUBCHANNEL_ID);
	int cpu = smp_processor_id ();

	/*
	 * take fast exit if CPU is in sync. I/O state
	 *
	 * Note: we have to turn off the WAIT bit and re-disable
	 *       interrupts prior to return as this was the initial
	 *       entry condition to synchronous I/O.
	 */
	if (*(__u32 *) __LC_SYNC_IO_WORD) {
		/* synchronous I/O in progress: leave the interrupt data in
		 * lowcore for the spinning requester and just adjust the
		 * interrupted PSW so the requester resumes with I/O and
		 * wait masked off again */
		regs.psw.mask &= ~(_PSW_WAIT_MASK_BIT | _PSW_IO_MASK_BIT);
		return;
	}
	/* endif */
#ifdef CONFIG_FAST_IRQ
	do {
#endif				/* CONFIG_FAST_IRQ */

		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			/* adapter interrupt: no subchannel involved (irq -1) */
			irq_enter (cpu, -1);
			do_adapter_IO (tpi_info->intparm);
			irq_exit (cpu, -1);
		} else {
			unsigned int irq = tpi_info->irq;

			/*
			 * fix me !!!
			 *
			 * instead of boxing the device, we need to schedule device
			 * recognition, the interrupt stays pending. We need to
			 * dynamically allocate an ioinfo structure, etc..
			 */
			if (ioinfo[irq] == INVALID_STORAGE_AREA) {
				return;	/* this keeps the device boxed ... */
			}

			if (ioinfo[irq]->st) {
				/* How can that be? */
				printk(KERN_WARNING "Received interrupt on "
				       "non-IO subchannel %x!\n", irq);
				return;
			}

			/* process the status under the per-subchannel lock */
			irq_enter (cpu, irq);
			s390irq_spin_lock (irq);
			s390_process_IRQ (irq);
			s390irq_spin_unlock (irq);
			irq_exit (cpu, irq);
		}

#ifdef CONFIG_FAST_IRQ

		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 */
	} while (tpi (NULL) != 0);

#endif				/* CONFIG_FAST_IRQ */

	return;
}
2374 
2375 /*
2376  * s390_process_IRQ() handles status pending situations and interrupts
2377  *
2378  * Called by : do_IRQ()             - for "real" interrupts
2379  *             s390_start_IO, halt_IO()
2380  *                                  - status pending cond. after SSCH, or HSCH
2381  *             disable_subchannel() - status pending conditions (after MSCH)
2382  *
2383  * Returns: 0 - no ending status received, no further action taken
2384  *          1 - interrupt handler was called with ending status
2385  */
int
s390_process_IRQ (unsigned int irq)
{
	int ccode;		/* cond code from tsch() operation */
	int irb_cc;		/* cond code from irb */
	int sdevstat;		/* struct devstat size to copy */
	unsigned int fctl;	/* function control */
	unsigned int stctl;	/* status   control */
	unsigned int actl;	/* activity control */

	int issense = 0;
	int ending_status = 0;
	int allow4handler = 1;
	int chnchk = 0;
	devstat_t *dp;		/* common I/O layer's devstat for this subchannel */
	devstat_t *udp;		/* devstat area registered by the device driver */

	char dbf_txt[15];
	char buffer[256];

	/* per-CPU interrupt accounting */
	if (cio_count_irqs) {
		int cpu = smp_processor_id ();
		s390_irq_count[cpu]++;
	}

	CIO_TRACE_EVENT (3, "procIRQ");
	sprintf (dbf_txt, "%x", irq);
	CIO_TRACE_EVENT (3, dbf_txt);

	if (ioinfo[irq] == INVALID_STORAGE_AREA) {
		/* we can't properly process the interrupt ... */
#ifdef CONFIG_DEBUG_IO
		printk (KERN_CRIT "s390_process_IRQ(%04X) - got interrupt "
			"for non-initialized subchannel!\n", irq);
#endif /* CONFIG_DEBUG_IO */
		CIO_MSG_EVENT (0,
			       "s390_process_IRQ(%04X) - got interrupt "
			       "for non-initialized subchannel!\n",
			       irq);
		/* clear the pending status so the interrupt cannot re-surface */
		tsch (irq, p_init_irb);
		return (1);

	}

	if (ioinfo[irq]->st) {
		/* can't be */
		BUG();
		return 1;
	}

	dp = &ioinfo[irq]->devstat;
	udp = ioinfo[irq]->irq_desc.dev_id;

	/*
	 * It might be possible that a device was not-oper. at the time
	 *  of free_irq() processing. This means the handler is no longer
	 *  available when the device possibly becomes ready again. In
	 *  this case we perform delayed disable_subchannel() processing.
	 */
	if (!ioinfo[irq]->ui.flags.ready) {
		if (!ioinfo[irq]->ui.flags.d_disable) {
#ifdef CONFIG_DEBUG_IO
			printk (KERN_CRIT "s390_process_IRQ(%04X) "
				"- no interrupt handler registered "
				"for device %04X !\n",
				irq, ioinfo[irq]->devstat.devno);
#endif				/* CONFIG_DEBUG_IO */
			CIO_MSG_EVENT(0,
				      "s390_process_IRQ(%04X) "
				      "- no interrupt handler "
				      "registered for device "
				      "%04X !\n",
				      irq,
				      ioinfo[irq]->devstat.devno);
		}
	}

	/*
	 * retrieve the i/o interrupt information (irb),
	 *  update the device specific status information
	 *  and possibly call the interrupt handler.
	 *
	 * Note 1: At this time we don't process the resulting
	 *         condition code (ccode) from tsch(), although
	 *         we probably should.
	 *
	 * Note 2: Here we will have to check for channel
	 *         check conditions and call a channel check
	 *         handler.
	 *
	 * Note 3: If a start function was issued, the interruption
	 *         parameter relates to it. If a halt function was
	 *         issued for an idle device, the intparm must not
	 *         be taken from lowcore, but from the devstat area.
	 */
	ccode = tsch (irq, &(dp->ii.irb));

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (3, dbf_txt);

	if (ccode == 1) {
#ifdef CONFIG_DEBUG_IO
		printk (KERN_INFO "s390_process_IRQ(%04X) - no status "
			 "pending...\n", irq);
#endif /* CONFIG_DEBUG_IO */
		CIO_MSG_EVENT(2,
			      "s390_process_IRQ(%04X) - no status pending\n",
			      irq);
	} else if (ccode == 3) {
#ifdef CONFIG_DEBUG_IO
		printk (KERN_WARNING "s390_process_IRQ(%04X) - subchannel "
			"is not operational!\n",
			irq);
#endif /* CONFIG_DEBUG_IO */
		CIO_MSG_EVENT(0,
			      "s390_process_IRQ(%04X) - subchannel "
			      "is not operational!\n",
			      irq);
	}

	/*
	 * We must only accumulate the status if the device is busy already
	 */
	if (ioinfo[irq]->ui.flags.busy) {
		dp->dstat |= dp->ii.irb.scsw.dstat;
		dp->cstat |= dp->ii.irb.scsw.cstat;
		dp->intparm = ioinfo[irq]->u_intparm;

	} else {
		dp->dstat = dp->ii.irb.scsw.dstat;
		dp->cstat = dp->ii.irb.scsw.cstat;

		dp->flag = 0;	/* reset status flags */
		dp->intparm = 0;

	}

	dp->lpum = dp->ii.irb.esw.esw1.lpum;

	/*
	 * reset device-busy bit if no longer set in irb
	 */
	if ((dp->dstat & DEV_STAT_BUSY)
	    && ((dp->ii.irb.scsw.dstat & DEV_STAT_BUSY) == 0)) {
		dp->dstat &= ~DEV_STAT_BUSY;

	}

	/*
	   * Save residual count and CCW information in case primary and
	   *  secondary status are presented with different interrupts.
	 */
	if (dp->ii.irb.scsw.stctl
	    & (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_INTER_STATUS)) {

		/*
		 * If the subchannel status shows status pending
		 * and we received a check condition, the count
		 * information is not meaningful.
		 */

		if (!((dp->ii.irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)
		      && (dp->ii.irb.scsw.cstat
			  & (SCHN_STAT_CHN_DATA_CHK
			     | SCHN_STAT_CHN_CTRL_CHK
			     | SCHN_STAT_INTF_CTRL_CHK
			     | SCHN_STAT_PROG_CHECK
			     | SCHN_STAT_PROT_CHECK
			     | SCHN_STAT_CHAIN_CHECK)))) {

			dp->rescnt = dp->ii.irb.scsw.count;
		} else {
			dp->rescnt = SENSE_MAX_COUNT;
		}

		dp->cpa = dp->ii.irb.scsw.cpa;

	}
	irb_cc = dp->ii.irb.scsw.cc;

	/*
	 * check for any kind of channel or interface control check but don't
	 * issue the message for the console device
	 */
	if ((dp->ii.irb.scsw.cstat
	     & (SCHN_STAT_CHN_DATA_CHK
		| SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK))) {
		if (irq != cons_dev)
			printk (KERN_WARNING
				"Channel-Check or Interface-Control-Check "
				"received\n"
				" ... device %04X on subchannel %04X, dev_stat "
				": %02X sch_stat : %02X\n",
				ioinfo[irq]->devstat.devno, irq, dp->dstat,
				dp->cstat);
		CIO_MSG_EVENT(0,
			      "Channel-Check or "
			      "Interface-Control-Check received\n");
		CIO_MSG_EVENT(0,
			      "... device %04X on subchannel %04X,"
			      " dev_stat: %02X sch_stat: %02X\n",
			      ioinfo[irq]->devstat.devno, irq,
			      dp->dstat, dp->cstat);


		chnchk = 1;

	}

	/*
	 * Determine whether the irb carries concurrent sense data; the
	 *  three early cases below are situations where the extended
	 *  status word is not valid.
	 */
	if (dp->ii.irb.scsw.ectl == 0) {
		issense = 0;
	} else if ((dp->ii.irb.scsw.stctl == SCSW_STCTL_STATUS_PEND)
		   && (dp->ii.irb.scsw.eswf == 0)) {
		issense = 0;
	} else if ((dp->ii.irb.scsw.stctl ==
		    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_INTER_STATUS))
		   && ((dp->ii.irb.scsw.actl & SCSW_ACTL_SUSPENDED) == 0)) {
		issense = 0;
	} else {
		issense = dp->ii.irb.esw.esw0.erw.cons;

	}

	if (issense) {
		dp->scnt = dp->ii.irb.esw.esw0.erw.scnt;
		dp->flag |= DEVSTAT_FLAG_SENSE_AVAIL;

		sdevstat = sizeof (devstat_t);

#ifdef CONFIG_DEBUG_IO
		if (irq != cons_dev)
			printk (KERN_DEBUG "s390_process_IRQ( %04X ) : "
				"concurrent sense bytes avail %d\n",
				irq, dp->scnt);
#endif
		CIO_MSG_EVENT(4,
			      "s390_process_IRQ( %04X ): "
			      "concurrent sense bytes avail %d\n",
			      irq, dp->scnt);
	} else {
		/* don't copy the sense data area ! */
		sdevstat = sizeof (devstat_t) - SENSE_MAX_COUNT;

	}

	switch (irb_cc) {
	case 1:		/* status pending */

		dp->flag |= DEVSTAT_STATUS_PENDING;

		/* fallthrough - a pending status is processed like a
		 *  normal interruption, with the flag set above */
	case 0:		/* normal i/o interruption */

		fctl = dp->ii.irb.scsw.fctl;
		stctl = dp->ii.irb.scsw.stctl;
		actl = dp->ii.irb.scsw.actl;

		if (chnchk) {
			sprintf (buffer, "s390_process_IRQ(%04X) - irb for "
				 "device %04X after channel check "
				 "or interface control check\n",
				 irq, dp->devno);

			s390_displayhex (buffer, &(dp->ii.irb), sizeof (irb_t));
			sprintf(dbf_txt, "chk%x", irq);
			CIO_TRACE_EVENT(0, dbf_txt);
			CIO_HEX_EVENT(0, &(dp->ii.irb), sizeof (irb_t));
		}

		ioinfo[irq]->stctl |= stctl;

		ending_status = (stctl & SCSW_STCTL_SEC_STATUS)
		    || (stctl ==
			(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
		    || (stctl == SCSW_STCTL_STATUS_PEND);

		/*
		 * Check for unsolicited interrupts - for debug purposes only
		 *
		 * We only consider an interrupt as unsolicited, if the device was not
		 *  actively in use (busy) and an interrupt other than an ALERT status
		 *  was received.
		 *
		 * Note: We must not issue a message to the console, if the
		 *       unsolicited interrupt applies to the console device
		 *       itself !
		 */
		if (!(stctl & SCSW_STCTL_ALERT_STATUS)
		    && (ioinfo[irq]->ui.flags.busy == 0)) {

#ifdef CONFIG_DEBUG_IO
			if (irq != cons_dev)
				printk (KERN_INFO
					"Unsolicited interrupt received for "
					"device %04X on subchannel %04X\n"
					" ... device status : %02X "
					"subchannel status : %02X\n",
					dp->devno, irq, dp->dstat, dp->cstat);

			sprintf (buffer, "s390_process_IRQ(%04X) - irb for "
				 "device %04X, ending_status %d\n",
				 irq, dp->devno, ending_status);

			s390_displayhex (buffer, &(dp->ii.irb), sizeof (irb_t));
#endif
			CIO_MSG_EVENT(2,
				      "Unsolicited interrupt "
				      "received for device %04X "
				      "on subchannel %04X\n"
				      " ... device status : %02X "
				      "subchannel status : %02X\n",
				      dp->devno,
				      irq, dp->dstat, dp->cstat);
			sprintf(dbf_txt, "uint%x", irq);
			CIO_TRACE_EVENT(2, dbf_txt);
			CIO_HEX_EVENT(2, &(dp->ii.irb), sizeof (irb_t));
		}

		/*
		 * take fast exit if no handler is available
		 */
		if (!ioinfo[irq]->ui.flags.ready)
			return (ending_status);

		/*
		 * Check whether we must issue a SENSE CCW ourselves if there is no
		 *  concurrent sense facility installed for the subchannel.
		 *
		 * Note: We should check for ioinfo[irq]->ui.flags.consns but VM
		 *       violates the ESA/390 architecture and doesn't present an
		 *       operand exception for virtual devices without concurrent
		 *       sense facility available/supported when enabling the
		 *       concurrent sense facility.
		 */
		if (((dp->ii.irb.scsw.dstat & DEV_STAT_UNIT_CHECK)
		     && (!issense))
		    || (ioinfo[irq]->ui.flags.delsense && ending_status)) {
			int ret_io;
			ccw1_t *s_ccw = &ioinfo[irq]->senseccw;
			unsigned long s_flag = 0;

			if (ending_status) {
				/* there is a chance that the command
				 * that gave us the unit check actually
				 * was a basic sense, so we must not
				 * overwrite *udp in that case
				 */
				if (ioinfo[irq]->ui.flags.w4sense &&
					(dp->ii.irb.scsw.dstat & DEV_STAT_UNIT_CHECK)) {
					/* NOTE(review): format string has three
					 *  conversions but four arguments are
					 *  passed - ending_status is silently
					 *  ignored; verify intended format */
					CIO_MSG_EVENT(4,"double unit check irq %04x, dstat %02x,"
							"flags %8x\n", irq, dp->ii.irb.scsw.dstat,
							 ioinfo[irq]->ui.info, ending_status);
				} else {
				/*
				 * We copy the current status information into the device driver
				 *  status area. Then we can use the local devstat area for device
				 *  sensing. When finally calling the IRQ handler we must not overlay
				 *  the original device status but copy the sense data only.
				 */
					memcpy (udp, dp, sizeof (devstat_t));
				}

				/* build the BASIC SENSE channel program */
				s_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
				s_ccw->cda =
				    (__u32) virt_to_phys (ioinfo[irq]->
							  sense_data);
				s_ccw->count = SENSE_MAX_COUNT;
				s_ccw->flags = CCW_FLAG_SLI;

				/*
				 * If free_irq() or a sync do_IO/s390_start_IO() is in
				 *  process we have to sense synchronously
				 */
				if (ioinfo[irq]->ui.flags.unready
				    || ioinfo[irq]->ui.flags.syncio)
					s_flag = DOIO_WAIT_FOR_INTERRUPT
						|  DOIO_TIMEOUT
						|  DOIO_VALID_LPM;

				else
					s_flag = DOIO_VALID_LPM;

				/*
				 * Reset status info
				 *
				 * It does not matter whether this is a sync. or async.
				 *  SENSE request, but we have to assure we don't call
				 *  the irq handler now, but keep the irq in busy state.
				 *  In sync. mode s390_process_IRQ() is called recursively,
				 *  while in async. mode we re-enter do_IRQ() with the
				 *  next interrupt.
				 *
				 * Note : this may be a delayed sense request !
				 */
				allow4handler = 0;

				ioinfo[irq]->ui.flags.fast = 0;
				ioinfo[irq]->ui.flags.repall = 0;
				ioinfo[irq]->ui.flags.w4final = 0;
				ioinfo[irq]->ui.flags.delsense = 0;

				dp->cstat = 0;
				dp->dstat = 0;
				dp->rescnt = SENSE_MAX_COUNT;

				ioinfo[irq]->ui.flags.w4sense = 1;

				ret_io = s390_start_IO (irq, s_ccw, 0xE2C5D5E2,	/* = SENSe */
							0xff,
							s_flag);
				switch (ret_io) {
				case 0: /* OK */
					break;
				case -ENODEV:
					/*
					 * The device is no longer operational.
					 * We won't get any sense data.
					 */
					ioinfo[irq]->ui.flags.w4sense = 0;
					ioinfo[irq]->ui.flags.oper = 0;
					allow4handler = 1; /* to notify the driver */
					break;
				case -EBUSY:
					/*
					 * The channel subsystem is either busy, or we have
					 * a status pending. Retry later.
					 */
					ioinfo[irq]->ui.flags.w4sense = 0;
					ioinfo[irq]->ui.flags.delsense = 1;
					break;
				default:
					printk(KERN_ERR"irq %04X: Unexpected rc %d "
					       "for BASIC SENSE!\n", irq, ret_io);
					ioinfo[irq]->ui.flags.w4sense = 0;
					allow4handler = 1;
				}
			} else {
				/*
				 * we received an Unit Check but we have no final
				 *  status yet, therefore we must delay the SENSE
				 *  processing. However, we must not report this
				 *  intermediate status to the device interrupt
				 *  handler.
				 */
				ioinfo[irq]->ui.flags.fast = 0;
				ioinfo[irq]->ui.flags.repall = 0;

				ioinfo[irq]->ui.flags.delsense = 1;
				allow4handler = 0;

			}

		}

		/*
		 * we allow for the device action handler if .
		 *  - we received ending status
		 *  - the action handler requested to see all interrupts
		 *  - we received an intermediate status
		 *  - fast notification was requested (primary status)
		 *  - unsollicited interrupts
		 *
		 */
		if (allow4handler) {
			allow4handler = ending_status
			    || (ioinfo[irq]->ui.flags.repall)
			    || (stctl & SCSW_STCTL_INTER_STATUS)
			    || ((ioinfo[irq]->ui.flags.fast)
				&& (stctl & SCSW_STCTL_PRIM_STATUS))
			    || (ioinfo[irq]->ui.flags.oper == 0);

		}

		/*
		 * We used to copy the device status information right before
		 *  calling the device action handler. However, in status
		 *  pending situations during do_IO() or halt_IO(), as well as
		 *  enable_subchannel/disable_subchannel processing we must
		 *  synchronously return the status information and must not
		 *  call the device action handler.
		 *
		 */
		if (allow4handler) {
			/*
			 * if we were waiting for sense data we copy the sense
			 *  bytes only as the original status information was
			 *  saved prior to sense already.
			 */
			if (ioinfo[irq]->ui.flags.w4sense) {
				int sense_count =
				    SENSE_MAX_COUNT -
				    ioinfo[irq]->devstat.rescnt;

#ifdef CONFIG_DEBUG_IO
				if (irq != cons_dev)
					printk (KERN_DEBUG
						"s390_process_IRQ( %04X ) : "
						"BASIC SENSE bytes avail %d\n",
						irq, sense_count);
#endif
				CIO_MSG_EVENT(4,
					      "s390_process_IRQ( %04X ): "
					      "BASIC SENSE bytes avail %d\n",
					      irq, sense_count);
				ioinfo[irq]->ui.flags.w4sense = 0;
				udp->flag |= DEVSTAT_FLAG_SENSE_AVAIL;
				udp->scnt = sense_count;

				if (sense_count > 0) {
					memcpy (udp->ii.sense.data,
						ioinfo[irq]->sense_data,
						sense_count);
				} else if (sense_count == 0) {
					udp->flag &= ~DEVSTAT_FLAG_SENSE_AVAIL;
				} else {
					panic
					    ("s390_process_IRQ(%04x) encountered "
					     "negative sense count\n", irq);

				}
			} else {
				memcpy (udp, dp, sdevstat);

			}

		}

		/*
		 * for status pending situations other than deferred interrupt
		 *  conditions detected by s390_process_IRQ() itself we must not
		 *  call the handler. This will synchronously be reported back
		 *  to the caller instead, e.g. when detected during do_IO().
		 */
		if (ioinfo[irq]->ui.flags.s_pend
		    || ioinfo[irq]->ui.flags.unready
		    || ioinfo[irq]->ui.flags.repnone) {
			if (ending_status) {

				ioinfo[irq]->ui.flags.busy = 0;
				ioinfo[irq]->ui.flags.doio = 0;
				ioinfo[irq]->ui.flags.haltio = 0;
				ioinfo[irq]->ui.flags.fast = 0;
				ioinfo[irq]->ui.flags.repall = 0;
				ioinfo[irq]->ui.flags.w4final = 0;

				dp->flag |= DEVSTAT_FINAL_STATUS;
				udp->flag |= DEVSTAT_FINAL_STATUS;

			}

			allow4handler = 0;

		}

		/*
		 * Call device action handler if applicable
		 */
		if (allow4handler) {

			/*
			 *  We only reset the busy condition when we are sure that no further
			 *   interrupt is pending for the current I/O request (ending_status).
			 */
			if (ending_status || !ioinfo[irq]->ui.flags.oper) {
				ioinfo[irq]->ui.flags.oper = 1;	/* dev IS oper */

				ioinfo[irq]->ui.flags.busy = 0;
				ioinfo[irq]->ui.flags.doio = 0;
				ioinfo[irq]->ui.flags.haltio = 0;
				ioinfo[irq]->ui.flags.fast = 0;
				ioinfo[irq]->ui.flags.repall = 0;
				ioinfo[irq]->ui.flags.w4final = 0;

				dp->flag |= DEVSTAT_FINAL_STATUS;
				udp->flag |= DEVSTAT_FINAL_STATUS;

				if (!ioinfo[irq]->ui.flags.killio)
					ioinfo[irq]->irq_desc.handler (irq, udp, NULL);

				/*
				 * reset intparm after final status or we will badly present unsolicited
				 *  interrupts with a intparm value possibly no longer valid.
				 */
				dp->intparm = 0;

			} else {
				ioinfo[irq]->ui.flags.w4final = 1;

				/*
				 * Eventually reset subchannel PCI status and
				 *  set the PCI or SUSPENDED flag in the user
				 *  device status block if appropriate.
				 */
				if (dp->cstat & SCHN_STAT_PCI) {
					udp->flag |= DEVSTAT_PCI;
					dp->cstat &= ~SCHN_STAT_PCI;
				}

				if (actl & SCSW_ACTL_SUSPENDED) {
					udp->flag |= DEVSTAT_SUSPENDED;

				}

				ioinfo[irq]->irq_desc.handler (irq, udp, NULL);

			}

		}

		break;

	case 3:		/* device/path not operational */

		ioinfo[irq]->ui.flags.busy = 0;
		ioinfo[irq]->ui.flags.doio = 0;
		ioinfo[irq]->ui.flags.haltio = 0;

		dp->cstat = 0;
		dp->dstat = 0;

 		if ((dp->ii.irb.scsw.fctl != 0) &&
 		    ((dp->ii.irb.scsw.stctl & SCSW_STCTL_STATUS_PEND) != 0) &&
 		    (((dp->ii.irb.scsw.stctl & SCSW_STCTL_INTER_STATUS) == 0) ||
 		     ((dp->ii.irb.scsw.actl & SCSW_ACTL_SUSPENDED) != 0)))
 			if (dp->ii.irb.scsw.pno) {
 				/* path not operational: remove the paths
 				 *  reported in pnom from the operational
 				 *  path mask */
 				stsch(irq, &ioinfo[irq]->schib);
 				ioinfo[irq]->opm &=
 					~ioinfo[irq]->schib.pmcw.pnom;
 			}

		if (ioinfo[irq]->opm == 0) {
			ioinfo[irq]->ui.flags.oper = 0;

		}

		ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
		ioinfo[irq]->devstat.flag |= DEVSTAT_FINAL_STATUS;


               /*
                * When we find a device "not oper" we save the status
                *  information into the device status area and call the
                *  device specific interrupt handler.
                *
                * Note: currently we don't have any way to reenable
                *       the device unless an unsolicited interrupt
                *       is presented. We don't check for spurious
                *       interrupts on "not oper" conditions.
                */


		ioinfo[irq]->ui.flags.fast = 0;
		ioinfo[irq]->ui.flags.repall = 0;
		ioinfo[irq]->ui.flags.w4final = 0;

		/*
		 * take fast exit if no handler is available
		 */
		if (!ioinfo[irq]->ui.flags.ready)
			return (ending_status);

		/*
		 * Special case: We got a deferred cc 3 on a basic sense.
		 * We have to notify the device driver of the former unit
		 * check, but must not confuse it by calling it with the status
		 * for the failed basic sense.
		 */
		if (ioinfo[irq]->ui.flags.w4sense)
			ioinfo[irq]->ui.flags.w4sense = 0;
		else
			memcpy (udp, &(ioinfo[irq]->devstat), sdevstat);

		ioinfo[irq]->devstat.intparm = 0;

		if (!ioinfo[irq]->ui.flags.s_pend
		    && !ioinfo[irq]->ui.flags.repnone
		    && !ioinfo[irq]->ui.flags.killio) {

			ioinfo[irq]->irq_desc.handler (irq, udp, NULL);
		}

		ending_status = 1;

		break;

	}

	/* if the I/O request finished and further I/O is blocked (noio),
	 *  trigger path verification for still-registered devices, or
	 *  just clear the blocking flags otherwise */
	if (ending_status &&
	    ioinfo[irq]->ui.flags.noio &&
	    !ioinfo[irq]->ui.flags.syncio &&
	    !ioinfo[irq]->ui.flags.w4sense) {
		if(ioinfo[irq]->ui.flags.ready) {
			s390_schedule_path_verification(irq);
		} else {
			ioinfo[irq]->ui.flags.killio = 0;
			ioinfo[irq]->ui.flags.noio = 0;
		}
	}

	return (ending_status);
}
3086 
3087 /*
3088  * Set the special i/o-interruption sublass 7 for the
3089  *  device specified by parameter irq. There can only
3090  *  be a single device been operated on this special
3091  *  isc. This function is aimed being able to check
3092  *  on special device interrupts in disabled state,
3093  *  without having to delay I/O processing (by queueing)
3094  *  for non-console devices.
3095  *
3096  * Setting of this isc is done by set_cons_dev().
3097  *  wait_cons_dev() allows
3098  *  to actively wait on an interrupt for this device in
3099  *  disabed state. When the interrupt condition is
3100  *  encountered, wait_cons_dev(9 calls do_IRQ() to have
3101  *  the console device driver processing the interrupt.
3102  */
3103 int
set_cons_dev(int irq)3104 set_cons_dev (int irq)
3105 {
3106 	int ccode;
3107 	int rc = 0;
3108 	char dbf_txt[15];
3109 
3110 	SANITY_CHECK (irq);
3111 
3112 	if (cons_dev != -1)
3113 		return -EBUSY;
3114 
3115 	sprintf (dbf_txt, "scons%x", irq);
3116 	CIO_TRACE_EVENT (4, dbf_txt);
3117 
3118 	/*
3119 	 * modify the indicated console device to operate
3120 	 *  on special console interrupt sublass 7
3121 	 */
3122 	ccode = stsch (irq, &(ioinfo[irq]->schib));
3123 
3124 	if (ccode) {
3125 		rc = -ENODEV;
3126 		ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
3127 	} else {
3128 		ioinfo[irq]->schib.pmcw.isc = 7;
3129 
3130 		ccode = msch (irq, &(ioinfo[irq]->schib));
3131 
3132 		if (ccode) {
3133 			rc = -EIO;
3134 		} else {
3135 			cons_dev = irq;
3136 
3137 			/*
3138 			 * enable console I/O-interrupt sublass 7
3139 			 */
3140 			ctl_set_bit (6, 24);
3141 
3142 		}
3143 	}
3144 
3145 	return (rc);
3146 }
3147 
int
wait_cons_dev (int irq)
{
	int rc = 0;
	long save_cr6;
	char dbf_txt[15];

	/* only valid for the registered console subchannel */
	if (irq != cons_dev)
		return -EINVAL;

	sprintf (dbf_txt, "wcons%x", irq);
	CIO_TRACE_EVENT (4, dbf_txt);

	/*
	 * before entering the spinlock we may already have
	 *  processed the interrupt on a different CPU ...
	 */
	if (ioinfo[irq]->ui.flags.busy == 1) {
		long cr6 __attribute__ ((aligned (8)));

		/*
		 * disable all, but isc 7 (console device)
		 */
		__ctl_store (cr6, 6, 6);
		save_cr6 = cr6;
		cr6 &= 0x01FFFFFF;	/* keep only the isc-7 enablement bit */
		__ctl_load (cr6, 6, 6);

		/* poll for the console interrupt; drop the per-irq lock
		 *  while delaying so another CPU can make progress */
		do {
			tpi_info_t tpi_info = { 0, };
			if (tpi (&tpi_info) == 1) {
				/* interrupt pending - process it directly */
				s390_process_IRQ (tpi_info.irq);
			} else {
				s390irq_spin_unlock (irq);
				udelay (100);
				s390irq_spin_lock (irq);
			}
			eieio ();
		} while (ioinfo[irq]->ui.flags.busy == 1);

		/*
		 * restore previous isc value
		 */
		cr6 = save_cr6;
		__ctl_load (cr6, 6, 6);

	}

	return (rc);
}
3198 
int
enable_cpu_sync_isc (int irq)
{
	int ccode;
	long cr6 __attribute__ ((aligned (8)));

	int retry = 3;
	int rc = 0;
	char dbf_txt[15];

	sprintf (dbf_txt, "enisc%x", irq);
	CIO_TRACE_EVENT (4, dbf_txt);

	/* This one spins until it can get the sync_isc lock for irq# irq */

	if ((irq <= highest_subchannel) &&
	    (ioinfo[irq] != INVALID_STORAGE_AREA) &&
	    (!ioinfo[irq]->st)) {
		if (atomic_read (&sync_isc) != irq)
			atomic_compare_and_swap_spin (-1, irq, &sync_isc);

		/* calls may nest (recursive sync I/O); count the depth */
		sync_isc_cnt++;

		if (sync_isc_cnt > 255) {	/* fixme : magic number */
			panic ("Too many recursive calls to enable_sync_isc");

		}
		/*
		 * we only run the STSCH/MSCH path for the first enablement
		 */
		else if (sync_isc_cnt == 1) {

			ccode = stsch (irq, &(ioinfo[irq]->schib));

			if (!ccode) {
				/* move the subchannel to the sync isc 5 */
				ioinfo[irq]->schib.pmcw.isc = 5;

				do {
					ccode = msch (irq,
						      &(ioinfo[irq]->schib));

					switch (ccode) {
					case 0:
						/*
						 * enable special isc
						 */
						__ctl_store (cr6, 6, 6);
						/* enable sync isc 5 */
						cr6 |= 0x04000000;
						/* disable standard isc 3 */
						cr6 &= 0xEFFFFFFF;
						/* disable console isc 7 */
						cr6 &= 0xFEFFFFFF;
						ioinfo[irq]->ui.flags.syncio = 1;
						__ctl_load (cr6, 6, 6);
						rc = 0;
						retry = 0;
						break;

					case 1:
						/*
						 * process pending status
						 */
						ioinfo[irq]->ui.flags.s_pend =
						    1;
						s390_process_IRQ (irq);
						ioinfo[irq]->ui.flags.s_pend =
						    0;

						rc = -EIO;	/* might be overwritten... */
						retry--;
						break;

					case 2:	/* busy */
						retry = 0;
						rc = -EBUSY;
						break;

					case 3:	/* not oper */
						retry = 0;
						rc = -ENODEV;
						break;

					}

				} while (retry);

			} else {
				rc = -ENODEV;	/* device is not-operational */

			}
		}

		if (rc) {	/* can only happen if stsch/msch fails */
			sync_isc_cnt = 0;
			atomic_set (&sync_isc, -1);
		} else if (sync_isc_cnt == 1) {
			int ccode;

			/* re-read the schib: if the isc did not stick at 5,
			 *  back out of sync mode again */
			ccode = stsch(irq, &ioinfo[irq]->schib);
			if (!ccode && ioinfo[irq]->schib.pmcw.isc != 5) {
				ioinfo[irq]->ui.flags.syncio = 0;
				sync_isc_cnt = 0;
				atomic_set (&sync_isc, -1);
			}
		}
	} else {
#ifdef CONFIG_SYNC_ISC_PARANOIA
		panic ("enable_sync_isc: called with invalid %x\n", irq);
#endif

		rc = -EINVAL;

	}

	return (rc);
}
3316 
/*
 * disable_cpu_sync_isc
 *
 * Counterpart to enable_cpu_sync_isc(): move the subchannel back to
 * the standard interruption subclass (isc 3) via msch() and update
 * control register 6 accordingly (sync isc 5 off, isc 3 and console
 * isc 7 on).
 *
 * Returns 0 on success, -EINVAL for an invalid/unusable irq.
 */
int
disable_cpu_sync_isc (int irq)
{
	int rc = 0;
	int retry1 = 5;		/* outer retries: full msch() sequence + clear_IO */
	int retry2 = 5;		/* inner retries for a single msch() attempt */
	int clear_pend = 0;	/* clear_IO() has been issued once already */

	int ccode;
	/* control register contents must be doubleword aligned */
	long cr6 __attribute__ ((aligned (8)));

	char dbf_txt[15];

	sprintf (dbf_txt, "disisc%x", irq);
	CIO_TRACE_EVENT (4, dbf_txt);

	if ((irq <= highest_subchannel) &&
	    (ioinfo[irq] != INVALID_STORAGE_AREA) &&
	    (!ioinfo[irq]->st)) {
		/*
		 * We disable if we're the top user only, as we may
		 *  run recursively ...
		 * We must not decrease the count immediately; during
		 *  msch() processing we may face another pending
		 *  status we have to process recursively (sync).
		 */

#ifdef CONFIG_SYNC_ISC_PARANOIA
		if (atomic_read (&sync_isc) != irq)
			panic
			    ("disable_sync_isc: called for %x while %x locked\n",
			     irq, atomic_read (&sync_isc));
#endif

		if (sync_isc_cnt == 1) {
			/* refresh the schib, then switch back to isc 3 */
			ccode = stsch (irq, &(ioinfo[irq]->schib));

			ioinfo[irq]->schib.pmcw.isc = 3;

			do {
				retry2 = 5;
				do {
					ccode =
					    msch (irq, &(ioinfo[irq]->schib));

					switch (ccode) {
					case 0:
						/*
						 * disable special interrupt subclass in CPU
						 */
						__ctl_store (cr6, 6, 6);
						/* disable sync isc 5 */
						cr6 &= 0xFBFFFFFF;
						/* enable standard isc 3 */
						cr6 |= 0x10000000;
						/* enable console isc 7 */
						cr6 |= 0x01000000;
						__ctl_load (cr6, 6, 6);

						retry2 = 0;
						break;

					case 1:	/* status pending */
						/* s_pend marks recursive (sync) status processing */
						ioinfo[irq]->ui.flags.s_pend =
						    1;
						s390_process_IRQ (irq);
						ioinfo[irq]->ui.flags.s_pend =
						    0;

						retry2--;
						break;

					case 2:	/* busy */
						retry2--;
						udelay (100);	/* give it time */
						break;

					default:	/* not oper */
						retry2 = 0;
						break;
					}

				} while (retry2);

				retry1--;

				/* try stopping it ... */
				if ((ccode) && !clear_pend) {
					clear_IO (irq, 0x00004711, 0);
					clear_pend = 1;

				}

				udelay (100);

			} while (retry1 && ccode);

			/* top-level user gone: release the sync isc lock */
			ioinfo[irq]->ui.flags.syncio = 0;

			sync_isc_cnt = 0;
			atomic_set (&sync_isc, -1);

		} else {
			/* nested call: just drop the use count */
			sync_isc_cnt--;

		}
	} else {
#ifdef CONFIG_SYNC_ISC_PARANOIA
		if (atomic_read (&sync_isc) != -1)
			panic
			    ("disable_sync_isc: called with invalid %x while %x locked\n",
			     irq, atomic_read (&sync_isc));
#endif

		rc = -EINVAL;

	}

	return (rc);
}
3437 
/*
 * diag210
 *
 * Issue the VM DIAGNOSE X'210' instruction for the device described
 * by *addr and return its condition code (0 == success).
 *
 * NOTE(review): callers pass virt_to_phys() of the buffer (see
 * VM_virtual_device_info), i.e. addr is expected to be a real address.
 */
int diag210 (diag210_t *addr)
{
        int ccode;

        __asm__ __volatile__(
#ifdef CONFIG_ARCH_S390X
                /* temporarily switch to 31-bit addressing mode around
                 * the diagnose on 64-bit kernels */
                "   sam31\n"
                "   diag  %1,0,0x210\n"
                "   sam64\n"
#else
                "   diag  %1,0,0x210\n"
#endif
                /* extract the condition code into ccode */
                "   ipm   %0\n"
                "   srl   %0,28"
                : "=d" (ccode)
		: "a" (addr)
                : "cc" );
        return ccode;
}
3457 
3458 /*
3459  * Input :
3460  *   devno - device number
3461  *   ps    - pointer to sense ID data area
3462  * Output : none
3463  */
3464 void
VM_virtual_device_info(__u16 devno,senseid_t * ps)3465 VM_virtual_device_info (__u16 devno, senseid_t * ps)
3466 {
3467 	diag210_t *p_diag_data;
3468 	int ccode;
3469 
3470 	int error = 0;
3471 
3472 	CIO_TRACE_EVENT (4, "VMvdinf");
3473 
3474 	if (init_IRQ_complete) {
3475 		p_diag_data = kmalloc (sizeof (diag210_t), GFP_DMA | GFP_ATOMIC);
3476 	} else {
3477 		p_diag_data = alloc_bootmem_low (sizeof (diag210_t));
3478 
3479 	}
3480 	if (!p_diag_data)
3481 		return;
3482 
3483 	p_diag_data->vrdcdvno = devno;
3484 	p_diag_data->vrdclen = sizeof (diag210_t);
3485 	ccode = diag210 ((diag210_t *) virt_to_phys (p_diag_data));
3486 	ps->reserved = 0xff;
3487 
3488 	switch (p_diag_data->vrdcvcla) {
3489 	case 0x80:
3490 
3491 		switch (p_diag_data->vrdcvtyp) {
3492 		case 00:
3493 
3494 			ps->cu_type = 0x3215;
3495 
3496 			break;
3497 
3498 		default:
3499 
3500 			error = 1;
3501 
3502 			break;
3503 
3504 		}
3505 
3506 		break;
3507 
3508 	case 0x40:
3509 
3510 		switch (p_diag_data->vrdcvtyp) {
3511 		case 0xC0:
3512 
3513 			ps->cu_type = 0x5080;
3514 
3515 			break;
3516 
3517 		case 0x80:
3518 
3519 			ps->cu_type = 0x2250;
3520 
3521 			break;
3522 
3523 		case 0x04:
3524 
3525 			ps->cu_type = 0x3277;
3526 
3527 			break;
3528 
3529 		case 0x01:
3530 
3531 			ps->cu_type = 0x3278;
3532 
3533 			break;
3534 
3535 		default:
3536 
3537 			error = 1;
3538 
3539 			break;
3540 
3541 		}
3542 
3543 		break;
3544 
3545 	case 0x20:
3546 
3547 		switch (p_diag_data->vrdcvtyp) {
3548 		case 0x84:
3549 
3550 			ps->cu_type = 0x3505;
3551 
3552 			break;
3553 
3554 		case 0x82:
3555 
3556 			ps->cu_type = 0x2540;
3557 
3558 			break;
3559 
3560 		case 0x81:
3561 
3562 			ps->cu_type = 0x2501;
3563 
3564 			break;
3565 
3566 		default:
3567 
3568 			error = 1;
3569 
3570 			break;
3571 
3572 		}
3573 
3574 		break;
3575 
3576 	case 0x10:
3577 
3578 		switch (p_diag_data->vrdcvtyp) {
3579 		case 0x84:
3580 
3581 			ps->cu_type = 0x3525;
3582 
3583 			break;
3584 
3585 		case 0x82:
3586 
3587 			ps->cu_type = 0x2540;
3588 
3589 			break;
3590 
3591 		case 0x4F:
3592 		case 0x4E:
3593 		case 0x48:
3594 
3595 			ps->cu_type = 0x3820;
3596 
3597 			break;
3598 
3599 		case 0x4D:
3600 		case 0x49:
3601 		case 0x45:
3602 
3603 			ps->cu_type = 0x3800;
3604 
3605 			break;
3606 
3607 		case 0x4B:
3608 
3609 			ps->cu_type = 0x4248;
3610 
3611 			break;
3612 
3613 		case 0x4A:
3614 
3615 			ps->cu_type = 0x4245;
3616 
3617 			break;
3618 
3619 		case 0x47:
3620 
3621 			ps->cu_type = 0x3262;
3622 
3623 			break;
3624 
3625 		case 0x43:
3626 
3627 			ps->cu_type = 0x3203;
3628 
3629 			break;
3630 
3631 		case 0x42:
3632 
3633 			ps->cu_type = 0x3211;
3634 
3635 			break;
3636 
3637 		case 0x41:
3638 
3639 			ps->cu_type = 0x1403;
3640 
3641 			break;
3642 
3643 		default:
3644 
3645 			error = 1;
3646 
3647 			break;
3648 
3649 		}
3650 
3651 		break;
3652 
3653 	case 0x08:
3654 
3655 		switch (p_diag_data->vrdcvtyp) {
3656 		case 0x82:
3657 
3658 			ps->cu_type = 0x3422;
3659 
3660 			break;
3661 
3662 		case 0x81:
3663 
3664 			ps->cu_type = 0x3490;
3665 
3666 			break;
3667 
3668 		case 0x10:
3669 
3670 			ps->cu_type = 0x3420;
3671 
3672 			break;
3673 
3674 		case 0x02:
3675 
3676 			ps->cu_type = 0x3430;
3677 
3678 			break;
3679 
3680 		case 0x01:
3681 
3682 			ps->cu_type = 0x3480;
3683 
3684 			break;
3685 
3686 		case 0x42:
3687 
3688 			ps->cu_type = 0x3424;
3689 
3690 			break;
3691 
3692 		case 0x44:
3693 
3694 			ps->cu_type = 0x9348;
3695 
3696 			break;
3697 
3698 		default:
3699 
3700 			error = 1;
3701 
3702 			break;
3703 
3704 		}
3705 
3706 		break;
3707 
3708 	case 02:		/* special device class ... */
3709 
3710 		switch (p_diag_data->vrdcvtyp) {
3711 		case 0x20:	/* OSA */
3712 
3713 			ps->cu_type = 0x3088;
3714 			ps->cu_model = 0x60;
3715 
3716 			break;
3717 
3718 		default:
3719 
3720 			error = 1;
3721 			break;
3722 
3723 		}
3724 
3725 		break;
3726 
3727 	default:
3728 
3729 		error = 1;
3730 
3731 		break;
3732 
3733 	}
3734 
3735 	if (init_IRQ_complete) {
3736 		kfree (p_diag_data);
3737 	} else {
3738 		free_bootmem ((unsigned long) p_diag_data, sizeof (diag210_t));
3739 
3740 	}
3741 
3742 	if (error) {
3743 		printk (KERN_ERR "DIAG X'210' for "
3744 			"device %04X returned "
3745 			"(cc = %d): vdev class : %02X, "
3746 			"vdev type : %04X \n"
3747 			" ...  rdev class : %02X, rdev type : %04X, "
3748 			"rdev model: %02X\n",
3749 			devno,
3750 			ccode,
3751 			p_diag_data->vrdcvcla,
3752 			p_diag_data->vrdcvtyp,
3753 			p_diag_data->vrdcrccl,
3754 			p_diag_data->vrdccrty, p_diag_data->vrdccrmd);
3755 		CIO_MSG_EVENT(0,
3756 			      "DIAG X'210' for "
3757 			      "device %04X returned "
3758 			      "(cc = %d): vdev class : %02X, "
3759 			      "vdev type : %04X \n ...  "
3760 			      "rdev class : %02X, rdev type : %04X, "
3761 			      "rdev model: %02X\n",
3762 			      devno,
3763 			      ccode,
3764 			      p_diag_data->vrdcvcla,
3765 			      p_diag_data->vrdcvtyp,
3766 			      p_diag_data->vrdcrccl,
3767 			      p_diag_data->vrdccrty,
3768 			      p_diag_data->vrdccrmd);
3769 
3770 	}
3771 }
3772 
3773 /*
3774  * This routine returns the characteristics for the device
3775  *  specified. Some old devices might not provide the necessary
3776  *  command code information during SenseID processing. In this
3777  *  case the function returns -EINVAL. Otherwise the function
3778  *  allocates a decice specific data buffer and provides the
3779  *  device characteristics together with the buffer size. Its
3780  *  the callers responability to release the kernel memory if
3781  *  not longer needed. In case of persistent I/O problems -EBUSY
3782  *  is returned.
3783  *
3784  *  The function may be called enabled or disabled. However, the
3785  *   caller must have locked the irq it is requesting data for.
3786  *
3787  * Note : It would have been nice to collect this information
3788  *         during init_IRQ() processing but this is not possible
3789  *
3790  *         a) without statically pre-allocation fixed size buffers
3791  *            as virtual memory management isn't available yet.
3792  *
3793  *         b) without unnecessarily increase system startup by
3794  *            evaluating devices eventually not used at all.
3795  */
3796 int
read_dev_chars(int irq,void ** buffer,int length)3797 read_dev_chars (int irq, void **buffer, int length)
3798 {
3799 	unsigned long flags;
3800 	ccw1_t *rdc_ccw;
3801 	devstat_t devstat;
3802 	char *rdc_buf;
3803 	int devflag = 0;
3804 
3805 	int ret = 0;
3806 	int emulated = 0;
3807 	int retry = 5;
3808 
3809 	char dbf_txt[15];
3810 
3811 	if (!buffer || !length) {
3812 		return (-EINVAL);
3813 
3814 	}
3815 
3816 	SANITY_CHECK (irq);
3817 
3818 	if (ioinfo[irq]->ui.flags.oper == 0) {
3819 		return (-ENODEV);
3820 
3821 	}
3822 
3823  	if (ioinfo[irq]->ui.flags.unfriendly) {
3824  		/* don't even try it */
3825  		return -EUSERS;
3826  	}
3827 
3828 	sprintf (dbf_txt, "rddevch%x", irq);
3829 	CIO_TRACE_EVENT (4, dbf_txt);
3830 
3831 	/*
3832 	 * Before playing around with irq locks we should assure
3833 	 *   running disabled on (just) our CPU. Sync. I/O requests
3834 	 *   also require to run disabled.
3835 	 *
3836 	 * Note : as no global lock is required, we must not use
3837 	 *        cli(), but __cli() instead.
3838 	 */
3839 	__save_flags (flags);
3840 	__cli ();
3841 
3842 	rdc_ccw = &ioinfo[irq]->senseccw;
3843 
3844 	if (!ioinfo[irq]->ui.flags.ready) {
3845 		ret = request_irq (irq,
3846 				   init_IRQ_handler, SA_PROBE, "RDC", &devstat);
3847 
3848 		if (!ret) {
3849 			emulated = 1;
3850 
3851 		}
3852 
3853 	}
3854 
3855 	if (!ret) {
3856 		if (!*buffer) {
3857 			rdc_buf = kmalloc (length, GFP_KERNEL);
3858 		} else {
3859 			rdc_buf = *buffer;
3860 
3861 		}
3862 
3863 		if (!rdc_buf) {
3864 			ret = -ENOMEM;
3865 		} else {
3866 			do {
3867 				rdc_ccw->cmd_code = CCW_CMD_RDC;
3868 				rdc_ccw->count = length;
3869 				rdc_ccw->flags = CCW_FLAG_SLI;
3870 				ret =
3871 				    set_normalized_cda (rdc_ccw, rdc_buf);
3872 				if (!ret) {
3873 
3874 					memset (ioinfo[irq]->irq_desc.dev_id,
3875 						'\0', sizeof (devstat_t));
3876 
3877 					ret = s390_start_IO (irq, rdc_ccw, 0x00524443,	/* RDC */
3878 							     0,	/* n/a */
3879 							     DOIO_WAIT_FOR_INTERRUPT
3880 							     |
3881 							     DOIO_DONT_CALL_INTHDLR);
3882 					retry--;
3883 					devflag =
3884 					    ioinfo[irq]->irq_desc.dev_id->flag;
3885 
3886 					clear_normalized_cda (rdc_ccw);
3887 				} else {
3888 					udelay (100);	/* wait for recovery */
3889 					retry--;
3890 				}
3891 
3892 			} while ((retry)
3893 				 && (ret
3894 				     || (devflag & DEVSTAT_STATUS_PENDING)));
3895 
3896 		}
3897 
3898 		if (!retry) {
3899 			ret = (ret == -ENOMEM) ? -ENOMEM : -EBUSY;
3900 
3901 		}
3902 
3903 		__restore_flags (flags);
3904 
3905 		/*
3906 		 * on success we update the user input parms
3907 		 */
3908 		if (!ret) {
3909 			*buffer = rdc_buf;
3910 
3911 		}
3912 
3913 		if (emulated) {
3914 			free_irq (irq, &devstat);
3915 
3916 		}
3917 
3918 	} else {
3919 		__restore_flags (flags);
3920 	}
3921 
3922 	return (ret);
3923 }
3924 
3925 /*
3926  *  Read Configuration data
3927  */
/*
 * read_conf_data
 *
 * Read Configuration Data (RCD) for the device on subchannel 'irq',
 * using the RCD command/count advertised in the extended SenseID CIW
 * list. On success a buffer is allocated and returned via *buffer /
 * *length. Returns -EOPNOTSUPP when the device advertises no usable
 * RCD CIW, -EUSERS for boxed devices, -ENOMEM on allocation failure.
 */
int
read_conf_data (int irq, void **buffer, int *length, __u8 lpm)
{
	unsigned long flags;
	int ciw_cnt;

	int found = 0;		/* RCD CIW found */
	int ret = 0;		/* return code */

	char dbf_txt[15];

	SANITY_CHECK (irq);

	if (!buffer || !length) {
		return (-EINVAL);
	} else if (ioinfo[irq]->ui.flags.oper == 0) {
		return (-ENODEV);
	} else if (ioinfo[irq]->ui.flags.esid == 0) {
		/* no extended SenseID data - RCD cannot be supported */
		*buffer = NULL;
		*length = 0;
		return (-EOPNOTSUPP);

	}

	if (ioinfo[irq]->ui.flags.unfriendly) {
		/* don't even try it */
		return -EUSERS;
	}

	sprintf (dbf_txt, "rdconf%x", irq);
	CIO_TRACE_EVENT (4, dbf_txt);

	/*
	 * scan for RCD command in extended SenseID data
	 */

	for (ciw_cnt = 0; (found == 0) && (ciw_cnt < MAX_CIWS); ciw_cnt++) {
		if (ioinfo[irq]->senseid.ciw[ciw_cnt].ct == CIW_TYPE_RCD) {
			/*
			 * paranoia check ...
			 */
			if (ioinfo[irq]->senseid.ciw[ciw_cnt].cmd != 0
			    && ioinfo[irq]->senseid.ciw[ciw_cnt].count != 0) {
				found = 1;

			}

			/* note: break keeps ciw_cnt at the matching index */
			break;

		}
	}

	if (found) {
		devstat_t devstat;	/* inline device status area */
		devstat_t *pdevstat;
		int ioflags;

		ccw1_t *rcd_ccw = &ioinfo[irq]->senseccw;
		char *rcd_buf = NULL;
		int emulated = 0;	/* no i/O handler installed */
		int retry = 5;	/* retry count */

		__save_flags (flags);
		__cli ();

		/* install a probe handler unless a driver owns the device */
		if (!ioinfo[irq]->ui.flags.ready) {
			pdevstat = &devstat;
			ret = request_irq (irq,
					   init_IRQ_handler,
					   SA_PROBE, "RCD", pdevstat);

			if (!ret) {
				emulated = 1;

			}	/* endif */
		} else {
			pdevstat = ioinfo[irq]->irq_desc.dev_id;

		}		/* endif */

		if (!ret) {
			/* allocate the RCD buffer with the size the CIW
			 * advertises; bootmem during early init */
			if (init_IRQ_complete) {
				rcd_buf =
				    kmalloc (ioinfo[irq]->senseid.ciw[ciw_cnt].
					     count, GFP_DMA | GFP_ATOMIC);
			} else {
				rcd_buf =
				    alloc_bootmem_low (ioinfo[irq]->senseid.
						       ciw[ciw_cnt].count);

			}

			if (rcd_buf == NULL) {
				ret = -ENOMEM;

			}
			if (!ret) {
				memset (rcd_buf,
					'\0',
					ioinfo[irq]->senseid.ciw[ciw_cnt].
					count);

				/* retry the RCD channel program until it
				 * completes cleanly or retries run out */
				do {
					rcd_ccw->cmd_code =
					    ioinfo[irq]->senseid.ciw[ciw_cnt].
					    cmd;
					rcd_ccw->cda =
					    (__u32) virt_to_phys (rcd_buf);
					rcd_ccw->count =
					    ioinfo[irq]->senseid.ciw[ciw_cnt].
					    count;
					rcd_ccw->flags = CCW_FLAG_SLI;

					memset (pdevstat, '\0',
						sizeof (devstat_t));

					if (lpm) {
						ioflags =
						    DOIO_WAIT_FOR_INTERRUPT |
						    DOIO_VALID_LPM |
						    DOIO_DONT_CALL_INTHDLR;
					} else {
						ioflags =
						    DOIO_WAIT_FOR_INTERRUPT |
						    DOIO_DONT_CALL_INTHDLR;

					}

					ret = s390_start_IO (irq, rcd_ccw, 0x00524344,	/* == RCD */
							     lpm, ioflags);
					switch (ret) {
					case 0:
					case -EIO:

						if (!
						    (pdevstat->
						     flag &
						     (DEVSTAT_STATUS_PENDING |
						      DEVSTAT_NOT_OPER |
						      DEVSTAT_FLAG_SENSE_AVAIL)))
						{
							retry = 0;	/* we got it ... */
						} else {
							retry--;	/* try again ... */

						}

						break;

					default:	/* -EBUSY, -ENODEV, ??? */
						retry = 0;

					}

				} while (retry);
			}
		}

		__restore_flags (flags);

		/*
		 * on success we update the user input parms
		 */
		if (ret == 0) {
			*length = ioinfo[irq]->senseid.ciw[ciw_cnt].count;
			*buffer = rcd_buf;
		} else {
			/* failure: release the buffer with the matching
			 * allocator and clear the output parameters */
			if (rcd_buf != NULL) {
				if (init_IRQ_complete) {
					kfree (rcd_buf);
				} else {
					free_bootmem ((unsigned long) rcd_buf,
						      ioinfo[irq]->senseid.
						      ciw[ciw_cnt].count);

				}

			}

			*buffer = NULL;
			*length = 0;

		}

		if (emulated)
			free_irq (irq, pdevstat);
	} else {
		*buffer = NULL;
		*length = 0;
		ret = -EOPNOTSUPP;

	}

	return (ret);

}
4124 
4125 int
get_dev_info(int irq,s390_dev_info_t * pdi)4126 get_dev_info (int irq, s390_dev_info_t * pdi)
4127 {
4128 	return (get_dev_info_by_irq (irq, pdi));
4129 }
4130 
4131 static int __inline__
get_next_available_irq(ioinfo_t * pi)4132 get_next_available_irq (ioinfo_t * pi)
4133 {
4134 	int ret_val = -ENODEV;
4135 
4136 	while (pi != NULL) {
4137 		if ((!pi->st)
4138 		    && (pi->ui.flags.oper)
4139 		    && (!pi->ui.flags.unfriendly)) {
4140 			ret_val = pi->irq;
4141 			break;
4142 		} else {
4143 			pi = pi->next;
4144 		}
4145 	}
4146 
4147 	return ret_val;
4148 }
4149 
4150 int
get_irq_first(void)4151 get_irq_first (void)
4152 {
4153 	int ret_irq;
4154 
4155 	if (ioinfo_head) {
4156 		if ((ioinfo_head->ui.flags.oper) &&
4157 		    (!ioinfo_head->ui.flags.unfriendly) &&
4158 		    (!ioinfo_head->st)) {
4159 			ret_irq = ioinfo_head->irq;
4160 		} else if (ioinfo_head->next) {
4161 			ret_irq = get_next_available_irq (ioinfo_head->next);
4162 
4163 		} else {
4164 			ret_irq = -ENODEV;
4165 
4166 		}
4167 	} else {
4168 		ret_irq = -ENODEV;
4169 
4170 	}
4171 
4172 	return ret_irq;
4173 }
4174 
4175 int
get_irq_next(int irq)4176 get_irq_next (int irq)
4177 {
4178 	int ret_irq;
4179 
4180 	if (ioinfo[irq] != INVALID_STORAGE_AREA) {
4181 		if (ioinfo[irq]->next) {
4182 			if ((ioinfo[irq]->next->ui.flags.oper) &&
4183 			    (!ioinfo[irq]->next->ui.flags.unfriendly) &&
4184 			    (!ioinfo[irq]->next->st)) {
4185 				ret_irq = ioinfo[irq]->next->irq;
4186 			} else {
4187 				ret_irq =
4188 				    get_next_available_irq (ioinfo[irq]->next);
4189 
4190 			}
4191 		} else {
4192 			ret_irq = -ENODEV;
4193 
4194 		}
4195 	} else {
4196 		ret_irq = -EINVAL;
4197 
4198 	}
4199 
4200 	return ret_irq;
4201 }
4202 
4203 int
get_dev_info_by_irq(int irq,s390_dev_info_t * pdi)4204 get_dev_info_by_irq (int irq, s390_dev_info_t * pdi)
4205 {
4206 
4207 	SANITY_CHECK (irq);
4208 
4209 	if (pdi == NULL)
4210 		return -EINVAL;
4211 
4212 	pdi->devno = ioinfo[irq]->schib.pmcw.dev;
4213 	pdi->irq = irq;
4214 
4215 	if (ioinfo[irq]->ui.flags.oper && !ioinfo[irq]->ui.flags.unknown) {
4216 		pdi->status = 0;
4217 		memcpy (&(pdi->sid_data),
4218 			&ioinfo[irq]->senseid, sizeof (senseid_t));
4219 
4220  	} else if (ioinfo[irq]->ui.flags.unfriendly) {
4221  		pdi->status = DEVSTAT_UNFRIENDLY_DEV;
4222  		memset (&(pdi->sid_data), '\0', sizeof (senseid_t));
4223  		pdi->sid_data.cu_type = 0xFFFF;
4224 
4225 	} else if (ioinfo[irq]->ui.flags.unknown) {
4226 		pdi->status = DEVSTAT_UNKNOWN_DEV;
4227 		memset (&(pdi->sid_data), '\0', sizeof (senseid_t));
4228 		pdi->sid_data.cu_type = 0xFFFF;
4229 
4230 	} else {
4231 		pdi->status = DEVSTAT_NOT_OPER;
4232 		memset (&(pdi->sid_data), '\0', sizeof (senseid_t));
4233 		pdi->sid_data.cu_type = 0xFFFF;
4234 
4235 	}
4236 
4237 	if (ioinfo[irq]->ui.flags.ready)
4238 		pdi->status |= DEVSTAT_DEVICE_OWNED;
4239 
4240 	return 0;
4241 }
4242 
4243 int
get_dev_info_by_devno(__u16 devno,s390_dev_info_t * pdi)4244 get_dev_info_by_devno (__u16 devno, s390_dev_info_t * pdi)
4245 {
4246 	int i;
4247 	int rc = -ENODEV;
4248 
4249 	if (devno > 0x0000ffff)
4250 		return -ENODEV;
4251 	if (pdi == NULL)
4252 		return -EINVAL;
4253 
4254 	for (i = 0; i <= highest_subchannel; i++) {
4255 
4256 		if ((ioinfo[i] != INVALID_STORAGE_AREA) &&
4257 		    (!ioinfo[i]->st) &&
4258 		    (ioinfo[i]->schib.pmcw.dev == devno)) {
4259 
4260 			pdi->irq = i;
4261 			pdi->devno = devno;
4262 
4263 			if (ioinfo[i]->ui.flags.oper
4264 			    && !ioinfo[i]->ui.flags.unknown) {
4265 				pdi->status = 0;
4266 				memcpy (&(pdi->sid_data),
4267 					&ioinfo[i]->senseid,
4268 					sizeof (senseid_t));
4269 
4270  			} else if (ioinfo[i]->ui.flags.unfriendly) {
4271  				pdi->status = DEVSTAT_UNFRIENDLY_DEV;
4272  				memset (&(pdi->sid_data), '\0',
4273  					sizeof (senseid_t));
4274  				pdi->sid_data.cu_type = 0xFFFF;
4275 
4276 
4277 			} else if (ioinfo[i]->ui.flags.unknown) {
4278 				pdi->status = DEVSTAT_UNKNOWN_DEV;
4279 
4280 				memset (&(pdi->sid_data),
4281 					'\0', sizeof (senseid_t));
4282 
4283 				pdi->sid_data.cu_type = 0xFFFF;
4284 			} else {
4285 				pdi->status = DEVSTAT_NOT_OPER;
4286 
4287 				memset (&(pdi->sid_data),
4288 					'\0', sizeof (senseid_t));
4289 
4290 				pdi->sid_data.cu_type = 0xFFFF;
4291 
4292 			}
4293 
4294 			if (ioinfo[i]->ui.flags.ready)
4295 				pdi->status |= DEVSTAT_DEVICE_OWNED;
4296 
4297 			if (!ioinfo[i]->ui.flags.unfriendly)
4298 				rc = 0;	/* found */
4299 			else
4300 				rc = -EUSERS;
4301 			break;
4302 
4303 		}
4304 	}
4305 
4306 	return (rc);
4307 
4308 }
4309 
4310 int
get_irq_by_devno(__u16 devno)4311 get_irq_by_devno (__u16 devno)
4312 {
4313 	int i;
4314 	int rc = -1;
4315 
4316 	if (devno <= 0x0000ffff) {
4317 		for (i = 0; i <= highest_subchannel; i++) {
4318 			if ((ioinfo[i] != INVALID_STORAGE_AREA)
4319 			    && (!ioinfo[i]->st)
4320 			    && (ioinfo[i]->schib.pmcw.dev == devno)
4321 			    && (ioinfo[i]->schib.pmcw.dnv == 1)) {
4322 				rc = i;
4323 				break;
4324 			}
4325 		}
4326 	}
4327 
4328 	return (rc);
4329 }
4330 
4331 unsigned int
get_devno_by_irq(int irq)4332 get_devno_by_irq (int irq)
4333 {
4334 
4335 	if ((irq > highest_subchannel)
4336 	    || (irq < 0)
4337 	    || (ioinfo[irq] == INVALID_STORAGE_AREA)) {
4338 		return -1;
4339 
4340 	}
4341 
4342 	if (ioinfo[irq]->st)
4343 		return -1;
4344 
4345 	/*
4346 	 * we don't need to check for the device be operational
4347 	 *  as the initial STSCH will always present the device
4348 	 *  number defined by the IOCDS regardless of the device
4349 	 *  existing or not. However, there could be subchannels
4350 	 *  defined who's device number isn't valid ...
4351 	 */
4352 	if (ioinfo[irq]->schib.pmcw.dnv)
4353 		return (ioinfo[irq]->schib.pmcw.dev);
4354 	else
4355 		return -1;
4356 }
4357 
4358 /*
4359  * s390_device_recognition_irq
4360  *
4361  * Used for individual device recognition. Issues the device
4362  *  independant SenseID command to obtain info the device type.
4363  *
4364  */
/*
 * Probe a single subchannel: install a temporary probe handler, enable
 * the synchronous isc, optionally run path verification, then issue
 * SenseID to determine the device type. Devices whose SenseID times
 * out are marked 'unfriendly' (boxed).
 */
void
s390_device_recognition_irq (int irq)
{
	int ret;
	char dbf_txt[15];

	sprintf (dbf_txt, "devrec%x", irq);
	CIO_TRACE_EVENT (4, dbf_txt);

	/*
	 * We issue the SenseID command on I/O subchannels we think are
	 *  operational only.
	 */
	if ((ioinfo[irq] != INVALID_STORAGE_AREA)
	    && (!ioinfo[irq]->st)
	    && (ioinfo[irq]->schib.pmcw.st == 0)
	    && (ioinfo[irq]->ui.flags.oper == 1)) {
		int irq_ret;
		devstat_t devstat;

		/* request path grouping as well if the device supports it */
		if (ioinfo[irq]->ui.flags.pgid_supp)
			irq_ret = request_irq (irq,
					       init_IRQ_handler,
					       SA_PROBE | SA_DOPATHGROUP,
					       "INIT", &devstat);
		else
			irq_ret = request_irq (irq,
					       init_IRQ_handler,
					       SA_PROBE, "INIT", &devstat);

		if (!irq_ret) {
			/* SenseID is done with the sync isc enabled */
			ret = enable_cpu_sync_isc (irq);

			if (!ret) {
				ioinfo[irq]->ui.flags.unknown = 0;

				memset (&ioinfo[irq]->senseid, '\0',
					sizeof (senseid_t));

				if (cio_sid_with_pgid) {

					ret = s390_DevicePathVerification(irq,0);

					if (ret == -EOPNOTSUPP)
						/*
						 * Doesn't prevent us from proceeding
						 */
						ret = 0;
				}

				/*
				 * we'll fallthrough here if we don't want
				 * to do SPID before SID
				 */
				if (!ret) {
					ret = s390_SenseID (irq, &ioinfo[irq]->senseid, 0xff);
					if (ret == -ETIMEDOUT) {
						/* SenseID timed out.
						 * We consider this device to be
						 * boxed for now.
						 */
						ioinfo[irq]->ui.flags.unfriendly = 1;
					}

#if 0				/* FIXME */
				/*
				 * We initially check the configuration data for
				 *  those devices with more than a single path
				 */
				if (ioinfo[irq]->schib.pmcw.pim != 0x80) {
					char *prcd;
					int lrcd;

					ret =
					    read_conf_data (irq,
							    (void **) &prcd,
							    &lrcd, 0);

					if (!ret)	// on success only ...
					{
						char buffer[80];
#ifdef CONFIG_DEBUG_IO
						sprintf (buffer,
							 "RCD for device(%04X)/"
							 "subchannel(%04X) returns :\n",
							 ioinfo[irq]->schib.
							 pmcw.dev, irq);

						s390_displayhex (buffer, prcd,
								 lrcd);
#endif
						CIO_TRACE_EVENT(2, "rcddata:");
						CIO_HEX_EVENT(2, prcd, lrcd);

						if (init_IRQ_complete) {
							kfree (prcd);
						} else {
							free_bootmem ((unsigned
								       long)
								      prcd,
								      lrcd);

						}
					}
				}
#endif
				}
				disable_cpu_sync_isc (irq);

			}

			/* drop the temporary probe handler again */
			free_irq (irq, &devstat);

		}
	}
}
4481 
4482 /*
4483  * s390_device_recognition_all
4484  *
4485  * Used for system wide device recognition.
4486  *
4487  */
4488 void
s390_device_recognition_all(void)4489 s390_device_recognition_all (void)
4490 {
4491 	int irq = 0;		/* let's start with subchannel 0 ... */
4492 
4493 	do {
4494 		s390_device_recognition_irq (irq);
4495 
4496 		irq++;
4497 
4498 	} while (irq <= highest_subchannel);
4499 
4500 }
4501 
4502 /*
4503  * Function: s390_redo_validation
4504  * Look for no longer blacklisted devices
4505  * FIXME: there must be a better way to do this...
4506  */
4507 
/*
 * Re-scan all subchannels that are not yet known (e.g. previously
 * blacklisted): validate them, run device recognition, notify a
 * registered driver and create the /proc entry where configured.
 */
void
s390_redo_validation (void)
{
	int irq = 0;
	int ret;

	CIO_TRACE_EVENT (0, "redoval");

	do {
		/* only subchannels without an ioinfo entry are candidates */
		if (ioinfo[irq] == INVALID_STORAGE_AREA) {
			ret = s390_validate_subchannel (irq, 0);
			if (!ret) {
				s390_device_recognition_irq (irq);
				if (ioinfo[irq]->ui.flags.oper) {
					devreg_t *pdevreg;

					/* hand the device to a matching
					 * registered driver, if any */
					pdevreg =
					    s390_search_devreg (ioinfo[irq]);
					if (pdevreg != NULL) {
						if (pdevreg->oper_func != NULL)
							pdevreg->oper_func (irq,
									    pdevreg);

					}
				}
#ifdef CONFIG_PROC_FS
				/* create /proc/deviceinfo/<devno> entry */
				if (cio_proc_devinfo)
					if (irq < MAX_CIO_PROCFS_ENTRIES) {
						cio_procfs_device_create (ioinfo
									  [irq]->
									  devno);
					}
#endif
			}
		}
		irq++;
	} while (irq <= highest_subchannel);
}
4546 
4547 
4548 /*
4549  * s390_trigger_resense
4550  *
4551  * try to re-sense the device on subchannel irq
4552  * only to be called without interrupt handler
4553  */
int
s390_trigger_resense(int irq)
{
	/* returns -EINVAL via SANITY_CHECK for an unusable irq */
	SANITY_CHECK(irq);

	/* refuse while a driver owns the device */
	if (ioinfo[irq]->ui.flags.ready) {
		printk (KERN_WARNING "s390_trigger_resense(%04X): "
			"Device is in use!\n", irq);
		return -EBUSY;
	}

	/*
	 * This function is called by dasd if it just executed a "steal lock".
	 * Therefore, re-initialize the 'unfriendly' flag to 0.
	 * We run into timeouts if the device is still boxed...
	 */
	ioinfo[irq]->ui.flags.unfriendly = 0;

	/* redo the SenseID-based recognition */
	s390_device_recognition_irq(irq);

	return 0;
}
4577 
4578 /*
4579  * s390_search_devices
4580  *
4581  * Determines all subchannels available to the system.
4582  *
4583  */
4584 void
s390_process_subchannels(void)4585 s390_process_subchannels (void)
4586 {
4587 	int ret;
4588 	int irq = 0;		/* Evaluate all subchannels starting with 0 ... */
4589 
4590 	do {
4591 		ret = s390_validate_subchannel (irq, 0);
4592 
4593 		if (ret != -ENXIO)
4594 			irq++;
4595 
4596 	} while ((ret != -ENXIO) && (irq < __MAX_SUBCHANNELS));
4597 
4598 	highest_subchannel = (--irq);
4599 
4600 	printk (KERN_INFO "Highest subchannel number detected (hex) : %04X\n",
4601 		highest_subchannel);
4602 	CIO_MSG_EVENT(0,
4603 		      "Highest subchannel number detected "
4604 		      "(hex) : %04X\n", highest_subchannel);
4605 }
4606 
4607 /*
4608  * s390_validate_subchannel()
4609  *
4610  * Process the subchannel for the requested irq. Returns 1 for valid
4611  *  subchannels, otherwise 0.
4612  */
4613 int
s390_validate_subchannel(int irq,int enable)4614 s390_validate_subchannel (int irq, int enable)
4615 {
4616 
4617 	int retry;		/* retry count for status pending conditions */
4618 	int ccode;		/* condition code for stsch() only */
4619 	int ccode2;		/* condition code for other I/O routines */
4620 	schib_t *p_schib;
4621 	int ret;
4622 #ifdef CONFIG_CHSC
4623 	int      chp = 0;
4624 	int      mask;
4625 #endif /* CONFIG_CHSC */
4626 
4627 	char dbf_txt[15];
4628 
4629 	sprintf (dbf_txt, "valsch%x", irq);
4630 	CIO_TRACE_EVENT (4, dbf_txt);
4631 
4632 	/*
4633 	 * The first subchannel that is not-operational (ccode==3)
4634 	 *  indicates that there aren't any more devices available.
4635 	 */
4636 	if ((init_IRQ_complete)
4637 	    && (ioinfo[irq] != INVALID_STORAGE_AREA)) {
4638 		p_schib = &ioinfo[irq]->schib;
4639 	} else {
4640 		p_schib = p_init_schib;
4641 
4642 	}
4643 
4644 	/*
4645 	 * If we knew the device before we assume the worst case ...
4646 	 */
4647 	if (ioinfo[irq] != INVALID_STORAGE_AREA) {
4648 		ioinfo[irq]->ui.flags.oper = 0;
4649 		ioinfo[irq]->ui.flags.dval = 0;
4650 
4651 	}
4652 
4653 	ccode = stsch (irq, p_schib);
4654 
4655 	if (ccode) {
4656 		return -ENXIO;
4657 	}
4658 	/*
4659 	 * ... just being curious we check for non I/O subchannels
4660 	 */
4661 	if (p_schib->pmcw.st) {
4662 		if (cio_show_msg) {
4663 			printk (KERN_INFO "Subchannel %04X reports "
4664 				"non-I/O subchannel type %04X\n",
4665 				irq, p_schib->pmcw.st);
4666 		}
4667 		CIO_MSG_EVENT(0,
4668 			      "Subchannel %04X reports "
4669 			      "non-I/O subchannel type %04X\n",
4670 			      irq, p_schib->pmcw.st);
4671 
4672 		if (ioinfo[irq] != INVALID_STORAGE_AREA)
4673 			ioinfo[irq]->ui.flags.oper = 0;
4674 
4675 	}
4676 
4677 	if ((!p_schib->pmcw.dnv) && (!p_schib->pmcw.st)) {
4678 		return -ENODEV;
4679 	}
4680 	if (!p_schib->pmcw.st) {
4681 		if (is_blacklisted (p_schib->pmcw.dev)) {
4682 			/*
4683 			 * This device must not be known to Linux. So we simply say that
4684 			 * there is no device and return ENODEV.
4685 			 */
4686 #ifdef CONFIG_DEBUG_IO
4687 			printk (KERN_DEBUG
4688 				"Blacklisted device detected at devno %04X\n",
4689 				p_schib->pmcw.dev);
4690 #endif
4691 			CIO_MSG_EVENT(0,
4692 				      "Blacklisted device detected at devno %04X\n",
4693 				      p_schib->pmcw.dev);
4694 			return -ENODEV;
4695 		}
4696 	}
4697 
4698 	if (ioinfo[irq] == INVALID_STORAGE_AREA) {
4699 		if (!init_IRQ_complete) {
4700 			ioinfo[irq] = (ioinfo_t *)
4701 			    alloc_bootmem_low (sizeof (ioinfo_t));
4702 		} else {
4703 			ioinfo[irq] = (ioinfo_t *)
4704 			    kmalloc (sizeof (ioinfo_t), GFP_DMA | GFP_ATOMIC);
4705 
4706 		}
4707 		if (!ioinfo[irq])
4708 			return -ENOMEM;
4709 
4710 
4711 		memset (ioinfo[irq], '\0', sizeof (ioinfo_t));
4712 		memcpy (&ioinfo[irq]->schib, p_init_schib, sizeof (schib_t));
4713 
4714 		/*
4715 		 * We have to insert the new ioinfo element
4716 		 *  into the linked list, either at its head,
4717 		 *  its tail or insert it.
4718 		 */
4719 		if (ioinfo_head == NULL) {	/* first element */
4720 			ioinfo_head = ioinfo[irq];
4721 			ioinfo_tail = ioinfo[irq];
4722 		} else if (irq < ioinfo_head->irq) {	/* new head */
4723 			ioinfo[irq]->next = ioinfo_head;
4724 			ioinfo_head->prev = ioinfo[irq];
4725 			ioinfo_head = ioinfo[irq];
4726 		} else if (irq > ioinfo_tail->irq) {	/* new tail */
4727 			ioinfo_tail->next = ioinfo[irq];
4728 			ioinfo[irq]->prev = ioinfo_tail;
4729 			ioinfo_tail = ioinfo[irq];
4730 		} else {	/* insert element */
4731 
4732 			ioinfo_t *pi = ioinfo_head;
4733 
4734 			for (pi = ioinfo_head; pi != NULL; pi = pi->next) {
4735 
4736 				if (irq < pi->next->irq) {
4737 					ioinfo[irq]->next = pi->next;
4738 					ioinfo[irq]->prev = pi;
4739 					pi->next->prev = ioinfo[irq];
4740 					pi->next = ioinfo[irq];
4741 					break;
4742 
4743 				}
4744 			}
4745 		}
4746 	}
4747 
4748 	/* initialize some values ... */
4749 	ioinfo[irq]->irq = irq;
4750 	ioinfo[irq]->st = ioinfo[irq]->schib.pmcw.st;
4751 	if (ioinfo[irq]->st)
4752 		return -ENODEV;
4753 
4754 	ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim
4755 	    & ioinfo[irq]->schib.pmcw.pam & ioinfo[irq]->schib.pmcw.pom;
4756 
4757 #ifdef CONFIG_CHSC
4758 	if (ioinfo[irq]->opm) {
4759 		for (chp=0;chp<=7;chp++) {
4760 			mask = 0x80 >> chp;
4761 			if (ioinfo[irq]->opm & mask) {
4762 				if (!test_bit
4763 				    (ioinfo[irq]->schib.pmcw.chpid[chp],
4764 				     &chpids_logical)) {
4765 					/* disable using this path */
4766 					ioinfo[irq]->opm &= ~mask;
4767 				}
4768 			} else {
4769 				/* This chpid is not available to us */
4770 				clear_bit(ioinfo[irq]->schib.pmcw.chpid[chp],
4771 					  &chpids);
4772 			}
4773 		}
4774 	}
4775 #endif /* CONFIG_CHSC */
4776 
4777 	if (cio_show_msg) {
4778 		printk (KERN_INFO
4779 			"Detected device %04X "
4780 			"on subchannel %04X"
4781 			" - PIM = %02X, PAM = %02X, POM = %02X\n",
4782 			ioinfo[irq]->schib.pmcw.dev,
4783 			irq,
4784 			ioinfo[irq]->schib.pmcw.pim,
4785 			ioinfo[irq]->schib.pmcw.pam,
4786 			ioinfo[irq]->schib.pmcw.pom);
4787 
4788 	}
4789 	CIO_MSG_EVENT(0,
4790 		      "Detected device %04X "
4791 		      "on subchannel %04X"
4792 		      " - PIM = %02X, "
4793 		      "PAM = %02X, POM = %02X\n",
4794 		      ioinfo[irq]->schib.pmcw.dev,
4795 		      irq,
4796 		      ioinfo[irq]->schib.pmcw.pim,
4797 		      ioinfo[irq]->schib.pmcw.pam,
4798 		      ioinfo[irq]->schib.pmcw.pom);
4799 
4800 	/*
4801 	 * initialize ioinfo structure
4802 	 */
4803 	if (!ioinfo[irq]->ui.flags.ready) {
4804 		ioinfo[irq]->nopfunc = NULL;
4805 		ioinfo[irq]->ui.flags.busy = 0;
4806 		ioinfo[irq]->ui.flags.dval = 1;
4807 		ioinfo[irq]->devstat.intparm = 0;
4808 
4809 	}
4810 	ioinfo[irq]->devstat.devno = ioinfo[irq]->schib.pmcw.dev;
4811 	ioinfo[irq]->devno = ioinfo[irq]->schib.pmcw.dev;
4812 
4813 	/*
4814 	 * We should have at least one CHPID ...
4815 	 */
4816 	if (ioinfo[irq]->opm) {
4817 		/*
4818 		 * We now have to initially ...
4819 		 *  ... set "interruption sublass"
4820 		 *  ... enable "concurrent sense"
4821 		 *  ... enable "multipath mode" if more than one
4822 		 *        CHPID is available. This is done regardless
4823 		 *        whether multiple paths are available for us.
4824 		 *
4825 		 * Note : we don't enable the device here, this is temporarily
4826 		 *        done during device sensing below.
4827 		 */
4828 		ioinfo[irq]->schib.pmcw.isc = 3;	/* could be smth. else */
4829 		ioinfo[irq]->schib.pmcw.csense = 1;	/* concurrent sense */
4830 		ioinfo[irq]->schib.pmcw.ena = enable;
4831 		ioinfo[irq]->schib.pmcw.intparm = ioinfo[irq]->schib.pmcw.dev;
4832 
4833 		if ((ioinfo[irq]->opm != 0x80)
4834 		    && (ioinfo[irq]->opm != 0x40)
4835 		    && (ioinfo[irq]->opm != 0x20)
4836 		    && (ioinfo[irq]->opm != 0x10)
4837 		    && (ioinfo[irq]->opm != 0x08)
4838 		    && (ioinfo[irq]->opm != 0x04)
4839 		    && (ioinfo[irq]->opm != 0x02)
4840 		    && (ioinfo[irq]->opm != 0x01)) {
4841 			ioinfo[irq]->schib.pmcw.mp = 1;	/* multipath mode */
4842 
4843 		}
4844 
4845 		retry = 5;
4846 
4847 		do {
4848 			ccode2 = msch_err (irq, &ioinfo[irq]->schib);
4849 
4850 			switch (ccode2) {
4851 			case 0:
4852 				/*
4853 				 * successful completion
4854 				 *
4855 				 * concurrent sense facility available
4856 				 */
4857 				ioinfo[irq]->ui.flags.oper = 1;
4858 				ioinfo[irq]->ui.flags.consns = 1;
4859 				ret = 0;
4860 				break;
4861 
4862 			case 1:
4863 				/*
4864 				 * status pending
4865 				 *
4866 				 * How can we have a pending status
4867 				 * as the device is disabled for
4868 				 * interrupts ?
4869 				 * Anyway, process it ...
4870 				 */
4871 				ioinfo[irq]->ui.flags.s_pend = 1;
4872 				s390_process_IRQ (irq);
4873 				ioinfo[irq]->ui.flags.s_pend = 0;
4874 				retry--;
4875 				ret = -EIO;
4876 				break;
4877 
4878 			case 2:
4879 				/*
4880 				 * busy
4881 				 *
4882 				 * we mark it not-oper as we can't
4883 				 * properly operate it !
4884 				 */
4885 				ioinfo[irq]->ui.flags.oper = 0;
4886 				udelay (100);	/* allow for recovery */
4887 				retry--;
4888 				ret = -EBUSY;
4889 				break;
4890 
4891 			case 3:	/* not operational */
4892 				ioinfo[irq]->ui.flags.oper = 0;
4893 				retry = 0;
4894 				ret = -ENODEV;
4895 				break;
4896 
4897 			default:
4898 #define PGMCHK_OPERAND_EXC      0x15
4899 
4900 				if ((ccode2 & PGMCHK_OPERAND_EXC)
4901 				    == PGMCHK_OPERAND_EXC) {
4902 					/*
4903 					 * re-issue the modify subchannel without trying to
4904 					 *  enable the concurrent sense facility
4905 					 */
4906 					ioinfo[irq]->schib.pmcw.csense = 0;
4907 
4908 					ccode2 =
4909 					    msch_err (irq, &ioinfo[irq]->schib);
4910 
4911 					if (ccode2 != 0) {
4912 						printk (KERN_ERR
4913 							" ... msch() (2) failed"
4914 							" with CC = %X\n",
4915 							ccode2);
4916 						CIO_MSG_EVENT(0,
4917 							      "msch() (2) failed"
4918 							      " with CC=%X\n",
4919 							      ccode2);
4920 						ioinfo[irq]->ui.flags.oper = 0;
4921 						ret = -EIO;
4922 					} else {
4923 						ioinfo[irq]->ui.flags.oper = 1;
4924 						ioinfo[irq]->ui.
4925 						    flags.consns = 0;
4926 						ret = 0;
4927 
4928 					}
4929 
4930 				} else {
4931 					printk (KERN_ERR
4932 						" ... msch() (1) failed with "
4933 						"CC = %X\n", ccode2);
4934 					CIO_MSG_EVENT(0,
4935 						      "msch() (1) failed with "
4936 						      "CC = %X\n", ccode2);
4937 					ioinfo[irq]->ui.flags.oper = 0;
4938 					ret = -EIO;
4939 
4940 				}
4941 
4942 				retry = 0;
4943 				break;
4944 
4945 			}
4946 
4947 		} while (ccode2 && retry);
4948 
4949 		if ((ccode2 != 0) && (ccode2 != 3)
4950 		    && (!retry)) {
4951 			printk (KERN_ERR
4952 				" ... msch() retry count for "
4953 				"subchannel %04X exceeded, CC = %d\n",
4954 				irq, ccode2);
4955 			CIO_MSG_EVENT(0,
4956 				      " ... msch() retry count for "
4957 				      "subchannel %04X exceeded, CC = %d\n",
4958 				      irq, ccode2);
4959 
4960 		}
4961 	} else {
4962 		/* no path available ... */
4963 		ioinfo[irq]->ui.flags.oper = 0;
4964 		ret = -ENODEV;
4965 
4966 	}
4967 
4968 	return (ret);
4969 }
4970 
4971 /*
4972  * s390_SenseID
4973  *
4974  * Try to obtain the 'control unit'/'device type' information
4975  *  associated with the subchannel.
4976  *
4977  * The function is primarily meant to be called without irq
4978  *  action handler in place. However, it also allows for
4979  *  use with an action handler in place. If there is already
4980  *  an action handler registered assure it can handle the
4981  *  s390_SenseID() related device interrupts - interruption
4982  *  parameter used is 0x00E2C9C4 ( SID ).
4983  */
int
s390_SenseID (int irq, senseid_t * sid, __u8 lpm)
{
	ccw1_t *sense_ccw;	/* ccw area for SenseID command */
	senseid_t isid;		/* internal sid */
	devstat_t devstat;	/* required by request_irq() */
	__u8 pathmask;		/* calculate path mask */
	__u8 domask;		/* path mask to use */
	int inlreq;		/* inline request_irq() */
	int irq_ret;		/* return code */
	devstat_t *pdevstat;	/* ptr to devstat in use */
	int retry;		/* retry count */
	int io_retry;		/* retry indicator */

	senseid_t *psid = sid;	/* start with the external buffer */
	int sbuffer = 0;	/* switch SID data buffer */

	char dbf_txt[15];
	int i;
	int failure = 0;	/* nothing went wrong yet */

	SANITY_CHECK (irq);

	/* Device must be operational, else there is nothing to sense. */
	if (ioinfo[irq]->ui.flags.oper == 0) {
		return (-ENODEV);

	}

 	if (ioinfo[irq]->ui.flags.unfriendly) {
 		/* don't even try it */
 		return -EUSERS;
 	}

	sprintf (dbf_txt, "snsID%x", irq);
	CIO_TRACE_EVENT (4, dbf_txt);

	inlreq = 0;		/* to make the compiler quiet... */

	if (!ioinfo[irq]->ui.flags.ready) {

		pdevstat = &devstat;

		/*
		 * Perform SENSE ID command processing. We have to request device
		 *  ownership and provide a dummy I/O handler. We issue sync. I/O
		 *  requests and evaluate the devstat area on return therefore
		 *  we don't need a real I/O handler in place.
		 */
		irq_ret =
		    request_irq (irq, init_IRQ_handler, SA_PROBE, "SID",
				 &devstat);

		if (irq_ret == 0)
			inlreq = 1;
	} else {
		/* A handler is already registered - reuse its devstat area. */
		inlreq = 0;
		irq_ret = 0;
		pdevstat = ioinfo[irq]->irq_desc.dev_id;

	}

	if (irq_ret) {
		return irq_ret;
	}

	s390irq_spin_lock (irq);

	/* ccw area must be 31-bit addressable (GFP_DMA / low bootmem). */
	if (init_IRQ_complete) {
		sense_ccw = kmalloc (2 * sizeof (ccw1_t), GFP_DMA | GFP_ATOMIC);
	} else {
		sense_ccw = alloc_bootmem_low (2 * sizeof (ccw1_t));

	}
	if (!sense_ccw) {
		s390irq_spin_unlock (irq);
		if (inlreq)
			free_irq (irq, &devstat);
		return -ENOMEM;
	}

	/* more than one path installed ? */
	if (ioinfo[irq]->schib.pmcw.pim != 0x80) {
		/* suspend multipath reconnection, then chain the SenseID */
		sense_ccw[0].cmd_code = CCW_CMD_SUSPEND_RECONN;
		sense_ccw[0].cda = 0;
		sense_ccw[0].count = 0;
		sense_ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;

		sense_ccw[1].cmd_code = CCW_CMD_SENSE_ID;
		sense_ccw[1].cda = (__u32) virt_to_phys (sid);
		sense_ccw[1].count = sizeof (senseid_t);
		sense_ccw[1].flags = CCW_FLAG_SLI;
	} else {
		sense_ccw[0].cmd_code = CCW_CMD_SENSE_ID;
		sense_ccw[0].cda = (__u32) virt_to_phys (sid);
		sense_ccw[0].count = sizeof (senseid_t);
		sense_ccw[0].flags = CCW_FLAG_SLI;

	}

	/* Issue the SenseID on each path selected by opm (and lpm, if given). */
	for (i = 0; (i < 8); i++) {
		pathmask = 0x80 >> i;

		domask = ioinfo[irq]->opm & pathmask;

		if (lpm)
			domask &= lpm;

		if (!domask)
			continue;

		failure = 0;

		memset(psid, 0, sizeof(senseid_t));
		psid->cu_type = 0xFFFF;	/* initialize fields ... */

		retry = 5;	/* retry count    */
		io_retry = 1;	/* enable retries */

		/*
		 * We now issue a SenseID request. In case of BUSY,
		 *  STATUS PENDING or non-CMD_REJECT error conditions
		 *  we run simple retries.
		 */
		do {
			memset (pdevstat, '\0', sizeof (devstat_t));

			irq_ret = s390_start_IO (irq, sense_ccw, 0x00E2C9C4,	/* == SID */
						 domask,
						 DOIO_WAIT_FOR_INTERRUPT
						 | DOIO_TIMEOUT
						 | DOIO_VALID_LPM
						 | DOIO_DONT_CALL_INTHDLR);

			/* valid data overwrites the pre-set 0xFFFF cu_type */
			if ((psid->cu_type != 0xFFFF)
			    && (psid->reserved == 0xFF)) {
				if (!sbuffer) {	/* switch buffers */
					/*
					 * we report back the
					 *  first hit only
					 */
					psid = &isid;

					if (ioinfo[irq]->schib.pmcw.pim != 0x80) {
						sense_ccw[1].cda = (__u32)
						    virt_to_phys (psid);
					} else {
						sense_ccw[0].cda = (__u32)
						    virt_to_phys (psid);

					}

					/*
					 * if just the very first
					 *  was requested to be
					 *  sensed disable further
					 *  scans.
					 */
					if (!lpm)
						lpm = domask;

					sbuffer = 1;

				}

				/* short residual count -> extended SenseID data */
				if (pdevstat->rescnt < (sizeof (senseid_t) - 8)) {
					ioinfo[irq]->ui.flags.esid = 1;

				}

				io_retry = 0;

				break;
			}

			failure = 1;

			if (pdevstat->flag & DEVSTAT_STATUS_PENDING) {
#ifdef CONFIG_DEBUG_IO
				printk (KERN_DEBUG
					"SenseID : device %04X on "
					"Subchannel %04X "
					"reports pending status, "
					"retry : %d\n",
					ioinfo[irq]->schib.pmcw.dev, irq,
					retry);
#endif
				CIO_MSG_EVENT(2,
					      "SenseID : device %04X on "
					      "Subchannel %04X "
					      "reports pending status, "
					      "retry : %d\n",
					      ioinfo
					      [irq]->schib.pmcw.dev, irq, retry);
			}

			else if (pdevstat->flag & DEVSTAT_FLAG_SENSE_AVAIL) {
				/*
				 * if the device doesn't support the SenseID
				 *  command further retries wouldn't help ...
				 */
				if (pdevstat->ii.sense.data[0]
				    & (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)) {
#ifdef CONFIG_DEBUG_IO
					printk (KERN_ERR
						"SenseID : device %04X on "
						"Subchannel %04X "
						"reports cmd reject or "
						"intervention required\n",
						ioinfo[irq]->schib.pmcw.dev,
						irq);
#endif
					CIO_MSG_EVENT(2,
						      "SenseID : device %04X on "
						      "Subchannel %04X "
						      "reports cmd reject or "
						      "intervention required\n",
						      ioinfo[irq]->schib.pmcw.dev,
						      irq);
					io_retry = 0;
				} else {
					/* other unit check: log sense and retry */
#ifdef CONFIG_DEBUG_IO
					printk
					    (KERN_WARNING
					     "SenseID : UC on "
					     "dev %04X, "
					     "retry %d, "
					     "lpum %02X, "
					     "cnt %02d, "
					     "sns :"
					     " %02X%02X%02X%02X "
					     "%02X%02X%02X%02X ...\n",
					     ioinfo[irq]->schib.pmcw.dev,
					     retry,
					     pdevstat->lpum,
					     pdevstat->scnt,
					     pdevstat->ii.sense.data[0],
					     pdevstat->ii.sense.data[1],
					     pdevstat->ii.sense.data[2],
					     pdevstat->ii.sense.data[3],
					     pdevstat->ii.sense.data[4],
					     pdevstat->ii.sense.data[5],
					     pdevstat->ii.sense.data[6],
					     pdevstat->ii.sense.data[7]);
#endif
					CIO_MSG_EVENT(2,
						      "SenseID : UC on "
						      "dev %04X, "
						      "retry %d, "
						      "lpum %02X, "
						      "cnt %02d, "
						      "sns :"
						      " %02X%02X%02X%02X "
						      "%02X%02X%02X%02X ...\n",
						      ioinfo[irq]->
						      schib.pmcw.dev,
						      retry,
						      pdevstat->lpum,
						      pdevstat->scnt,
						      pdevstat->
						      ii.sense.data[0],
						      pdevstat->
						      ii.sense.data[1],
						      pdevstat->
						      ii.sense.data[2],
						      pdevstat->
						      ii.sense.data[3],
						      pdevstat->
						      ii.sense.data[4],
						      pdevstat->
						      ii.sense.data[5],
						      pdevstat->
						      ii.sense.data[6],
						      pdevstat->
						      ii.sense.data[7]);

				}

			} else if ((pdevstat->flag & DEVSTAT_NOT_OPER)
				   || (irq_ret == -ENODEV)) {
				/* path is dead - remove it from the opm */
#ifdef CONFIG_DEBUG_IO
				printk (KERN_ERR
					"SenseID : path %02X for "
					"device %04X on "
					"subchannel %04X "
					"is 'not operational'\n",
					domask,
					ioinfo[irq]->schib.pmcw.dev, irq);
#endif
				CIO_MSG_EVENT(2,
					      "SenseID : path %02X for "
					      "device %04X on "
					      "subchannel %04X "
					      "is 'not operational'\n",
					      domask,
					      ioinfo[irq]->schib.pmcw.dev, irq);

				io_retry = 0;
				ioinfo[irq]->opm &= ~domask;

			} else {
#ifdef CONFIG_DEBUG_IO
				printk (KERN_INFO
					"SenseID : start_IO() for "
					"device %04X on "
					"subchannel %04X "
					"returns %d, retry %d, "
					"status %04X\n",
					ioinfo[irq]->schib.pmcw.dev,
					irq, irq_ret, retry, pdevstat->flag);
#endif
				CIO_MSG_EVENT(2,
					     "SenseID : start_IO() for "
					     "device %04X on "
					     "subchannel %04X "
					     "returns %d, retry %d, "
					     "status %04X\n",
					     ioinfo[irq]->schib.pmcw.dev, irq,
					     irq_ret, retry, pdevstat->flag);

				if (irq_ret == -ETIMEDOUT) {
					int xret;

					/*
					 * Seems we need to cancel the first ssch sometimes...
					 * On the next try, the ssch will usually be fine.
					 */

					xret = cancel_IO (irq);

					if (!xret)
						CIO_MSG_EVENT(2,
							      "SenseID: sch canceled "
							      "successfully for irq %x\n",
							      irq);
				}

			}

			if (io_retry) {
				retry--;

				if (retry == 0) {
					io_retry = 0;

				}
			}

			/* re-initialize the buffer before the next attempt */
			if ((failure) && (io_retry)) {
				/* reset fields... */

				failure = 0;

				memset(psid, 0, sizeof(senseid_t));
				psid->cu_type = 0xFFFF;
			}

		} while ((io_retry));

	}

	if (init_IRQ_complete) {
		kfree (sense_ccw);
	} else {
		free_bootmem ((unsigned long) sense_ccw, 2 * sizeof (ccw1_t));

	}

	s390irq_spin_unlock (irq);

	/*
	 * If we installed the irq action handler we have to
	 *  release it too.
	 */
	if (inlreq)
		free_irq (irq, pdevstat);

	/*
	 * if running under VM check there ... perhaps we should do
	 *  only if we suffered a command reject, but it doesn't harm
	 */
	if ((sid->cu_type == 0xFFFF)
	    && (MACHINE_IS_VM)) {
		VM_virtual_device_info (ioinfo[irq]->schib.pmcw.dev, sid);
	}

	if (sid->cu_type == 0xFFFF) {
		/*
		 * SenseID CU-type of 0xffff indicates that no device
		 *  information could be retrieved (pre-init value).
		 *
		 * If we couldn't identify the device type we
		 *  consider the device "not operational".
		 */
#ifdef CONFIG_DEBUG_IO
		printk (KERN_WARNING
			"SenseID : unknown device %04X on subchannel %04X\n",
			ioinfo[irq]->schib.pmcw.dev, irq);
#endif
		CIO_MSG_EVENT(2,
			      "SenseID : unknown device %04X on subchannel %04X\n",
			      ioinfo[irq]->schib.pmcw.dev, irq);
		ioinfo[irq]->ui.flags.unknown = 1;

	}

	/*
	 * Issue device info message if unit was operational .
	 */
	if (!ioinfo[irq]->ui.flags.unknown) {
		if (sid->dev_type != 0) {
			if (cio_show_msg)
				printk (KERN_INFO
					"SenseID : device %04X reports: "
					"CU  Type/Mod = %04X/%02X,"
					" Dev Type/Mod = %04X/%02X\n",
					ioinfo[irq]->schib.pmcw.dev,
					sid->cu_type, sid->cu_model,
					sid->dev_type, sid->dev_model);
			CIO_MSG_EVENT(2,
				      "SenseID : device %04X reports: "
				      "CU  Type/Mod = %04X/%02X,"
				      " Dev Type/Mod = %04X/%02X\n",
				      ioinfo[irq]->schib.
				      pmcw.dev,
				      sid->cu_type,
				      sid->cu_model,
				      sid->dev_type,
				      sid->dev_model);
		} else {
			if (cio_show_msg)
				printk (KERN_INFO
					"SenseID : device %04X reports:"
					" Dev Type/Mod = %04X/%02X\n",
					ioinfo[irq]->schib.pmcw.dev,
					sid->cu_type, sid->cu_model);
			CIO_MSG_EVENT(2,
				      "SenseID : device %04X reports:"
				      " Dev Type/Mod = %04X/%02X\n",
				      ioinfo[irq]->schib.
				      pmcw.dev,
				      sid->cu_type,
				      sid->cu_model);
		}

	}

	/* map "still unknown" to -ENODEV, but keep a timeout visible */
	if (!ioinfo[irq]->ui.flags.unknown)
		irq_ret = 0;
	else if (irq_ret != -ETIMEDOUT)
		irq_ret = -ENODEV;

	return (irq_ret);
}
5437 
5438 static int __inline__
s390_SetMultiPath(int irq)5439 s390_SetMultiPath (int irq)
5440 {
5441 	int cc;
5442 
5443 	cc = stsch (irq, &ioinfo[irq]->schib);
5444 
5445 	if (!cc) {
5446 		ioinfo[irq]->schib.pmcw.mp = 1;	/* multipath mode */
5447 
5448 		cc = msch (irq, &ioinfo[irq]->schib);
5449 
5450 	}
5451 
5452 	return (cc);
5453 }
5454 
static int
s390_do_path_verification(int irq, __u8 usermask)
{
	__u8 domask;		/* single-path mask for the current iteration */
	int i;
	pgid_t pgid;		/* PGID retrieved via SensePGID on fallback */
	__u8 dev_path;		/* set of paths to verify */
	int first = 1;		/* recovery is attempted on the first path only */
	int ret = 0;
	char dbf_txt[15];

	sprintf(dbf_txt, "dopv%x", irq);
	CIO_TRACE_EVENT(2, dbf_txt);

	/* verify the user-supplied paths, or all operational paths */
	dev_path = usermask ? usermask : ioinfo[irq]->opm;

	/* lazily adopt the global path group ID */
	if (ioinfo[irq]->ui.flags.pgid == 0) {
		memcpy (&ioinfo[irq]->pgid, global_pgid, sizeof (pgid_t));
		ioinfo[irq]->ui.flags.pgid = 1;
	}

	/* loop stops early when a branch below leaves ret non-zero */
	for (i = 0; i < 8 && !ret; i++) {

		domask = dev_path & (0x80>>i);

		if (!domask)
			continue;

		if (!test_bit(ioinfo[irq]->schib.pmcw.chpid[i],
			      &chpids_logical))
			/* Chpid is logically offline, don't do io */
			continue;

		ret = s390_SetPGID (irq, domask);

		/*
		 * For the *first* path we are prepared for recovery
		 *
		 *  - If we fail setting the PGID we assume its
		 *     using  a different PGID already (VM) we
		 *     try to sense.
		 */
		if (ret == -EOPNOTSUPP && first) {
			/* NOTE(review): clears only the first word of pgid -
			 *  presumably sufficient before SensePGID; confirm */
			*(int *) &pgid = 0;

			ret = s390_SensePGID (irq, domask, &pgid);
			first = 0;

			if (ret == 0) {
				/*
				 * Check whether we retrieved
				 *  a reasonable PGID ...
				 */
				if (pgid.inf.ps.state1 == SNID_STATE1_GROUPED)
					memcpy (&ioinfo[irq]->pgid,
						&pgid, sizeof (pgid_t));
				else /* ungrouped or garbage ... */
					ret = -EOPNOTSUPP;

			} else {
				ioinfo[irq]->ui.flags.pgid_supp = 0;

#ifdef CONFIG_DEBUG_IO
				printk (KERN_WARNING
					"PathVerification(%04X) - Device %04X "
					"doesn't support path grouping\n",
					irq, ioinfo[irq]->schib.pmcw.dev);
#endif
				CIO_MSG_EVENT(2, "PathVerification(%04X) "
					      "- Device %04X doesn't "
					      " support path grouping\n",
					      irq,
					      ioinfo[irq]->schib.pmcw.dev);

			}
		} else if (ret == -EIO) {
#ifdef CONFIG_DEBUG_IO
			printk (KERN_ERR "PathVerification(%04X) - I/O error "
				"on device %04X\n", irq,
				ioinfo[irq]->schib.pmcw.dev);
#endif

			CIO_MSG_EVENT(2, "PathVerification(%04X) - I/O error "
				      "on device %04X\n", irq,
				      ioinfo[irq]->schib.pmcw.dev);

			ioinfo[irq]->ui.flags.pgid_supp = 0;

		} else if (ret == -ETIMEDOUT) {
#ifdef CONFIG_DEBUG_IO
			printk (KERN_ERR "PathVerification(%04X) - I/O timed "
				"out on device %04X\n", irq,
				ioinfo[irq]->schib.pmcw.dev);
#endif
			CIO_MSG_EVENT(2, "PathVerification(%04X) - I/O timed "
				      "out on device %04X\n", irq,
				      ioinfo[irq]->schib.pmcw.dev);

			ioinfo[irq]->ui.flags.pgid_supp = 0;

		} else if (ret == -EAGAIN) {
			/* transient - keep verifying the remaining paths */
			ret = 0;
		} else if (ret == -EUSERS) {

#ifdef CONFIG_DEBUG_IO
			printk (KERN_ERR "PathVerification(%04X) "
				"- Device is locked by someone else!\n",
				irq);
#endif
			CIO_MSG_EVENT(2, "PathVerification(%04X) "
				      "- Device is locked by someone else!\n",
				      irq);
		} else if (ret == -ENODEV) {
#ifdef CONFIG_DEBUG_IO
			printk (KERN_ERR "PathVerification(%04X) "
				"- Device %04X is no longer there?!?\n",
				irq, ioinfo[irq]->schib.pmcw.dev);
#endif
			CIO_MSG_EVENT(2, "PathVerification(%04X) "
				      "- Device %04X is no longer there?!?\n",
				      irq, ioinfo[irq]->schib.pmcw.dev);

		} else if (ret == -EBUSY) {
			/*
			 * The device is busy. Schedule the path verification
			 * bottom half and we'll hopefully get in next time.
			 */
			if (!ioinfo[irq]->ui.flags.noio) {
				s390_schedule_path_verification(irq);
			}
			return -EINPROGRESS;
		} else if (ret) {
#ifdef CONFIG_DEBUG_IO
			printk (KERN_ERR "PathVerification(%04X) "
				"- Unexpected error %d on device %04X\n",
				irq, ret, ioinfo[irq]->schib.pmcw.dev);
#endif
			CIO_MSG_EVENT(2, "PathVerification(%04X) - "
				      "Unexpected error %d on device %04X\n",
				      irq, ret, ioinfo[irq]->schib.pmcw.dev);

			ioinfo[irq]->ui.flags.pgid_supp = 0;
		}
	}
	if (stsch(irq, &ioinfo[irq]->schib) != 0)
		/* FIXME: tell driver device is dead. */
		return -ENODEV;

	/*
	 * stsch() doesn't always yield the correct pim, pam, and pom
	 * values, if no device selection has been performed yet.
	 * However, after complete path verification they are up to date.
	 */
	ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim &
		ioinfo[irq]->schib.pmcw.pam &
		ioinfo[irq]->schib.pmcw.pom;

#ifdef CONFIG_CHSC
	/* mask out paths whose chpids are logically offline */
	if (ioinfo[irq]->opm) {
		for (i=0;i<=7;i++) {
			int mask = 0x80 >> i;
			if ((ioinfo[irq]->opm & mask) &&
			    (!test_bit(ioinfo[irq]->schib.pmcw.chpid[i],
				       &chpids_logical)))
				/* disable using this path */
				ioinfo[irq]->opm &= ~mask;
		}
	}
#endif /* CONFIG_CHSC */

	/* i/o may flow again */
	ioinfo[irq]->ui.flags.noio = 0;

	/* Eventually wake up the device driver. */
	if (ioinfo[irq]->opm != 0) {
		devreg_t *pdevreg;
		pdevreg = s390_search_devreg(ioinfo[irq]);

		if (pdevreg && pdevreg->oper_func)
			pdevreg->oper_func(irq, pdevreg);
	}
	return ret;

}
5639 
5640 /*
5641  * Device Path Verification
5642  *
5643  * Path verification is accomplished by checking which paths (CHPIDs) are
5644  *  available. Further, a path group ID is set, if possible in multipath
5645  *  mode, otherwise in single path mode.
5646  *
5647  * Note : This function must not be called during normal device recognition,
5648  *         but during device driver initiated request_irq() processing only.
5649  */
int
s390_DevicePathVerification (int irq, __u8 usermask)
{
	int ccode;
#ifdef CONFIG_CHSC
	int chp;
	int mask;
	int old_opm = 0;	/* opm before refreshing the schib */
#endif /* CONFIG_CHSC */

	int ret = 0;

	char dbf_txt[15];
	devreg_t *pdevreg;

	sprintf (dbf_txt, "dpver%x", irq);
	CIO_TRACE_EVENT (4, dbf_txt);

	/* non-I/O subchannels have no paths to verify */
	if (ioinfo[irq]->st)
		return -ENODEV;

#ifdef CONFIG_CHSC
	old_opm = ioinfo[irq]->opm;
#endif /* CONFIG_CHSC */
	ccode = stsch (irq, &(ioinfo[irq]->schib));

	if (ccode)
		return -ENODEV;

	if (ioinfo[irq]->schib.pmcw.pim == 0x80) {
		/*
		 * no error, just not required for single path only devices
		 */
		ioinfo[irq]->ui.flags.pgid_supp = 0;
		ret = 0;
		ioinfo[irq]->ui.flags.noio = 0;

#ifdef CONFIG_CHSC
		/*
		 * disable if chpid is logically offline
		 */
		if (!test_bit(ioinfo[irq]->schib.pmcw.chpid[0],
			      &chpids_logical)) {

			ioinfo[irq]->opm = 0;
			ioinfo[irq]->ui.flags.oper = 0;
			printk(KERN_WARNING
			       "No logical path for sch %d...\n",
			       irq);

			/* notify (or unregister) the owning driver */
			if (ioinfo[irq]->nopfunc) {
				if (ioinfo[irq]->ui.flags.notacccap)
					ioinfo[irq]->nopfunc(irq,
							     DEVSTAT_NOT_ACC);
				else {
					not_oper_handler_func_t nopfunc =
						ioinfo[irq]->nopfunc;
#ifdef CONFIG_PROC_FS
					/* remove procfs entry */
					if (cio_proc_devinfo)
						cio_procfs_device_remove
							(ioinfo[irq]->devno);
#endif
					free_irq(irq,
						 ioinfo[irq]->irq_desc.dev_id);
					nopfunc(irq, DEVSTAT_DEVICE_GONE);
				}
			}
			return -ENODEV;
		}
		/* the path just (re)appeared - recompute the opm */
		if (!old_opm) {

			ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim
				& ioinfo[irq]->schib.pmcw.pam
				& ioinfo[irq]->schib.pmcw.pom;

			if (ioinfo[irq]->opm) {

				ioinfo[irq]->ui.flags.oper = 1;
				pdevreg = s390_search_devreg(ioinfo[irq]);

				if (pdevreg && pdevreg->oper_func)
					pdevreg->oper_func(irq, pdevreg);
				ret = 0;
			} else {
				ret = -ENODEV;
			}
		}
#endif /* CONFIG_CHSC */
		return ret;
	}

	/* multipath device: recompute the operational path mask */
	ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim
	    & ioinfo[irq]->schib.pmcw.pam & ioinfo[irq]->schib.pmcw.pom;

#ifdef CONFIG_CHSC
	/* mask out paths whose chpids are logically offline */
	if (ioinfo[irq]->opm) {
		for (chp=0;chp<=7;chp++) {
			mask = 0x80 >> chp;
			if ((ioinfo[irq]->opm & mask)
			    &&(!test_bit(ioinfo[irq]->schib.pmcw.chpid[chp],
					 &chpids_logical)))
				/* disable using this path */
				ioinfo[irq]->opm &= ~mask;
		}
	}

#endif /* CONFIG_CHSC */

	/* no path grouping support - just report operational state */
	if (ioinfo[irq]->ui.flags.pgid_supp == 0) {

		if (ioinfo[irq]->opm == 0)
			return -ENODEV;

		ioinfo[irq]->ui.flags.oper = 1;
		ioinfo[irq]->ui.flags.noio = 0;

		pdevreg = s390_search_devreg(ioinfo[irq]);

		if (pdevreg && pdevreg->oper_func)
			pdevreg->oper_func(irq, pdevreg);

		return 0;
	}

	/* full verification only for driver-owned devices */
	if (ioinfo[irq]->ui.flags.ready)
		return s390_do_path_verification (irq, usermask);
	return 0;

}
5780 
5781 void
s390_kick_path_verification(unsigned long irq)5782 s390_kick_path_verification (unsigned long irq)
5783 {
5784 	long cr6 __attribute__ ((aligned (8)));
5785 
5786 	atomic_set (&ioinfo[irq]->pver_pending, 0);
5787 	/* Do not enter path verification if sync_isc is enabled. */
5788 	__ctl_store (cr6, 6, 6);
5789 	if (cr6 & 0x04000000) {
5790 		s390_schedule_path_verification (irq);
5791 		return;
5792 	}
5793 	ioinfo[irq]->ui.flags.killio = 0;
5794 	s390_DevicePathVerification(irq, 0xff);
5795 
5796 }
5797 
/*
 * s390_schedule_path_verification
 *
 * Defer path verification for subchannel irq to bottom-half context by
 * queueing s390_kick_path_verification on the immediate task queue.
 * Safe to call repeatedly; only one request is ever outstanding.
 */
static void
s390_schedule_path_verification(unsigned long irq)
{
	/* Protect against rescheduling, when already running */
	if (atomic_compare_and_swap (0, 1, &ioinfo[irq]->pver_pending)) {
		return;
	}

	/*
	 * Call path verification.
	 * Note this is always called from inside the i/o layer, so we don't
	 * need to care about the usermask.
	 */
	/* (Re-)initialize the task queue element before queueing it. */
	INIT_LIST_HEAD (&ioinfo[irq]->pver_bh.list);
	ioinfo[irq]->pver_bh.sync = 0;
	ioinfo[irq]->pver_bh.routine = (void*) (void*) s390_kick_path_verification;
	ioinfo[irq]->pver_bh.data = (void*) irq;
	queue_task (&ioinfo[irq]->pver_bh, &tq_immediate);
	/* Trigger the immediate bottom half so the task runs soon. */
	mark_bh (IMMEDIATE_BH);
}
5818 
5819 /*
5820  * s390_SetPGID
5821  *
5822  * Set Path Group ID
5823  *
5824  */
5825 int
s390_SetPGID(int irq,__u8 lpm)5826 s390_SetPGID (int irq, __u8 lpm)
5827 {
5828 	ccw1_t *spid_ccw;	/* ccw area for SPID command */
5829 	devstat_t devstat;	/* required by request_irq() */
5830 	devstat_t *pdevstat = &devstat;
5831 	unsigned long flags;
5832 	char dbf_txt[15];
5833 
5834 	int irq_ret = 0;	/* return code */
5835 	int retry = 5;		/* retry count */
5836 	int inlreq = 0;		/* inline request_irq() */
5837 	int mpath = 1;		/* try multi-path first */
5838 
5839 	SANITY_CHECK (irq);
5840 
5841 	if (ioinfo[irq]->ui.flags.oper == 0) {
5842 		return (-ENODEV);
5843 
5844 	}
5845 
5846  	if (ioinfo[irq]->ui.flags.unfriendly) {
5847  		/* don't even try it */
5848  		return -EUSERS;
5849  	}
5850 
5851 	sprintf (dbf_txt, "SPID%x", irq);
5852 	CIO_TRACE_EVENT (4, dbf_txt);
5853 
5854 	if (!ioinfo[irq]->ui.flags.ready) {
5855 		/*
5856 		 * Perform SetPGID command processing. We have to request device
5857 		 *  ownership and provide a dummy I/O handler. We issue sync. I/O
5858 		 *  requests and evaluate the devstat area on return therefore
5859 		 *  we don't need a real I/O handler in place.
5860 		 */
5861 		irq_ret = request_irq (irq,
5862 				       init_IRQ_handler,
5863 				       SA_PROBE, "SPID", pdevstat);
5864 
5865 		if (irq_ret == 0)
5866 			inlreq = 1;
5867 	} else {
5868 		pdevstat = ioinfo[irq]->irq_desc.dev_id;
5869 
5870 	}
5871 
5872 	if (irq_ret) {
5873 		return irq_ret;
5874 	}
5875 
5876 	s390irq_spin_lock_irqsave (irq, flags);
5877 
5878 	if (init_IRQ_complete) {
5879 		spid_ccw = kmalloc (2 * sizeof (ccw1_t), GFP_DMA | GFP_ATOMIC);
5880 	} else {
5881 		spid_ccw = alloc_bootmem_low (2 * sizeof (ccw1_t));
5882 	}
5883 	if (!spid_ccw) {
5884 		s390irq_spin_unlock_irqrestore(irq, flags);
5885 		if (inlreq)
5886 			free_irq(irq, pdevstat);
5887 		return -ENOMEM;
5888 	}
5889 
5890 	spid_ccw[0].cmd_code = CCW_CMD_SUSPEND_RECONN;
5891 	spid_ccw[0].cda = 0;
5892 	spid_ccw[0].count = 0;
5893 	spid_ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
5894 
5895 	spid_ccw[1].cmd_code = CCW_CMD_SET_PGID;
5896 	spid_ccw[1].cda = (__u32) virt_to_phys (&ioinfo[irq]->pgid);
5897 	spid_ccw[1].count = sizeof (pgid_t);
5898 	spid_ccw[1].flags = CCW_FLAG_SLI;
5899 
5900 	ioinfo[irq]->pgid.inf.fc = SPID_FUNC_MULTI_PATH | SPID_FUNC_ESTABLISH;
5901 
5902 	/*
5903 	 * We now issue a SetPGID request. In case of BUSY
5904 	 *  or STATUS PENDING conditions we retry 5 times.
5905 	 */
5906 	do {
5907 		memset (pdevstat, '\0', sizeof (devstat_t));
5908 
5909 		irq_ret = s390_start_IO (irq, spid_ccw, 0xE2D7C9C4,	/* == SPID */
5910 					 lpm,	/* n/a */
5911 					 DOIO_WAIT_FOR_INTERRUPT
5912 					 | DOIO_VALID_LPM
5913 					 | DOIO_DONT_CALL_INTHDLR
5914 					 | DOIO_TIMEOUT);
5915 
5916 		if (!irq_ret) {
5917 			if (pdevstat->flag & DEVSTAT_STATUS_PENDING) {
5918 #ifdef CONFIG_DEBUG_IO
5919 				printk (KERN_DEBUG "SPID - Device %04X "
5920 					"on Subchannel %04X "
5921 					"reports pending status, "
5922 					"lpm = %x, "
5923 					"retry : %d\n",
5924 					ioinfo[irq]->schib.pmcw.dev,
5925 					irq, lpm, retry);
5926 #endif
5927 				CIO_MSG_EVENT(2,
5928 					      "SPID - Device %04X "
5929 					      "on Subchannel %04X "
5930 					      "reports pending status, "
5931 					      "lpm = %x, "
5932 					      "retry : %d\n",
5933 					      ioinfo[irq]->schib.pmcw.
5934 					      dev, irq, lpm, retry);
5935 				retry--;
5936 				irq_ret = -EIO;
5937 			}
5938 
5939 			if (pdevstat->flag == (DEVSTAT_START_FUNCTION
5940 					       | DEVSTAT_FINAL_STATUS)) {
5941 				retry = 0;	/* successfully set ... */
5942 				irq_ret = 0;
5943 			} else if (pdevstat->flag & DEVSTAT_FLAG_SENSE_AVAIL) {
5944 				/*
5945 				 * If the device doesn't support the
5946 				 *  Sense Path Group ID command
5947 				 *  further retries wouldn't help ...
5948 				 */
5949 				if (pdevstat->ii.sense.
5950 				    data[0] & SNS0_CMD_REJECT) {
5951 					if (mpath) {
5952 						/*
5953 						 * We now try single path mode.
5954 						 * Note we must not issue the suspend
5955 						 * multipath reconnect, or we will get
5956 						 * a command reject by tapes.
5957 						 */
5958 
5959 						spid_ccw[0].cmd_code =
5960 						    CCW_CMD_SET_PGID;
5961 						spid_ccw[0].cda = (__u32)
5962 						    virt_to_phys (&ioinfo[irq]->pgid);
5963 						spid_ccw[0].count =
5964 						    sizeof (pgid_t);
5965 						spid_ccw[0].flags =
5966 						    CCW_FLAG_SLI;
5967 
5968 						ioinfo[irq]->pgid.inf.fc =
5969 						    SPID_FUNC_SINGLE_PATH
5970 						    | SPID_FUNC_ESTABLISH;
5971 						mpath = 0;
5972 						retry--;
5973 						irq_ret = -EIO;
5974 					} else {
5975 						irq_ret = -EOPNOTSUPP;
5976 						retry = 0;
5977 
5978 					}
5979 				} else {
5980 #ifdef CONFIG_DEBUG_IO
5981 					printk (KERN_WARNING
5982 						"SPID - device %04X,"
5983 						" unit check,"
5984 						" retry %d, cnt %02d,"
5985 						" lpm %x, sns :"
5986 						" %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
5987 						ioinfo[irq]->schib.pmcw.
5988 						dev, retry,
5989 						pdevstat->scnt,
5990 						lpm,
5991 						pdevstat->ii.sense.
5992 						data[0],
5993 						pdevstat->ii.sense.
5994 						data[1],
5995 						pdevstat->ii.sense.
5996 						data[2],
5997 						pdevstat->ii.sense.
5998 						data[3],
5999 						pdevstat->ii.sense.
6000 						data[4],
6001 						pdevstat->ii.sense.
6002 						data[5],
6003 						pdevstat->ii.sense.
6004 						data[6],
6005 						pdevstat->ii.sense.data[7]);
6006 #endif
6007 
6008 					CIO_MSG_EVENT(2,
6009 						     "SPID - device %04X,"
6010 						     " unit check,"
6011 						     " retry %d, cnt %02d,"
6012 						     " lpm %x, sns :"
6013 						     " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
6014 						     ioinfo[irq]->schib.
6015 						     pmcw.dev, retry,
6016 						     pdevstat->scnt,
6017 						     lpm,
6018 						     pdevstat->ii.sense.
6019 						     data[0],
6020 						     pdevstat->ii.sense.
6021 						     data[1],
6022 						     pdevstat->ii.sense.
6023 						     data[2],
6024 						     pdevstat->ii.sense.
6025 						     data[3],
6026 						     pdevstat->ii.sense.
6027 						     data[4],
6028 						     pdevstat->ii.sense.
6029 						     data[5],
6030 						     pdevstat->ii.sense.
6031 						     data[6],
6032 						     pdevstat->ii.sense.
6033 						     data[7]);
6034 
6035 					retry--;
6036 					irq_ret = -EIO;
6037 
6038 				}
6039 
6040 			} else if (pdevstat->flag & DEVSTAT_NOT_OPER) {
6041 				/* don't issue warnings during startup unless requested */
6042 				if (init_IRQ_complete || cio_notoper_msg) {
6043 
6044 					printk (KERN_INFO
6045 						"SPID - Device %04X "
6046 						"on Subchannel %04X, "
6047 						"lpm %02X, "
6048 						"became 'not operational'\n",
6049 						ioinfo[irq]->schib.pmcw.
6050 						dev, irq,
6051 						lpm);
6052 					CIO_MSG_EVENT(2,
6053 						     "SPID - Device %04X "
6054 						     "on Subchannel %04X, "
6055 						      "lpm %02X, "
6056 						     "became 'not operational'\n",
6057 						     ioinfo[irq]->schib.
6058 						     pmcw.dev, irq,
6059 						     lpm);
6060 				}
6061 
6062 				retry = 0;
6063 				ioinfo[irq]->opm &= ~lpm;
6064 				irq_ret = -EAGAIN;
6065 
6066 			}
6067 
6068 		} else if (irq_ret == -ETIMEDOUT) {
6069 			/*
6070 			 * SetPGID timed out, so we cancel it before
6071 			 * we retry
6072 			 */
6073 			int xret;
6074 
6075 			xret = cancel_IO(irq);
6076 
6077 			if (!xret)
6078 				CIO_MSG_EVENT(2,
6079 					      "SetPGID: sch canceled "
6080 					      "successfully for irq %x\n",
6081 					      irq);
6082 			retry--;
6083 
6084 		} else if (irq_ret == -EBUSY) {
6085 #ifdef CONFIG_DEBUG_IO
6086 			printk(KERN_WARNING
6087 			       "SPID - device %x, irq %x is busy!\n",
6088 			       ioinfo[irq]->schib.pmcw.dev, irq);
6089 #endif /* CONFIG_DEBUG_IO */
6090 			CIO_MSG_EVENT(2,
6091 				      "SPID - device %x, irq %x is busy!\n",
6092 				      ioinfo[irq]->schib.pmcw.dev, irq);
6093 			retry = 0;
6094 
6095 		} else if (irq_ret != -ENODEV) {
6096 			retry--;
6097 			irq_ret = -EIO;
6098 		} else if (!pdevstat->flag & DEVSTAT_NOT_OPER) {
6099 			retry = 0;
6100 			irq_ret = -ENODEV;
6101 		} else {
6102 			/* don't issue warnings during startup unless requested */
6103 			if (init_IRQ_complete || cio_notoper_msg) {
6104 
6105 				printk (KERN_INFO
6106 					"SPID - Device %04X "
6107 					"on Subchannel %04X, "
6108 					"lpm %02X, "
6109 					"became 'not operational'\n",
6110 					ioinfo[irq]->schib.pmcw.
6111 					dev, irq,
6112 					lpm);
6113 				CIO_MSG_EVENT(2,
6114 					      "SPID - Device %04X "
6115 					      "on Subchannel %04X, "
6116 					      "lpm %02X, "
6117 					      "became 'not operational'\n",
6118 					      ioinfo[irq]->schib.
6119 					      pmcw.dev, irq,
6120 					      lpm);
6121 			}
6122 
6123 			retry = 0;
6124 			ioinfo[irq]->opm &= ~lpm;
6125 
6126 			if (ioinfo[irq]->opm != 0)
6127 				irq_ret = -EAGAIN;
6128 			else
6129 				irq_ret = -ENODEV;
6130 
6131 		}
6132 
6133 	} while (retry > 0);
6134 
6135 	if (init_IRQ_complete) {
6136 		kfree (spid_ccw);
6137 	} else {
6138 		free_bootmem ((unsigned long) spid_ccw, 2 * sizeof (ccw1_t));
6139 
6140 	}
6141 
6142 	s390irq_spin_unlock_irqrestore (irq, flags);
6143 
6144 	/*
6145 	 * If we installed the irq action handler we have to
6146 	 *  release it too.
6147 	 */
6148 	if (inlreq)
6149 		free_irq (irq, pdevstat);
6150 
6151 	return (irq_ret);
6152 }
6153 
6154 /*
6155  * s390_SensePGID
6156  *
6157  * Sense Path Group ID
6158  *
6159  */
6160 int
s390_SensePGID(int irq,__u8 lpm,pgid_t * pgid)6161 s390_SensePGID (int irq, __u8 lpm, pgid_t * pgid)
6162 {
6163 	ccw1_t *snid_ccw;	/* ccw area for SNID command */
6164 	devstat_t devstat;	/* required by request_irq() */
6165 	devstat_t *pdevstat = &devstat;
6166 	char dbf_txt[15];
6167 	pgid_t * tmp_pgid;
6168 
6169 	int irq_ret = 0;	/* return code */
6170 	int retry = 5;		/* retry count */
6171 	int inlreq = 0;		/* inline request_irq() */
6172 	unsigned long flags;
6173 
6174 	SANITY_CHECK (irq);
6175 
6176 	if (ioinfo[irq]->ui.flags.oper == 0) {
6177 		return (-ENODEV);
6178 
6179 	}
6180 
6181 	sprintf (dbf_txt, "SNID%x", irq);
6182 	CIO_TRACE_EVENT (4, dbf_txt);
6183 
6184 	if (!ioinfo[irq]->ui.flags.ready) {
6185 		/*
6186 		 * Perform SENSE PGID command processing. We have to request device
6187 		 *  ownership and provide a dummy I/O handler. We issue sync. I/O
6188 		 *  requests and evaluate the devstat area on return therefore
6189 		 *  we don't need a real I/O handler in place.
6190 		 */
6191 		irq_ret = request_irq (irq,
6192 				       init_IRQ_handler,
6193 				       SA_PROBE, "SNID", pdevstat);
6194 
6195 		if (irq_ret == 0)
6196 			inlreq = 1;
6197 
6198 	} else {
6199 		pdevstat = ioinfo[irq]->irq_desc.dev_id;
6200 
6201 	}
6202 
6203 	if (irq_ret) {
6204 		return irq_ret;
6205 	}
6206 
6207 	s390irq_spin_lock_irqsave (irq, flags);
6208 
6209 	ioinfo[irq]->ui.flags.unfriendly = 0; /* assume it's friendly... */
6210 
6211 	if (init_IRQ_complete) {
6212 		snid_ccw = kmalloc (sizeof (ccw1_t), GFP_DMA | GFP_ATOMIC);
6213 		tmp_pgid = kmalloc (sizeof (pgid_t), GFP_DMA | GFP_ATOMIC);
6214 	} else {
6215 		snid_ccw = alloc_bootmem_low (sizeof (ccw1_t));
6216 		tmp_pgid = alloc_bootmem_low (sizeof (pgid_t));
6217 	}
6218 
6219 	if (!snid_ccw || !tmp_pgid) {
6220 		if (snid_ccw) {
6221 			if (init_IRQ_complete)
6222 				kfree(snid_ccw);
6223 			else
6224 				free_bootmem((unsigned long) snid_ccw, sizeof(ccw1_t));
6225 		}
6226 		if (tmp_pgid) {
6227 			if (init_IRQ_complete)
6228 				kfree(tmp_pgid);
6229 			else
6230 				free_bootmem((unsigned long) tmp_pgid, sizeof(pgid_t));
6231 		}
6232 		s390irq_spin_unlock_irqrestore(irq, flags);
6233 		if (inlreq)
6234 			free_irq (irq, pdevstat);
6235 		return -ENOMEM;
6236 	}
6237 
6238 	snid_ccw->cmd_code = CCW_CMD_SENSE_PGID;
6239 	snid_ccw->cda = (__u32) virt_to_phys (tmp_pgid);
6240 	snid_ccw->count = sizeof (pgid_t);
6241 	snid_ccw->flags = CCW_FLAG_SLI;
6242 
6243 	/*
6244 	 * We now issue a SensePGID request. In case of BUSY
6245 	 *  or STATUS PENDING conditions we retry 5 times.
6246 	 */
6247 	do {
6248 		memset (pdevstat, '\0', sizeof (devstat_t));
6249 
6250 		irq_ret = s390_start_IO (irq, snid_ccw, 0xE2D5C9C4,	/* == SNID */
6251 					 lpm,	/* n/a */
6252 					 DOIO_WAIT_FOR_INTERRUPT
6253  					 | DOIO_TIMEOUT
6254 					 | DOIO_VALID_LPM
6255 					 | DOIO_DONT_CALL_INTHDLR);
6256 
6257 		if (irq_ret == 0) {
6258 			if (pdevstat->flag & DEVSTAT_FLAG_SENSE_AVAIL) {
6259 				/*
6260 				 * If the device doesn't support the
6261 				 *  Sense Path Group ID command
6262 				 *  further retries wouldn't help ...
6263 				 */
6264 				if (pdevstat->ii.sense.data[0]
6265 				    & (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)) {
6266 					retry = 0;
6267 					irq_ret = -EOPNOTSUPP;
6268 				} else {
6269 #ifdef CONFIG_DEBUG_IO
6270 					printk (KERN_WARNING
6271 						"SNID - device %04X,"
6272 						" unit check,"
6273 						" flag %04X, "
6274 						" retry %d, cnt %02d,"
6275 						" sns :"
6276 						" %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
6277 						ioinfo[irq]->schib.pmcw.
6278 						dev, pdevstat->flag,
6279 						retry, pdevstat->scnt,
6280 						pdevstat->ii.sense.
6281 						data[0],
6282 						pdevstat->ii.sense.
6283 						data[1],
6284 						pdevstat->ii.sense.
6285 						data[2],
6286 						pdevstat->ii.sense.
6287 						data[3],
6288 						pdevstat->ii.sense.
6289 						data[4],
6290 						pdevstat->ii.sense.
6291 						data[5],
6292 						pdevstat->ii.sense.
6293 						data[6],
6294 						pdevstat->ii.sense.data[7]);
6295 
6296 #endif
6297 					CIO_MSG_EVENT(2,
6298 						     "SNID - device %04X,"
6299 						     " unit check,"
6300 						     " flag %04X, "
6301 						     " retry %d, cnt %02d,"
6302 						     " sns :"
6303 						     " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
6304 						     ioinfo[irq]->schib.
6305 						     pmcw.dev,
6306 						     pdevstat->flag,
6307 						     retry,
6308 						     pdevstat->scnt,
6309 						     pdevstat->ii.sense.
6310 						     data[0],
6311 						     pdevstat->ii.sense.
6312 						     data[1],
6313 						     pdevstat->ii.sense.
6314 						     data[2],
6315 						     pdevstat->ii.sense.
6316 						     data[3],
6317 						     pdevstat->ii.sense.
6318 						     data[4],
6319 						     pdevstat->ii.sense.
6320 						     data[5],
6321 						     pdevstat->ii.sense.
6322 						     data[6],
6323 						     pdevstat->ii.sense.
6324 						     data[7]);
6325 					retry--;
6326 					irq_ret = -EIO;
6327 
6328 				}
6329 			} else if (pdevstat->flag & DEVSTAT_NOT_OPER) {
6330 				/* don't issue warnings during startup unless requested */
6331 				if (init_IRQ_complete || cio_notoper_msg) {
6332 					printk (KERN_INFO
6333 						"SNID - Device %04X "
6334 						"on Subchannel %04X, "
6335 						"lpm %02X, "
6336 						"became 'not operational'\n",
6337 						ioinfo[irq]->schib.pmcw.
6338 						dev, irq,
6339 						lpm);
6340 					CIO_MSG_EVENT(2,
6341 						     "SNID - Device %04X "
6342 						     "on Subchannel %04X, "
6343 						     "lpm %02X, "
6344 						     "became 'not operational'\n",
6345 						     ioinfo[irq]->schib.
6346 						     pmcw.dev, irq,
6347 						     lpm);
6348 				}
6349 
6350 				retry = 0;
6351 				ioinfo[irq]->opm &= ~lpm;
6352 				irq_ret = -EAGAIN;
6353 
6354 			} else {
6355 				retry = 0;	/* success ... */
6356 				irq_ret = 0;
6357  				/*
6358  				 * Check if device is locked by someone else
6359  				 * -- we'll fail other commands if that is
6360  				 * the case
6361  				 */
6362  				if (tmp_pgid->inf.ps.state2 ==
6363  				    SNID_STATE2_RESVD_ELSE) {
6364  					printk (KERN_WARNING
6365  						"SNID - Device %04X "
6366  						"on Subchannel %04X "
6367  						"is reserved by "
6368  						"someone else\n",
6369  						ioinfo[irq]->schib.pmcw.dev,
6370  						irq);
6371  					CIO_MSG_EVENT(2,
6372  						      "SNID - Device %04X "
6373  						      "on Subchannel %04X "
6374  						      "is reserved by "
6375  						      "someone else\n",
6376  						      ioinfo[irq]->schib.
6377  						      pmcw.dev,
6378  						      irq);
6379 
6380  					ioinfo[irq]->ui.flags.unfriendly = 1;
6381  				} else {
6382  					/*
6383  					 * device is friendly to us :)
6384  					 */
6385  					ioinfo[irq]->ui.flags.unfriendly = 0;
6386  				}
6387 				memcpy(pgid, tmp_pgid, sizeof(pgid_t));
6388 			}
6389 
6390  		} else if (irq_ret == -ETIMEDOUT) {
6391 #ifdef CONFIG_DEBUG_IO
6392  			printk(KERN_INFO "SNID - Operation timed out "
6393  			       "on Device %04X, Subchannel %04X... "
6394  			       "cancelling IO\n",
6395  			       ioinfo[irq]->schib.pmcw.dev,
6396  			       irq);
6397 #endif /* CONFIG_DEBUG_IO */
6398  			CIO_MSG_EVENT(2,
6399  				      "SNID - Operation timed out "
6400  				      "on Device %04X, Subchannel %04X... "
6401  				      "cancelling IO\n",
6402  				      ioinfo[irq]->schib.pmcw.dev,
6403  				      irq);
6404  			cancel_IO(irq);
6405  			retry--;
6406 
6407 		} else if (irq_ret != -ENODEV) {	/* -EIO, or -EBUSY */
6408 
6409 			if (pdevstat->flag & DEVSTAT_STATUS_PENDING) {
6410 #ifdef CONFIG_DEBUG_IO
6411 				printk (KERN_INFO "SNID - Device %04X "
6412 					"on Subchannel %04X "
6413 					"reports pending status, "
6414 					"retry : %d\n",
6415 					ioinfo[irq]->schib.pmcw.dev,
6416 					irq, retry);
6417 #endif
6418 				CIO_MSG_EVENT(2,
6419 					     "SNID - Device %04X "
6420 					     "on Subchannel %04X "
6421 					     "reports pending status, "
6422 					     "retry : %d\n",
6423 					     ioinfo[irq]->schib.pmcw.
6424 					     dev, irq, retry);
6425 			}
6426 
6427 			printk (KERN_WARNING "SNID - device %04X,"
6428 				" start_io() reports rc : %d, retrying ...\n",
6429 				ioinfo[irq]->schib.pmcw.dev, irq_ret);
6430 			CIO_MSG_EVENT(2,
6431 				      "SNID - device %04X,"
6432 				      " start_io() reports rc : %d,"
6433 				      " retrying ...\n",
6434 				      ioinfo[irq]->schib.pmcw.dev, irq_ret);
6435 			retry--;
6436 			irq_ret = -EIO;
6437 		} else if (!pdevstat->flag & DEVSTAT_NOT_OPER) {
6438 			retry = 0;
6439 			irq_ret = -ENODEV;
6440 		} else {
6441 			/* don't issue warnings during startup unless requested */
6442 			if (init_IRQ_complete || cio_notoper_msg) {
6443 
6444 				printk (KERN_INFO
6445 					"SNID - Device %04X "
6446 					"on Subchannel %04X, "
6447 					"lpm %02X, "
6448 					"became 'not operational'\n",
6449 					ioinfo[irq]->schib.pmcw.
6450 					dev, irq,
6451 					lpm);
6452 				CIO_MSG_EVENT(2,
6453 					      "SNID - Device %04X "
6454 					      "on Subchannel %04X, "
6455 					      "lpm %02X, "
6456 					      "became 'not operational'\n",
6457 					      ioinfo[irq]->schib.
6458 					      pmcw.dev, irq,
6459 					      lpm);
6460 			}
6461 
6462 			retry = 0;
6463 			ioinfo[irq]->opm &= ~lpm;
6464 
6465 			if (ioinfo[irq]->opm != 0)
6466 				irq_ret = -EAGAIN;
6467 			else
6468 				irq_ret = -ENODEV;
6469 
6470 		}
6471 
6472 	} while (retry > 0);
6473 
6474 	if (init_IRQ_complete) {
6475 		kfree (snid_ccw);
6476 		kfree (tmp_pgid);
6477 	} else {
6478 		free_bootmem ((unsigned long) snid_ccw, sizeof (ccw1_t));
6479 		free_bootmem ((unsigned long) tmp_pgid, sizeof (pgid_t));
6480 
6481 	}
6482 
6483 	s390irq_spin_unlock_irqrestore (irq, flags);
6484 
6485 	/*
6486 	 * If we installed the irq action handler we have to
6487 	 *  release it too.
6488 	 */
6489 	if (inlreq)
6490 		free_irq (irq, pdevstat);
6491 
6492 	return (irq_ret);
6493 }
6494 
/*
 * s390_process_subchannel_source
 *
 * Handle a machine-check / channel report for subchannel irq:
 * snapshot the previous device state, re-validate the subchannel,
 * then either run device recognition (device appeared or changed)
 * or invoke the driver's not-oper handler (device gone).
 */
void
s390_process_subchannel_source (int irq)
{
	int dev_oper = 0;	/* was the device operational before revalidation? */
	int dev_no = -1;	/* device number before revalidation (-1 = unknown) */
	int lock = 0;		/* did we take the subchannel lock? */
	int is_owned = 0;	/* does a driver own (request_irq) this subchannel? */

	/*
	 * If the device isn't known yet
	 *   we can't lock it ...
	 */
	if (ioinfo[irq] != INVALID_STORAGE_AREA) {
		s390irq_spin_lock (irq);
		lock = 1;

		/* Only I/O subchannels (st == 0) carry device state. */
		if (!ioinfo[irq]->st) {
			dev_oper = ioinfo[irq]->ui.flags.oper;

			if (ioinfo[irq]->ui.flags.dval)
				dev_no = ioinfo[irq]->devno;

			is_owned = ioinfo[irq]->ui.flags.ready;
		}

	}
#ifdef CONFIG_DEBUG_CRW
	printk (KERN_DEBUG
		"do_crw_pending : subchannel validation - start ...\n");
#endif
	CIO_CRW_EVENT(4, "subchannel validation - start\n");
	s390_validate_subchannel (irq, is_owned);

	if (irq > highest_subchannel)
		highest_subchannel = irq;

#ifdef CONFIG_DEBUG_CRW
	printk (KERN_DEBUG "do_crw_pending : subchannel validation - done\n");
#endif
	CIO_CRW_EVENT(4, "subchannel validation - done\n");
	/*
	 * After the validate processing
	 *   the ioinfo control block
	 *   should be allocated ...
	 */
	if (lock) {
		s390irq_spin_unlock (irq);
	}

	if (ioinfo[irq] != INVALID_STORAGE_AREA) {
#ifdef CONFIG_DEBUG_CRW
		printk (KERN_DEBUG "do_crw_pending : ioinfo at "
#ifdef CONFIG_ARCH_S390X
			"%08lX\n", (unsigned long) ioinfo[irq]
#else				/* CONFIG_ARCH_S390X */
			"%08X\n", (unsigned) ioinfo[irq]
#endif				/* CONFIG_ARCH_S390X */
			);
#endif
#ifdef CONFIG_ARCH_S390X
		CIO_CRW_EVENT(4, "ioinfo at %08lX\n",
			      (unsigned long)ioinfo[irq]);
#else				/* CONFIG_ARCH_S390X */
		CIO_CRW_EVENT(4, "ioinfo at %08X\n",
			      (unsigned)ioinfo[irq]);
#endif				/* CONFIG_ARCH_S390X */

		/* Non-I/O subchannels need no device processing. */
		if (ioinfo[irq]->st)
			return;

		if (ioinfo[irq]->ui.flags.oper == 0) {
			not_oper_handler_func_t nopfunc = ioinfo[irq]->nopfunc;
#ifdef CONFIG_PROC_FS
			/* remove procfs entry */
			if (cio_proc_devinfo)
				cio_procfs_device_remove (dev_no);
#endif
			/*
			 * If the device has gone
			 *  call not oper handler
			 */
			if ((dev_oper == 1)
			    && (nopfunc != NULL)) {

				free_irq (irq, ioinfo[irq]->irq_desc.dev_id);
				nopfunc (irq, DEVSTAT_DEVICE_GONE);

			}
		} else {
#ifdef CONFIG_DEBUG_CRW
			printk (KERN_DEBUG
				"do_crw_pending : device "
				"recognition - start ...\n");
#endif
			CIO_CRW_EVENT( 4,
				       "device recognition - start\n");
			s390_device_recognition_irq (irq);

#ifdef CONFIG_DEBUG_CRW
			printk (KERN_DEBUG
				"do_crw_pending : device "
				"recognition - done\n");
#endif
			CIO_CRW_EVENT( 4,
				       "device recognition - done\n");
			/*
			 * the device became operational
			 */
			if (dev_oper == 0) {
				devreg_t *pdevreg;

				/* notify a registered driver, if any */
				pdevreg = s390_search_devreg (ioinfo[irq]);

				if (pdevreg && pdevreg->oper_func)
					pdevreg->oper_func(irq, pdevreg);

#ifdef CONFIG_PROC_FS
				/* add new procfs entry */
				if (cio_proc_devinfo)
					if (highest_subchannel <
					    MAX_CIO_PROCFS_ENTRIES) {
						cio_procfs_device_create
						    (ioinfo[irq]->devno);
					}
#endif
			}
			/*
			 * ... it is and was operational, but
			 *      the devno may have changed
			 */
			else if ((ioinfo[irq]->devno != dev_no)
				 && (ioinfo[irq]->nopfunc != NULL)) {
#ifdef CONFIG_PROC_FS
				int devno_old = ioinfo[irq]->devno;
#endif
				ioinfo[irq]->nopfunc (irq, DEVSTAT_REVALIDATE);
#ifdef CONFIG_PROC_FS
				/* remove old entry, add new */
				if (cio_proc_devinfo) {
					cio_procfs_device_remove (devno_old);
					cio_procfs_device_create
					    (ioinfo[irq]->devno);
				}
#endif
			}
		}
#ifdef CONFIG_PROC_FS
		/* get rid of dead procfs entries */
		if (cio_proc_devinfo)
			cio_procfs_device_purge ();
#endif
	}
}
6648 
6649 #ifdef CONFIG_CHSC
6650 static int
chsc_get_sch_desc_irq(int irq)6651 chsc_get_sch_desc_irq(int irq)
6652 {
6653 	int j = 0;
6654 	int ccode;
6655 
6656 	spin_lock(&chsc_lock_ssd);
6657 
6658 	if (!chsc_area_ssd)
6659 		chsc_area_ssd = kmalloc(sizeof(chsc_area_t),GFP_KERNEL);
6660 
6661 	if (!chsc_area_ssd) {
6662 		printk( KERN_CRIT "No memory to determine sch descriptions...\n");
6663 		spin_unlock(&chsc_lock_ssd);
6664 		return -ENOMEM;
6665 	}
6666 
6667 	memset(chsc_area_ssd, 0, sizeof(chsc_area_t));
6668 
6669 	chsc_area_ssd->request_block.command_code1=0x0010;
6670 	chsc_area_ssd->request_block.command_code2=0x0004;
6671 	chsc_area_ssd->request_block.request_block_data.ssd_req.f_sch=irq;
6672 	chsc_area_ssd->request_block.request_block_data.ssd_req.l_sch=irq;
6673 
6674 	ccode = chsc(chsc_area_ssd);
6675 #ifdef CONFIG_DEBUG_CHSC
6676 	if (ccode)
6677 		printk( KERN_DEBUG "chsc returned with ccode = %d\n",ccode);
6678 #endif /* CONFIG_DEBUG_CHSC */
6679 	if (!ccode) {
6680 		if (chsc_area_ssd->response_block.response_code == 0x0003) {
6681 #ifdef CONFIG_DEBUG_CHSC
6682 			printk( KERN_WARNING "Error in chsc request block!\n");
6683 #endif /* CONFIG_DEBUG_CHSC */
6684 			CIO_CRW_EVENT( 2, "Error in chsc request block!\n");
6685 			spin_unlock(&chsc_lock_ssd);
6686 			return -EINVAL;
6687 
6688 		} else if (chsc_area_ssd->response_block.response_code == 0x0004) {
6689 #ifdef CONFIG_DEBUG_CHSC
6690 			printk( KERN_WARNING "Model does not provide ssd\n");
6691 #endif /* CONFIG_DEBUG_CHSC */
6692 			CIO_CRW_EVENT( 2, "Model does not provide ssd\n");
6693 			spin_unlock(&chsc_lock_ssd);
6694 			return -EOPNOTSUPP;
6695 
6696 		} else if (chsc_area_ssd->response_block.response_code == 0x0002) {
6697 #ifdef CONFIG_DEBUG_CHSC
6698 			printk( KERN_WARNING "chsc: Invalid command!\n");
6699 #endif /* CONFIG_DEBUG_CHSC */
6700 			CIO_CRW_EVENT( 2,
6701 				       "chsc: Invalid command!\n");
6702 			return -EINVAL;
6703 
6704 		} else if (chsc_area_ssd->response_block.response_code == 0x0001) {
6705 			/* everything ok */
6706 
6707 			switch (chsc_area_ssd->response_block.response_block_data.ssd_res.st) {
6708 
6709 			case 0:  /* I/O subchannel */
6710 
6711 				/*
6712 				 * All fields have meaning
6713 				 */
6714 #ifdef CONFIG_DEBUG_CHSC
6715 				if (cio_show_msg)
6716 					printk( KERN_DEBUG
6717 						"ssd: sch %x is I/O subchannel\n",
6718 						irq);
6719 #endif /* CONFIG_DEBUG_CHSC */
6720 				CIO_CRW_EVENT( 6,
6721 					       "ssd: sch %x is I/O subchannel\n",
6722 					       irq);
6723 
6724 				if (ioinfo[irq] == INVALID_STORAGE_AREA)
6725 					/* FIXME: we should do device rec. here... */
6726 					break;
6727 
6728 				ioinfo[irq]->ssd_info.valid = 1;
6729 				ioinfo[irq]->ssd_info.type = 0;
6730 				for (j=0;j<8;j++) {
6731 					if ((0x80 >> j) &
6732 					    chsc_area_ssd->response_block.
6733 					    response_block_data.ssd_res.path_mask &
6734 					    chsc_area_ssd->response_block.
6735 					    response_block_data.ssd_res.fla_valid_mask) {
6736 
6737 						if (chsc_area_ssd->response_block.
6738 						    response_block_data.ssd_res.chpid[j])
6739 
6740 							if (!test_and_set_bit
6741 							    (chsc_area_ssd->response_block.
6742 							     response_block_data.
6743 							     ssd_res.chpid[j],
6744 							     &chpids_known))
6745 
6746 								if (test_bit
6747 								    (chsc_area_ssd->response_block.
6748 								     response_block_data.
6749 								     ssd_res.chpid[j],
6750 								     &chpids_logical))
6751 
6752 									set_bit(chsc_area_ssd->response_block.
6753 										response_block_data.
6754 										ssd_res.chpid[j],
6755 										&chpids);
6756 
6757 						ioinfo[irq]->ssd_info.chpid[j] =
6758 							chsc_area_ssd->response_block.
6759 							response_block_data.ssd_res.chpid[j];
6760 						ioinfo[irq]->ssd_info.fla[j] =
6761 							chsc_area_ssd->response_block.
6762 							response_block_data.ssd_res.fla[j];
6763 					}
6764 				}
6765 				break;
6766 
6767 			case 1:  /* CHSC subchannel */
6768 
6769 				/*
6770 				 * Only sch_val, st and sch have meaning
6771 				 */
6772 #ifdef CONFIG_DEBUG_CHSC
6773 				if (cio_show_msg)
6774 					printk( KERN_DEBUG
6775 						"ssd: sch %x is chsc subchannel\n",
6776 						irq);
6777 #endif /* CONFIG_DEBUG_CHSC */
6778 				CIO_CRW_EVENT( 6,
6779 					       "ssd: sch %x is chsc subchannel\n",
6780 					       irq);
6781 
6782 				if (ioinfo[irq] == INVALID_STORAGE_AREA)
6783 					/* FIXME: we should do device rec. here... */
6784 					break;
6785 
6786 				ioinfo[irq]->ssd_info.valid = 1;
6787 				ioinfo[irq]->ssd_info.type = 1;
6788 				break;
6789 
6790 			case 2: /* Message subchannel */
6791 
6792 				/*
6793 				 * All fields except unit_addr have meaning
6794 				 */
6795 #ifdef CONFIG_DEBUG_CHSC
6796 				if (cio_show_msg)
6797 					printk( KERN_DEBUG
6798 						"ssd: sch %x is message subchannel\n",
6799 						irq);
6800 #endif
6801 				CIO_CRW_EVENT( 6,
6802 					       "ssd: sch %x is message subchannel\n",
6803 					       irq);
6804 
6805 				if (ioinfo[irq] == INVALID_STORAGE_AREA)
6806 					/* FIXME: we should do device rec. here... */
6807 					break;
6808 
6809 				ioinfo[irq]->ssd_info.valid = 1;
6810 				ioinfo[irq]->ssd_info.type = 2;
6811 				for (j=0;j<8;j++) {
6812 					if ((0x80 >> j) &
6813 					    chsc_area_ssd->response_block.
6814 					    response_block_data.ssd_res.path_mask &
6815 					    chsc_area_ssd->response_block.
6816 					    response_block_data.ssd_res.fla_valid_mask) {
6817 						if (chsc_area_ssd->response_block.
6818 						    response_block_data.ssd_res.chpid[j])
6819 
6820 							if (!test_and_set_bit
6821 							    (chsc_area_ssd->response_block.
6822 							     response_block_data.
6823 							     ssd_res.chpid[j],
6824 							     &chpids_known))
6825 
6826 								if (test_bit
6827 								    (chsc_area_ssd->response_block.
6828 								     response_block_data.
6829 								     ssd_res.chpid[j],
6830 								     &chpids_logical))
6831 
6832 									set_bit(chsc_area_ssd->response_block.
6833 										response_block_data.
6834 										ssd_res.chpid[j],
6835 										&chpids);
6836 
6837 						ioinfo[irq]->ssd_info.chpid[j] =
6838 							chsc_area_ssd->response_block.
6839 							response_block_data.ssd_res.chpid[j];
6840 						ioinfo[irq]->ssd_info.fla[j] =
6841 							chsc_area_ssd->response_block.
6842 							response_block_data.ssd_res.fla[j];
6843 					}
6844 				}
6845 				break;
6846 
6847 			case 3: /* ADM subchannel */
6848 
6849 				/*
6850 				 * Only sch_val, st and sch have meaning
6851 				 */
6852 #ifdef CONFIG_DEBUG_CHSC
6853 				if (cio_show_msg)
6854 					printk( KERN_DEBUG
6855 						"ssd: sch %x is ADM subchannel\n",
6856 						irq);
6857 #endif /* CONFIG_DEBUG_CHSC */
6858 				CIO_CRW_EVENT( 6,
6859 					       "ssd: sch %x is ADM subchannel\n",
6860 					       irq);
6861 
6862 				if (ioinfo[irq] == INVALID_STORAGE_AREA)
6863 					/* FIXME: we should do device rec. here... */
6864 					break;
6865 
6866 				ioinfo[irq]->ssd_info.valid = 1;
6867 				ioinfo[irq]->ssd_info.type = 3;
6868 				break;
6869 
6870 			default: /* uhm, that looks strange... */
6871 #ifdef CONFIG_DEBUG_CHSC
6872 				if (cio_show_msg)
6873 					printk( KERN_DEBUG
6874 						"Strange subchannel type %d for sch %x\n",
6875 						chsc_area_ssd->response_block.
6876 						response_block_data.ssd_res.st,
6877 						irq);
6878 #endif /* CONFIG_DEBUG_CHSC */
6879 				CIO_CRW_EVENT( 0,
6880 					       "Strange subchannel type %d for "
6881 					       "sch %x\n",
6882 					       chsc_area_ssd->response_block.
6883 					       response_block_data.ssd_res.st,
6884 					       irq);
6885 			}
6886 			spin_unlock(&chsc_lock_ssd);
6887 			return 0;
6888 		}
6889 	} else {
6890 		spin_unlock(&chsc_lock_ssd);
6891 		if (ccode == 3)
6892 			return -ENODEV;
6893 		return -EBUSY;
6894 	}
6895 	return -EIO;
6896 }
6897 
6898 
6899 static int
chsc_get_sch_descriptions(void)6900 chsc_get_sch_descriptions( void )
6901 {
6902 
6903 	int irq = 0;
6904 	int err = 0;
6905 
6906 	CIO_TRACE_EVENT( 4, "gsdesc");
6907 
6908 	/*
6909 	 * get information about chpids and link addresses
6910 	 * by executing the chsc command 'store subchannel description'
6911 	 */
6912 
6913 	if (init_IRQ_complete) {
6914 
6915 		for (irq=0; irq<=highest_subchannel; irq++) {
6916 
6917 			/*
6918 			 * retrieve information for each sch
6919 			 */
6920 			err = chsc_get_sch_desc_irq(irq);
6921 			if (err) {
6922 				if (!cio_chsc_err_msg) {
6923 					printk( KERN_ERR
6924 						"chsc_get_sch_descriptions:"
6925 						" Error %d while doing chsc; "
6926 						"processing "
6927 						"some machine checks may "
6928 						"not work\n",
6929 						err);
6930 					cio_chsc_err_msg=1;
6931 				}
6932 				return err;
6933 			}
6934 		}
6935 		cio_chsc_desc_avail = 1;
6936 		return 0;
6937 	} else {
6938 		/* Paranoia... */
6939 
6940 		printk( KERN_ERR
6941 			"Error: chsc_get_sch_descriptions called before "
6942 		       "initialization complete\n");
6943 		return -EINVAL;
6944 	}
6945 
6946 }
6947 
6948 __initcall(chsc_get_sch_descriptions);
6949 
/*
 * __check_for_io_and_kill
 *
 * Decide what to do about i/o still outstanding on subchannel irq while
 * the path selected by mask is affected; fatal != 0 means the path is
 * gone for good, fatal == 0 that it has merely become inaccessible.
 *
 * Called with the irq lock held (see s390_do_chpid_processing and
 * __process_chp_gone). On the CIO_PATHGONE_DEVGONE paths this function
 * itself drops the lock via s390irq_spin_unlock() - the caller must not
 * unlock again in that case.
 *
 * Returns a combination of:
 *   CIO_PATHGONE_WAIT4INT - an interrupt for the i/o is still expected
 *   CIO_PATHGONE_IOERR    - the i/o was killed resp. cancelled
 *   CIO_PATHGONE_DEVGONE  - the device has vanished (lock dropped!)
 */
static int
__check_for_io_and_kill(int irq, __u8 mask, int fatal)
{
	schib_t *schib = &ioinfo[irq]->schib;
	int ret = 0;

	if (schib->scsw.actl & SCSW_ACTL_DEVACT) {
		/* The device is actively working on the i/o. */
		if ((ioinfo[irq]->opm != mask) ||
		     (fatal == 0)) {
			/* Other paths remain, or path only inaccessible:
			 * the i/o may still complete. */
			ret = CIO_PATHGONE_WAIT4INT;
		}
		if ((schib->scsw.actl & SCSW_ACTL_SCHACT) &&
		    (schib->pmcw.lpum == mask) &&
		    (fatal != 0)) {
			int cc;
			/* Kill the IO. It won't complete. */
			ioinfo[irq]->ui.flags.noio = 0;
			ioinfo[irq]->ui.flags.killio = 1;
			/* intparm 0xD2C9D3D3 is "KILL" in EBCDIC */
			cc = clear_IO(irq, 0xD2C9D3D3, 0);
			if (cc != 0) {
				/* Eek, can't kill io. */
				CIO_CRW_EVENT(0,
					      "Can't kill io on "
					      "sch %x, clear_IO "
					      "returned %d!\n",
					      irq, cc);
				ioinfo[irq]->ui.flags.killio = 0;
				/* The lock is dropped here for the DEVGONE case. */
				s390irq_spin_unlock(irq);
				if ((cc == -ENODEV) &&
				    (ioinfo[irq]->nopfunc)) {
					ioinfo[irq]->ui.flags.oper = 0;
					ioinfo[irq]->nopfunc(irq,
							     DEVSTAT_DEVICE_GONE);
				}
				ret = CIO_PATHGONE_DEVGONE;
			} else {
				ret |= CIO_PATHGONE_WAIT4INT;
			}
			/*
			 * NOTE(review): the two statements below also run on
			 * the DEVGONE path above, OR-ing IOERR into the return
			 * value and touching ioinfo after the lock was
			 * dropped - confirm this is intended.
			 */
			ioinfo[irq]->ui.flags.noio = 1;
			ret |= CIO_PATHGONE_IOERR;
		}

	} else if (schib->scsw.actl & (SCSW_ACTL_CLEAR_PEND |
				SCSW_ACTL_HALT_PEND |
				SCSW_ACTL_START_PEND |
				SCSW_ACTL_RESUME_PEND)) {
		/* A function is pending at the subchannel, but the device
		 * is not actively working on it. */
		if ((schib->pmcw.lpum != mask) ||
		    (fatal == 0)) {
			/* Not (only) our path - wait for the interrupt. */
			ret = CIO_PATHGONE_WAIT4INT;
		} else {
			int cc;
			/* Cancel the i/o. */
			cc = cancel_IO(irq);
			switch (cc) {
			case 0:
				/* i/o cancelled; we can do path verif. */
				ret = CIO_PATHGONE_IOERR;
				break;
			case -EBUSY:
				/* Status pending, we'll get an interrupt */
				ret = CIO_PATHGONE_WAIT4INT;
				break;
			case -EINVAL:
				/*
				 * There is either not only the start function
				 * specified or we are subchannel active.
				 * Do a clear sch.
				 */
				ioinfo[irq]->ui.flags.noio = 0;
				ioinfo[irq]->ui.flags.killio = 1;
				cc = clear_IO(irq, 0xD2C9D3D3, 0);
				if (cc != 0) {
				/* Eek, can't kill io. */
					CIO_CRW_EVENT(0,
						      "Can't kill io on "
						      "sch %x, clear_IO "
						      "returned %d!\n",
						      irq, cc);
					ioinfo[irq]->ui.flags.killio = 0;
					/* Again: lock dropped for DEVGONE. */
					s390irq_spin_unlock(irq);
					if ((cc == -ENODEV) &&
					    (ioinfo[irq]->nopfunc)) {
						ioinfo[irq]->nopfunc(irq,
								     DEVSTAT_DEVICE_GONE);
						ioinfo[irq]->ui.flags.oper = 0;
					}
					ret = CIO_PATHGONE_DEVGONE;
				} else {
					ret = CIO_PATHGONE_WAIT4INT
						| CIO_PATHGONE_IOERR;
					ioinfo[irq]->ui.flags.noio = 1;
				}
				break;
			default: /* -ENODEV */
				/* Device is gone; notify the driver, lock dropped. */
				s390irq_spin_unlock(irq);
				if (ioinfo[irq]->nopfunc) {
						ioinfo[irq]->ui.flags.oper = 0;
						ioinfo[irq]->nopfunc(irq,
								     DEVSTAT_DEVICE_GONE);
				}
				ret = CIO_PATHGONE_DEVGONE;
			}
		}
	}
	return ret;
}
7056 
7057 void
s390_do_chpid_processing(__u8 chpid)7058 s390_do_chpid_processing( __u8 chpid)
7059 {
7060 
7061 	int irq;
7062 	int j;
7063 	char dbf_txt[15];
7064 
7065 	sprintf(dbf_txt, "chpr%x", chpid);
7066 	CIO_TRACE_EVENT( 2, dbf_txt);
7067 
7068 	/*
7069 	 * TODO: the chpid may be not the chpid with the link incident,
7070 	 * but the chpid the report came in through. How to handle???
7071 	 */
7072 	clear_bit(chpid, &chpids);
7073 	if (!test_and_clear_bit(chpid, &chpids_known)) {
7074 #ifdef CONFIG_DEBUG_CHSC
7075 		pr_debug(KERN_DEBUG"Got link incident for unknown chpid %x\n",
7076 		       chpid);
7077 #endif /* CONFIG_DEBUG_CHSC */
7078 		return;  /* we didn't know the chpid anyway */
7079 	}
7080 
7081 	for (irq=0;irq<=highest_subchannel;irq++) {
7082 		schib_t *schib;
7083 
7084 		if (ioinfo[irq] == INVALID_STORAGE_AREA)
7085 			continue;  /* we don't know the device anyway */
7086 		if (ioinfo[irq]->st)
7087 			continue; /* non-io subchannel */
7088 		schib = &ioinfo[irq]->schib;
7089 		for (j=0; j<8;j++) {
7090 			int mask = 0x80 >> j;
7091 			int out = 0;
7092 			int err = 0;
7093 
7094 			if (schib->pmcw.chpid[j] != chpid)
7095 				continue;
7096 
7097 			if (stsch(irq, schib) != 0) {
7098 				ioinfo[irq]->ui.flags.oper = 0;
7099 				if (ioinfo[irq]->nopfunc)
7100 					ioinfo[irq]->nopfunc(irq, DEVSTAT_DEVICE_GONE);
7101 				break;
7102 			}
7103 
7104 			s390irq_spin_lock(irq);
7105 
7106 			ioinfo[irq]->ui.flags.noio = 1;
7107 
7108 			/* Do we still expect an interrupt for outstanding io? */
7109 			if (ioinfo[irq]->ui.flags.busy) {
7110 				int rck = __check_for_io_and_kill(irq, mask, 1);
7111 				if (rck & CIO_PATHGONE_WAIT4INT)
7112 					out=1;
7113 				if (rck & CIO_PATHGONE_IOERR)
7114 					err=1;
7115 				if (rck & CIO_PATHGONE_DEVGONE)
7116 					break;
7117 			}
7118 
7119 			s390irq_spin_unlock(irq);
7120 
7121 			/*
7122 			 * Tell the device driver not to disturb us.
7123 			 * If the driver is not capable of handling
7124 			 * DEVSTAT_NOT_ACC, it doesn't want path grouping anyway.
7125 			 */
7126 			if (ioinfo[irq]->ui.flags.ready &&
7127 			    schib->pmcw.pim != 0x80 &&
7128 			    ioinfo[irq]->nopfunc &&
7129 			    ioinfo[irq]->ui.flags.notacccap) {
7130 				if (err)
7131 					ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC_ERR);
7132 				else
7133 					ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);
7134 			}
7135 
7136 			ioinfo[irq]->opm &= ~mask;
7137 
7138 			if (out)
7139 				break;
7140 
7141 			/*
7142 			 * Always schedule the path verification, even if opm=0.
7143 			 * Reason: We can't rely on stsch() to return latest&greatest
7144 			 * values, if a device selections hasn't been performed, and
7145 			 * we might miss a path we didn't get a mchk for.
7146 			 */
7147 			if (ioinfo[irq]->ui.flags.ready)
7148 				s390_schedule_path_verification(irq);
7149 			else {
7150 				ioinfo[irq]->ui.flags.noio = 0;
7151 				ioinfo[irq]->ui.flags.killio = 0;
7152 			}
7153 			break;
7154 		}
7155 	}
7156 }
7157 
7158 
/*
 * s390_do_res_acc_processing
 *
 * I/O resources may have become accessible again; scan the subchannels
 * that may be concerned and trigger device recognition resp. path
 * verification for them. info selects how precise the report is:
 *   CHSC_SEI_ACC_CHPID        - only the chpid is known (worst case)
 *   CHSC_SEI_ACC_LINKADDR     - chpid plus link address (only the upper
 *                               byte of fla is significant)
 *   CHSC_SEI_ACC_FULLLINKADDR - chpid plus full link address
 */
void
s390_do_res_acc_processing( __u8 chpid, __u16 fla, int info)
{

	char dbf_txt[15];
	int irq = 0;
	__u32 fla_mask = 0xffff;
	int chp;
	int mask;

	sprintf(dbf_txt, "accpr%x", chpid);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (info != CHSC_SEI_ACC_CHPID) {
		sprintf(dbf_txt, "fla%x", fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}
	sprintf(dbf_txt, "info:%d", info);
	CIO_TRACE_EVENT( 2, dbf_txt);

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */

	/* Make sure the ssd information is available. */
	if (!cio_chsc_desc_avail)
		chsc_get_sch_descriptions();

	if (!cio_chsc_desc_avail) {
		/*
		 * Something went wrong...
		 */
#ifdef CONFIG_DEBUG_CRW
		printk( KERN_WARNING
			"Error: Could not retrieve subchannel descriptions, "
		       "will not process css machine check...\n");
#endif /* CONFIG_DEBUG_CRW */
		CIO_CRW_EVENT( 0,
			       "Error: Could not retrieve subchannel descriptions, "
			       "will not process css machine check...\n");
		return;
	}

	/* Logically offline (varied off) chpids are not processed. */
	if (!test_bit(chpid, &chpids_logical)) {
#ifdef CONFIG_DEBUG_CHSC
		printk(KERN_DEBUG"chpid %x is logically offline, "
		       "skipping res acc processing\n", chpid);
#endif /* CONFIG_DEBUG_CHSC */
		return; /* no need to do the rest */
	}

	switch (info) {
	case CHSC_SEI_ACC_CHPID: /*
				  * worst case, we only know about the chpid
				  * the devices are attached to
				  */
#ifdef CONFIG_DEBUG_CHSC
		printk( KERN_DEBUG "Looking at chpid %x...\n", chpid);
#endif /* CONFIG_DEBUG_CHSC */

		for (irq=0; irq<__MAX_SUBCHANNELS; irq++) {

			/* Skip known non-io subchannels. */
			if((ioinfo[irq] != INVALID_STORAGE_AREA)
			   && (ioinfo[irq]->st != 0))
				continue;

			if (ioinfo[irq] == INVALID_STORAGE_AREA) {
				/*
				 * We don't know the device yet, but since a path
				 * may be available now to the device we'll have
				 * to do recognition again.
				 * Since we don't have any idea about which chpid
				 * that beast may be on we'll have to do a stsch
				 * on all devices, grr...
				 */
				int valret = 0;

				valret = s390_validate_subchannel(irq,0);
				if (valret == -ENXIO) {
					/* We're through */
					return;
				}
				if (irq > highest_subchannel)
					highest_subchannel = irq;
				if (valret == 0)
					s390_device_recognition_irq(irq);
				continue;
			}

			for (chp=0;chp<=7;chp++) {
				mask = 0x80 >> chp;

				/*
				 * check if chpid is in information
				 * updated by ssd
				 */
				if ((!ioinfo[irq]->ssd_info.valid) ||
				    (ioinfo[irq]->ssd_info.chpid[chp] != chpid))
					continue;

				/* Tell the device driver not to disturb us. */
				if (ioinfo[irq]->ui.flags.ready &&
				    ioinfo[irq]->schib.pmcw.pim != 0x80 &&
				    ioinfo[irq]->nopfunc &&
				    ioinfo[irq]->ui.flags.notacccap)
					ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);

				/* Block new i/o until path verification ran. */
				ioinfo[irq]->ui.flags.noio = 1;

				/* Do we still expect an interrupt for outstanding io? */
				if (ioinfo[irq]->ui.flags.busy)
					/* Wait for interrupt. */
					break;

				if (ioinfo[irq]->ui.flags.ready) {
					s390_schedule_path_verification(irq);
				} else
					ioinfo[irq]->ui.flags.noio = 0;

				break;
			}
		}
		break;

	case CHSC_SEI_ACC_LINKADDR: /*
				     * better, we know the link determined by
				     * the link address and the chpid
				     */
		/* Only the upper byte of fla is significant here. */
		fla_mask = 0xff00;
		/* fallthrough */

	case CHSC_SEI_ACC_FULLLINKADDR: /*
					 * best case, we know the CU image
					 * by chpid and full link address
					 */

#ifdef CONFIG_DEBUG_CHSC
		printk( KERN_DEBUG "Looking at chpid %x, link addr %x...\n",
			chpid, fla);
#endif /* CONFIG_DEBUG_CHSC */

		for (irq=0; irq<__MAX_SUBCHANNELS; irq++) {
			int j;
			/*
			 * Walk through all subchannels and
			 * look if our chpid and our (masked) link
			 * address are in somewhere
			 * Do a stsch for the found subchannels and
			 * perform path grouping
			 */
			if (ioinfo[irq] == INVALID_STORAGE_AREA) {
				/* The full program again (see above), grr... */
				int valret = 0;

				valret = s390_validate_subchannel(irq,0);
				if (valret == -ENXIO) {
					/* We're done */
					return;
				}
				if (irq > highest_subchannel)
					highest_subchannel = irq;
				if (valret == 0)
					s390_device_recognition_irq(irq);
				continue;
			}
			if (ioinfo[irq]->st != 0)
				continue;

			/* Update our ssd_info */
			/*
			 * NOTE(review): a failure here aborts the scan of all
			 * remaining subchannels, not just this one - confirm
			 * this is intended.
			 */
			if (chsc_get_sch_desc_irq(irq))
				break;

			for (j=0;j<8;j++) {
				if ((ioinfo[irq]->ssd_info.chpid[j] != chpid) ||
				    ((ioinfo[irq]->ssd_info.fla[j]&fla_mask) != fla))
					continue;

				/* Tell the device driver not to disturb us. */
				if (ioinfo[irq]->ui.flags.ready &&
				    ioinfo[irq]->schib.pmcw.pim != 0x80 &&
				    ioinfo[irq]->nopfunc &&
				    ioinfo[irq]->ui.flags.notacccap)
					ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);

				/* Block new i/o until path verification ran. */
				ioinfo[irq]->ui.flags.noio = 1;

				/* Do we still expect an interrupt for outstanding io? */
				if (ioinfo[irq]->ui.flags.busy)
					/* Wait for interrupt. */
					break;

				if (ioinfo[irq]->ui.flags.ready) {
					s390_schedule_path_verification(irq);
				} else
					ioinfo[irq]->ui.flags.noio = 0;

				break;
			}
			/*
			 * NOTE(review): this break ends the subchannel loop
			 * after the first known i/o subchannel has been
			 * examined - confirm this is intended.
			 */
			break;

		}
		break;

	default: BUG();
	}
}
7367 
7368 static int
__get_chpid_from_lir(void * data)7369 __get_chpid_from_lir(void *data)
7370 {
7371 	struct lir {
7372 		u8  iq;
7373 		u8  ic;
7374 		u16 sci;
7375 		/* incident-node descriptor */
7376 		u32 indesc[28];
7377 		/* attached-node descriptor */
7378 		u32 andesc[28];
7379 		/* incident-specific information */
7380 		u32 isinfo[28];
7381 	} *lir;
7382 
7383 	lir = (struct lir*) data;
7384 	if (!(lir->iq&0x80))
7385 		/* NULL link incident record */
7386 		return -EINVAL;
7387 	if (!(lir->indesc[0]&0xc0000000))
7388 		/* node descriptor not valid */
7389 		return -EINVAL;
7390 	if (!(lir->indesc[0]&0x10000000))
7391 		/* don't handle device-type nodes - FIXME */
7392 		return -EINVAL;
7393 	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
7394 
7395 	return (u16) (lir->indesc[0]&0x000000ff);
7396 }
7397 
/*
 * s390_process_css
 *
 * Handle a channel-subsystem machine check: repeatedly issue the chsc
 * command 'store event information' and act on the returned event data
 * (link incidents, cc 1, and i/o resource accessibility events, cc 2).
 * Loops as long as the response flags announce further pending event
 * information.
 */
void
s390_process_css( void )
{

	int ccode, do_sei, chpid;

	CIO_TRACE_EVENT( 2, "prcss");

	spin_lock(&chsc_lock_sei);

	/*
	 * Allocate the chsc area on first use; before init_IRQ has
	 * finished only the bootmem allocator is available.
	 * NOTE(review): kmalloc(..., GFP_KERNEL) may sleep, but
	 * chsc_lock_sei is held here - confirm the calling context
	 * allows this.
	 */
	if (!chsc_area_sei) {
		if (init_IRQ_complete)
			chsc_area_sei = kmalloc(sizeof(chsc_area_t),GFP_KERNEL);
		else
			chsc_area_sei = alloc_bootmem(sizeof(chsc_area_t));
	}

	if (!chsc_area_sei) {
		printk( KERN_CRIT
			"No memory to store event information...\n");
		spin_unlock(&chsc_lock_sei);
		return;
	}

	do_sei = 1;

	while (do_sei) {

		do_sei = 0;

		/*
		 * build the chsc request block for store event information
		 * and do the call
		 */
		memset(chsc_area_sei,0,sizeof(chsc_area_t));
		chsc_area_sei->request_block.command_code1=0x0010;
		chsc_area_sei->request_block.command_code2=0x000E;

		ccode = chsc(chsc_area_sei);

		/* Non-zero condition code: nothing was stored. */
		if (ccode)
			break;

		/* for debug purposes, check for problems */
		if (chsc_area_sei->response_block.response_code == 0x0003) {
#ifdef CONFIG_DEBUG_CHSC
			printk( KERN_WARNING
				"s390_process_css: error in chsc request block!\n");
#endif /* CONFIG_DEBUG_CHSC */
			CIO_CRW_EVENT( 2,
				       "s390_process_css: "
				       "error in chsc request block!\n");
			break;
		}
		if (chsc_area_sei->response_block.response_code == 0x0005) {
#ifdef CONFIG_DEBUG_CHSC
			printk( KERN_WARNING
				"s390_process_css: no event information stored\n");
#endif /* CONFIG_DEBUG_CHSC */
			CIO_CRW_EVENT( 2,
				       "s390_process_css: "
				       "no event information stored\n");
			break;
		}
		if (chsc_area_sei->response_block.response_code == 0x0002) {
#ifdef CONFIG_DEBUG_CHSC
			printk( KERN_WARNING
				"s390_process_css: invalid command!\n");
#endif /* CONFIG_DEBUG_CHSC */
			CIO_CRW_EVENT( 2,
				       "s390_process_css: "
				       "invalid command!\n");
			break;
		}
		/* Anything other than 0x0001 means failure; give up. */
		if (chsc_area_sei->response_block.response_code != 0x0001) {
#ifdef CONFIG_DEBUG_CHSC
			printk( KERN_WARNING
				"s390_process_css: unknown response code %d\n",
				chsc_area_sei->response_block.response_code);
#endif /* CONFIG_DEBUG_CHSC */
			CIO_CRW_EVENT( 2,
				       "s390_process_css: unknown response "
				       "code %d\n",
				       chsc_area_sei->response_block.response_code);
			break;
		}
		/* everything ok */
#ifdef CONFIG_DEBUG_CHSC
		printk( KERN_DEBUG
			"s390_process_css: "
			"event information successfully stored\n");
#endif /* CONFIG_DEBUG_CHSC */
		CIO_CRW_EVENT( 4,
			       "s390_process_css: "
			       "event information successfully stored\n");

		/* Check if there is more event information pending. */
		if (chsc_area_sei->response_block.response_block_data.
		    sei_res.flags & 0x80) {
#ifdef CONFIG_DEBUG_CHSC
			printk(KERN_INFO"s390_process_css: further event "
			       "information pending...\n");
#endif /* CONFIG_DEBUG_CHSC */
			CIO_CRW_EVENT( 2, "further event information pending\n");

			/* Loop once more after handling this event. */
			do_sei = 1;
		}

		/* Check if we might have lost some information. */
		if (chsc_area_sei->response_block.response_block_data.
		    sei_res.flags & 0x40) {
#ifdef CONFIG_DEBUG_CHSC
			printk(KERN_ERR"s390_process_css: Event information has "
			       "been lost due to overflow!\n");
#endif /* CONFIG_DEBUG_CHSC */
			CIO_CRW_EVENT( 2, "Event information has "
				       "been lost due to overflow!\n");
		}

		/* We only handle chpid reporting sources (rs == 4). */
		if (chsc_area_sei->response_block.
		    response_block_data.sei_res.rs != 4) {
#ifdef CONFIG_DEBUG_CHSC
			printk( KERN_ERR
				"s390_process_css: "
				"reporting source (%04X) isn't a chpid!\n",
				chsc_area_sei->response_block.
				response_block_data.sei_res.rsid);
#endif /* CONFIG_DEBUG_CHSC */
			CIO_CRW_EVENT( 2,
				       "s390_process_css: "
				       "reporting source (%04X) isn't a chpid!\n",
				       chsc_area_sei->response_block.
				       response_block_data.sei_res.rsid);
			continue;
		}

		/* which kind of information was stored? */
		switch (chsc_area_sei->response_block.
			response_block_data.sei_res.cc) {
		case 1: /* link incident*/
#ifdef CONFIG_DEBUG_CHSC
			printk( KERN_DEBUG
				"s390_process_css: "
				"channel subsystem reports link incident,"
				" source is chpid %x\n",
				chsc_area_sei->response_block.
				response_block_data.sei_res.rsid);
#endif /* CONFIG_DEBUG_CHSC */
			CIO_CRW_EVENT( 4,
				       "s390_process_css: "
				       "channel subsystem reports "
				       "link incident, "
				       "source is chpid %x\n",
				       chsc_area_sei->response_block.
				       response_block_data.sei_res.rsid);

			/* Extract the failing chpid from the incident record. */
			chpid = __get_chpid_from_lir(chsc_area_sei->response_block.
						     response_block_data.sei_res.
						     ccdf);
			if (chpid >= 0)
				s390_do_chpid_processing(chpid);
			break;

		case 2: /* i/o resource accessibiliy */
#ifdef CONFIG_DEBUG_CHSC
			printk( KERN_DEBUG
				"s390_process_css: channel subsystem "
				"reports some I/O devices "
				"may have become accessible\n");
#endif /* CONFIG_DEBUG_CHSC */
			CIO_CRW_EVENT( 4,
				       "s390_process_css: "
				       "channel subsystem reports "
				       "some I/O devices "
				       "may have become accessible\n");
#ifdef CONFIG_DEBUG_CHSC
			printk( KERN_DEBUG
				"Data received after sei: \n");
			printk( KERN_DEBUG
				"Validity flags: %x\n",
				chsc_area_sei->response_block.
				response_block_data.sei_res.vf);
#endif /* CONFIG_DEBUG_CHSC */
			/*
			 * The validity flags (vf) tell how much detail the
			 * event carries: chpid only, chpid + link address,
			 * or chpid + full link address.
			 */
			if ((chsc_area_sei->response_block.
			     response_block_data.sei_res.vf&0x80)
			    == 0) {
#ifdef CONFIG_DEBUG_CHSC
				printk( KERN_DEBUG "chpid: %x\n",
					chsc_area_sei->response_block.
					response_block_data.sei_res.rsid);
#endif /* CONFIG_DEBUG_CHSC */
				s390_do_res_acc_processing
					(chsc_area_sei->response_block.
					 response_block_data.sei_res.rsid,
					 0,
					 CHSC_SEI_ACC_CHPID);
			} else if ((chsc_area_sei->response_block.
				    response_block_data.sei_res.vf&0xc0)
				   == 0x80) {
#ifdef CONFIG_DEBUG_CHSC
				printk( KERN_DEBUG
					"chpid: %x  link addr: %x\n",
					chsc_area_sei->response_block.
					response_block_data.sei_res.rsid,
					chsc_area_sei->response_block.
					response_block_data.sei_res.fla);
#endif /* CONFIG_DEBUG_CHSC */
				s390_do_res_acc_processing
					(chsc_area_sei->response_block.
					 response_block_data.sei_res.rsid,
					 chsc_area_sei->response_block.
					 response_block_data.sei_res.fla,
					 CHSC_SEI_ACC_LINKADDR);
			} else if ((chsc_area_sei->response_block.
				    response_block_data.sei_res.vf & 0xc0)
				   == 0xc0) {
#ifdef CONFIG_DEBUG_CHSC
				printk( KERN_DEBUG
					"chpid: %x  "
					"full link addr: %x\n",
					chsc_area_sei->response_block.
					response_block_data.sei_res.rsid,
					chsc_area_sei->response_block.
					response_block_data.sei_res.fla);
#endif /* CONFIG_DEBUG_CHSC */
				s390_do_res_acc_processing
					(chsc_area_sei->response_block.
					 response_block_data.sei_res.rsid,
					 chsc_area_sei->response_block.
					 response_block_data.sei_res.fla,
					 CHSC_SEI_ACC_FULLLINKADDR);
			}
#ifdef CONFIG_DEBUG_CHSC
			printk( KERN_DEBUG "\n");
#endif /* CONFIG_DEBUG_CHSC */

			break;

		default: /* other stuff */
#ifdef CONFIG_DEBUG_CHSC
			printk( KERN_DEBUG
				"s390_process_css: event %d\n",
				chsc_area_sei->response_block.
				response_block_data.sei_res.cc);
#endif /* CONFIG_DEBUG_CHSC */
			CIO_CRW_EVENT( 4,
				       "s390_process_css: event %d\n",
				       chsc_area_sei->response_block.
				       response_block_data.sei_res.cc);

			break;

		}
	}

	spin_unlock(&chsc_lock_sei);
}
7656 #endif
7657 
/*
 * __process_chp_gone
 *
 * Channel path chpid has gone away. If subchannel irq uses it, handle
 * outstanding i/o (via __check_for_io_and_kill), inform the device
 * driver and schedule path verification. Only the first matching entry
 * in the pmcw chpid array is processed (the loop always ends in break
 * once a match was found).
 */
static void
__process_chp_gone(int irq, int chpid)
{
	schib_t *schib = &ioinfo[irq]->schib;
	int i;

	for (i=0;i<8;i++) {
		int mask = 0x80>>i;
		int out = 0;
		int err = 0;

		if (schib->pmcw.chpid[i] != chpid)
			continue;

		/* If we can't refresh the schib, the device is gone. */
		if (stsch(irq, schib) != 0) {
			ioinfo[irq]->ui.flags.oper = 0;
			if (ioinfo[irq]->nopfunc)
				ioinfo[irq]->nopfunc(irq, DEVSTAT_DEVICE_GONE);
			break;
		}

		s390irq_spin_lock(irq);

		/* Block new i/o until path verification has run. */
		ioinfo[irq]->ui.flags.noio = 1;

		/* Do we still expect an interrupt for outstanding io? */
		if (ioinfo[irq]->ui.flags.busy) {
			int rck = __check_for_io_and_kill(irq, mask, 1);
			if (rck & CIO_PATHGONE_WAIT4INT)
				out=1;
			if (rck & CIO_PATHGONE_IOERR)
				err=1;
			if (rck & CIO_PATHGONE_DEVGONE)
				/* The helper already dropped the lock. */
				break;
		}

		s390irq_spin_unlock(irq);

		/* Tell the device driver not to disturb us. */
		if (ioinfo[irq]->ui.flags.ready &&
		    schib->pmcw.pim != 0x80 &&
		    ioinfo[irq]->nopfunc &&
		    ioinfo[irq]->ui.flags.notacccap) {
			if (err)
				ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC_ERR);
			else
				ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);
		}

		if (out)
			/* An interrupt is pending; its handler continues. */
			break;

		if (ioinfo[irq]->ui.flags.ready) {
			s390_schedule_path_verification(irq);
		} else {
			ioinfo[irq]->ui.flags.noio = 0;
			ioinfo[irq]->ui.flags.killio = 0;
		}
		break;
	}

}
7720 
7721 static void
__process_chp_come(int irq,int chpid)7722 __process_chp_come(int irq, int chpid)
7723 {
7724 	schib_t *schib = &ioinfo[irq]->schib;
7725 	int i;
7726 
7727 	for (i=0;i<8;i++) {
7728 
7729 		if (schib->pmcw.chpid[i] != chpid)
7730 			continue;
7731 
7732 		/* Tell the device driver not to disturb us. */
7733 		if (ioinfo[irq]->ui.flags.ready &&
7734 		    schib->pmcw.pim != 0x80 &&
7735 		    ioinfo[irq]->nopfunc &&
7736 		    ioinfo[irq]->ui.flags.notacccap)
7737 			ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);
7738 
7739 		ioinfo[irq]->ui.flags.noio = 1;
7740 
7741 		/* Do we still expect an interrupt for outstanding io? */
7742 		if (ioinfo[irq]->ui.flags.busy)
7743 			/* Wait for interrupt. */
7744 			break;
7745 
7746 		if (ioinfo[irq]->ui.flags.ready)
7747 			s390_schedule_path_verification(irq);
7748 		else
7749 			ioinfo[irq]->ui.flags.noio = 0;
7750 
7751 		break;
7752 	}
7753 }
7754 
/*
 * s390_process_chp_source
 *
 * Process a channel path crw: onoff == 0 means the path has gone away,
 * onoff != 0 that it has become available (again). For a vanished path
 * only the already-known subchannels are checked; for a returned path
 * the whole subchannel range is scanned, since new devices may have
 * become visible.
 */
static void
s390_process_chp_source(int chpid, int onoff)
{
	int irq;
	int ret;
	char dbf_txt[15];

	sprintf(dbf_txt, "prchp%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

#ifdef CONFIG_CHSC
	/* Keep the global chpid bitmaps in sync. */
	if (onoff == 0) {
		clear_bit(chpid, &chpids);
	} else {
		set_bit(chpid, &chpids);
		set_bit(chpid, &chpids_known);
	}
#endif /* CONFIG_CHSC */

	if (onoff == 0) {
		/* Path gone: check all known i/o subchannels. */
		for (irq=0;irq<=highest_subchannel;irq++) {

			if ((ioinfo[irq] == INVALID_STORAGE_AREA)
			    || (ioinfo[irq]->st != 0))
				continue;

			__process_chp_gone(irq, chpid);
		}
		return;
	}

	/* Path came (back): scan everything, validating unknown subchannels. */
	for (irq=0;irq<__MAX_SUBCHANNELS;irq++) {

		if (ioinfo[irq] == INVALID_STORAGE_AREA) {
			ret = s390_validate_subchannel(irq,0);
			if (ret == 0) {
				if (irq > highest_subchannel)
					highest_subchannel = irq;
#ifdef CONFIG_DEBUG_CRW
				printk(KERN_DEBUG"process_chp_source: Found "
				       "device on irq %x\n", irq);
#endif /* CONFIG_DEBUG_CRW */
				CIO_CRW_EVENT(4, "Found device on irq %x\n",
					      irq);
				s390_device_recognition_irq(irq);
			}
		} else if (ioinfo[irq]->st == 0) {
			/* Known i/o subchannel - refresh its schib. */
			ret = stsch(irq, &ioinfo[irq]->schib);
			if (ret != 0)
				ret = -ENXIO;
		} else
			continue;

		if (ret == -ENXIO)
			/* We're through. */
			return;

		if (ret != 0)
			continue;

		__process_chp_come(irq, chpid);
	}

}
7819 
7820 /*
7821  * s390_do_crw_pending
7822  *
7823  * Called by the machine check handler to process CRW pending
7824  *  conditions. It may be a single CRW, or CRWs may be chained.
7825  *
7826  * Note : we currently process CRWs for subchannel source only
7827  */
void
s390_do_crw_pending (crwe_t * pcrwe)
{
	int irq;
	int chpid;

#ifdef CONFIG_DEBUG_CRW
	printk (KERN_DEBUG "do_crw_pending : starting ...\n");
#endif
	CIO_CRW_EVENT( 2, "do_crw_pending: starting\n");
	/* Walk the (possibly chained) channel report words. */
	while (pcrwe != NULL) {

		/* Dispatch on the reporting-source code. */
		switch (pcrwe->crw.rsc) {
		case CRW_RSC_SCH:

			irq = pcrwe->crw.rsid;

#ifdef CONFIG_DEBUG_CRW
			printk (KERN_NOTICE "do_crw_pending : source is "
				"subchannel %04X\n", irq);
#endif
			CIO_CRW_EVENT(2, "source is subchannel %04X\n",
				      irq);
			s390_process_subchannel_source (irq);

			break;

		case CRW_RSC_MONITOR:

			/* Monitoring facility events are only logged. */
#ifdef CONFIG_DEBUG_CRW
			printk (KERN_NOTICE "do_crw_pending : source is "
				"monitoring facility\n");
#endif
			CIO_CRW_EVENT(2, "source is monitoring facility\n");
			break;

		case CRW_RSC_CPATH:

			chpid = pcrwe->crw.rsid;

#ifdef CONFIG_DEBUG_CRW
			printk (KERN_NOTICE "do_crw_pending : source is "
				"channel path %02X\n", chpid);
#endif
			CIO_CRW_EVENT(2, "source is channel path %02X\n",
				      chpid);
			/* The error-recovery code tells come from gone. */
			switch (pcrwe->crw.erc) {
			case CRW_ERC_IPARM: /* Path has come. */
				s390_process_chp_source(chpid, 1);
				break;
			case CRW_ERC_PERRI: /* Path has gone. */
				s390_process_chp_source(chpid, 0);
				break;
			default:
#ifdef CONFIG_DEBUG_CRW
				printk(KERN_WARNING"do_crw_pending: don't "
				       "know how to handle erc=%x\n",
				       pcrwe->crw.erc);
#endif /* CONFIG_DEBUG_CRW */
				CIO_CRW_EVENT(0, "don't know how to handle "
					      "erc=%x\n", pcrwe->crw.erc);
			}
			break;

		case CRW_RSC_CONFIG:

			/* Configuration-alert events are only logged. */
#ifdef CONFIG_DEBUG_CRW
			printk (KERN_NOTICE "do_crw_pending : source is "
				"configuration-alert facility\n");
#endif
			CIO_CRW_EVENT(2, "source is configuration-alert facility\n");
			break;

		case CRW_RSC_CSS:

#ifdef CONFIG_DEBUG_CRW
			printk (KERN_NOTICE "do_crw_pending : source is "
				"channel subsystem\n");
#endif
			CIO_CRW_EVENT(2, "source is channel subsystem\n");
#ifdef CONFIG_CHSC
			s390_process_css();
#endif
			break;

		default:

#ifdef CONFIG_DEBUG_CRW
			printk (KERN_NOTICE
				"do_crw_pending : unknown source\n");
#endif
			CIO_CRW_EVENT( 2, "unknown source\n");
			break;

		}

		pcrwe = pcrwe->crwe_next;

	}

#ifdef CONFIG_DEBUG_CRW
	printk (KERN_DEBUG "do_crw_pending : done\n");
#endif
	CIO_CRW_EVENT(2, "do_crw_pending: done\n");
	return;
}
7934 
7935 /* added by Holger Smolinski for reipl support in reipl.S */
7936 extern void do_reipl (int);
7937 void
reipl(int sch)7938 reipl (int sch)
7939 {
7940 	int i;
7941 	s390_dev_info_t dev_info;
7942 
7943 	for (i = 0; i <= highest_subchannel; i++) {
7944 		if (get_dev_info_by_irq (i, &dev_info) == 0
7945 		    && (dev_info.status & DEVSTAT_DEVICE_OWNED)) {
7946 			free_irq (i, ioinfo[i]->irq_desc.dev_id);
7947 		}
7948 	}
7949 	if (MACHINE_IS_VM)
7950 		cpcmd ("IPL", NULL, 0);
7951 	else
7952 		do_reipl (0x10000 | sch);
7953 }
7954 
7955 /*
7956  * Function: cio_debug_init
7957  * Initializes three debug logs (under /proc/s390dbf) for common I/O:
7958  * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on
7959  * - cio_trace logs the calling of different functions
7960  * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on
7961  * debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW
7962  */
7963 int
cio_debug_init(void)7964 cio_debug_init (void)
7965 {
7966 	int ret = 0;
7967 
7968 	cio_debug_msg_id = debug_register ("cio_msg", 4, 4, 16 * sizeof (long));
7969 	if (cio_debug_msg_id != NULL) {
7970 		debug_register_view (cio_debug_msg_id, &debug_sprintf_view);
7971 		debug_set_level (cio_debug_msg_id, 6);
7972 	} else {
7973 		ret = -1;
7974 	}
7975 	cio_debug_trace_id = debug_register ("cio_trace", 4, 4, 8);
7976 	if (cio_debug_trace_id != NULL) {
7977 		debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view);
7978 		debug_set_level (cio_debug_trace_id, 6);
7979 	} else {
7980 		ret = -1;
7981 	}
7982 	cio_debug_crw_id = debug_register ("cio_crw", 2, 4, 16 * sizeof (long));
7983 	if (cio_debug_crw_id != NULL) {
7984 		debug_register_view (cio_debug_crw_id, &debug_sprintf_view);
7985 		debug_set_level (cio_debug_crw_id, 6);
7986 	} else {
7987 		ret = -1;
7988 	}
7989 	if (ret)
7990 		return ret;
7991 	cio_debug_initialized = 1;
7992 	return 0;
7993 }
7994 
7995 __initcall (cio_debug_init);
7996 
7997 #ifdef CONFIG_PROC_FS
7998 #ifdef CONFIG_CHSC
7999 /*
8000  * Function: cio_parse_chpids_proc_parameters
8001  * parse the stuff piped to /proc/chpids
8002  */
8003 void
cio_parse_chpids_proc_parameters(char * buf)8004 cio_parse_chpids_proc_parameters(char* buf)
8005 {
8006 	int i;
8007 	int cp;
8008 	int ret;
8009 
8010 	if (strstr(buf, "on ")) {
8011 		for (i=0; i<3; i++) {
8012 			buf++;
8013 		}
8014 		cp = blacklist_strtoul(buf, &buf);
8015 
8016 		chsc_get_sch_descriptions();
8017 		if (!cio_chsc_desc_avail) {
8018 			printk(KERN_ERR "Could not get chpid status, "
8019 			       "vary on/off not available\n");
8020 			return;
8021 		}
8022 
8023 		if (!test_bit(cp, &chpids)) {
8024 			ret = s390_vary_chpid(cp, 1);
8025 			if (ret == -EINVAL) {
8026 #ifdef CONFIG_DEBUG_CHSC
8027 				printk(KERN_ERR "/proc/chpids: "
8028 				       "Invalid chpid specified\n");
8029 #else /* CONFIG_DEBUG_CHSC */
8030 				printk(KERN_DEBUG "/proc/chpids: "
8031 				       "Invalid chpid specified\n");
8032 #endif /* CONFIG_DEBUG_CHSC */
8033 			} else if (ret == 0) {
8034 				printk(KERN_INFO "/proc/chpids: "
8035 				       "Varied chpid %x logically online\n",
8036 				       cp);
8037 			}
8038 		} else {
8039 			printk(KERN_ERR "/proc/chpids: chpid %x is "
8040 			       "already online\n",
8041 			       cp);
8042 		}
8043 	} else if (strstr(buf, "off ")) {
8044 		for (i=0; i<4; i++) {
8045 			buf++;
8046 		}
8047 		cp = blacklist_strtoul(buf, &buf);
8048 
8049 		chsc_get_sch_descriptions();
8050 		if (!cio_chsc_desc_avail) {
8051 			printk(KERN_ERR "Could not get chpid status, "
8052 			       "vary on/off not available\n");
8053 			return;
8054 		}
8055 
8056 		if (test_bit(cp, &chpids)) {
8057 			ret = s390_vary_chpid(cp, 0);
8058 			if (ret == -EINVAL) {
8059 #ifdef CONFIG_DEBUG_CHSC
8060 				printk(KERN_ERR "/proc/chpids: "
8061 				       "Invalid chpid specified\n");
8062 #else /* CONFIG_DEBUG_CHSC */
8063 				printk(KERN_DEBUG "/proc/chpids: "
8064 				       "Invalid chpid specified\n");
8065 #endif /* CONFIG_DEBUG_CHSC */
8066 			} else if (ret == 0) {
8067 				printk(KERN_INFO "/proc/chpids: "
8068 				       "Varied chpid %x logically offline\n",
8069 				       cp);
8070 			}
8071 		} else {
8072 			printk(KERN_ERR "/proc/chpids: "
8073 			       "chpid %x is already offline\n",
8074 			       cp);
8075 		}
8076 	} else {
8077 		printk(KERN_ERR "/proc/chpids: Parse error; "
8078 		       "try using '{on,off} <chpid>'\n");
8079 	}
8080 }
8081 
/*
 * Function: __vary_chpid_offline
 * Helper for s390_vary_chpid: take the given chpid out of use on
 * subchannel irq. Blocks new I/O, terminates I/O currently running
 * over that path, notifies the device driver and (re)schedules path
 * verification.
 * NOTE(review): assumes ioinfo[irq] has been validated by the caller.
 */
static void
__vary_chpid_offline(int irq, int chpid)
{
	schib_t *schib = &ioinfo[irq]->schib;
	int i;

	/* Look for the chpid among the subchannel's (up to 8) paths. */
	for (i=0;i<8;i++) {
		int mask = 0x80>>i;	/* lpm bit for path i */
		int out = 0;
		unsigned long flags;

		if (ioinfo[irq]->ssd_info.chpid[i] != chpid)
			continue;

		s390irq_spin_lock_irqsave(irq, flags);

		/* Block new I/O while the path state is in flux. */
		ioinfo[irq]->ui.flags.noio = 1;

		/* Hmm, the path is not really gone... */
		if (ioinfo[irq]->ui.flags.busy) {
			/* Terminate I/O running over this path. */
			if (__check_for_io_and_kill(irq, mask, 0) != 0)
				out=1;
		}

		s390irq_spin_unlock_irqrestore(irq, flags);

		/* Tell the device driver not to disturb us. */
		if (ioinfo[irq]->ui.flags.ready &&
		    schib->pmcw.pim != 0x80 &&
		    ioinfo[irq]->nopfunc &&
		    ioinfo[irq]->ui.flags.notacccap)
			ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);

		/*
		 * I/O was killed; leave noio set — presumably the
		 * pending interrupt continues the processing (TODO
		 * confirm against the interrupt handler).
		 */
		if (out)
			break;

		if (ioinfo[irq]->ui.flags.ready)
			s390_schedule_path_verification(irq);
		else
			/* Device not in use: re-allow I/O right away. */
			ioinfo[irq]->ui.flags.noio = 0;

		break;
	}

}
8127 
/*
 * Function: __vary_chpid_online
 * Helper for s390_vary_chpid: bring the given chpid back into use on
 * subchannel irq by scheduling path verification. If I/O is still
 * outstanding, verification is deferred (noio stays set) until the
 * interrupt arrives.
 * NOTE(review): assumes ioinfo[irq] has been validated by the caller.
 */
static void
__vary_chpid_online(int irq, int chpid)
{
	schib_t *schib = &ioinfo[irq]->schib;
	int i;

	/* Look for the chpid among the subchannel's (up to 8) paths. */
	for (i=0;i<8;i++) {

		if (schib->pmcw.chpid[i] != chpid)
			continue;

		/* Tell the device driver not to disturb us. */
		if (ioinfo[irq]->ui.flags.ready &&
		    schib->pmcw.pim != 0x80 &&
		    ioinfo[irq]->nopfunc &&
		    ioinfo[irq]->ui.flags.notacccap)
			ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);

		/* Block new I/O until path verification has run. */
		ioinfo[irq]->ui.flags.noio = 1;

		/* Do we still expect an interrupt for outstanding io? */
		if (ioinfo[irq]->ui.flags.busy)
			/* Wait for interrupt. */
			break;

		s390_schedule_path_verification(irq);

		break;
	}
}
8158 
8159 
8160 /*
8161  * Function: s390_vary_chpid
8162  * Varies the specified chpid online or offline
8163  */
8164 int
s390_vary_chpid(__u8 chpid,int on)8165 s390_vary_chpid( __u8 chpid, int on)
8166 {
8167 	char dbf_text[15];
8168 	int irq;
8169 
8170 	if ((chpid <=0) || (chpid >= NR_CHPIDS))
8171 		return -EINVAL;
8172 
8173 	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
8174 	CIO_TRACE_EVENT( 2, dbf_text);
8175 
8176 	if (!test_bit(chpid, &chpids_known)) {
8177 		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
8178 		return -EINVAL;
8179 	}
8180 
8181 	if (on && test_bit(chpid, &chpids_logical)) {
8182 		printk(KERN_ERR "chpid %02X already logically online\n",
8183 		       chpid);
8184 		return -EINVAL;
8185 	}
8186 
8187 	if (!on && !test_bit(chpid, &chpids_logical)) {
8188 		printk(KERN_ERR "chpid %02X already logically offline\n",
8189 		       chpid);
8190 		return -EINVAL;
8191 	}
8192 
8193 	if (on) {
8194 		set_bit(chpid, &chpids_logical);
8195 		set_bit(chpid, &chpids);
8196 
8197 	} else {
8198 		clear_bit(chpid, &chpids_logical);
8199 		clear_bit(chpid, &chpids);
8200 	}
8201 
8202 	/*
8203 	 * Redo PathVerification on the devices the chpid connects to
8204 	 */
8205 
8206 	for (irq=0;irq<=highest_subchannel;irq++) {
8207 
8208 		if (ioinfo[irq] == INVALID_STORAGE_AREA)
8209 			continue;
8210 
8211 		if (ioinfo[irq]->st)
8212 			continue;
8213 
8214 		if (!ioinfo[irq]->ssd_info.valid)
8215 			continue;
8216 
8217 		if (on)
8218 			__vary_chpid_online(irq, chpid);
8219 		else
8220 			__vary_chpid_offline(irq, chpid);
8221 
8222 	}
8223 
8224 	return 0;
8225 }
8226 #endif /* CONFIG_CHSC */
8227 
8228 /*
8229  * Display info on subchannels in /proc/subchannels.
8230  * Adapted from procfs stuff in dasd.c by Cornelia Huck, 02/28/01.
8231  */
8232 
8233 typedef struct {
8234 	char *data;
8235 	int len;
8236 } tempinfo_t;
8237 
8238 #define MIN(a,b) ((a)<(b)?(a):(b))
8239 
8240 static struct proc_dir_entry *chan_subch_entry;
8241 
/*
 * Function: chan_subch_open
 * Render the /proc/subchannels table (one line per operational
 * subchannel: devno, subchannel number, dev and CU type/model, in-use
 * flag, PIM/PAM/POM and chpids) into a vmalloc'ed snapshot attached
 * to file->private_data.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
chan_subch_open (struct inode *inode, struct file *file)
{
	int rc = 0;
	int size = 1;
	int len = 0;
	int i = 0;
	int j = 0;
	tempinfo_t *info;

	info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
	if (info == NULL) {
		printk (KERN_WARNING "No memory available for data\n");
		return -ENOMEM;
	} else {
		file->private_data = (void *) info;
	}

	/* Budget 128 bytes of output per subchannel. */
	size += (highest_subchannel + 1) * 128;
	info->data = (char *) vmalloc (size);

	if (size && info->data == NULL) {
		printk (KERN_WARNING "No memory available for data\n");
		vfree (info);
		return -ENOMEM;
	}

	len += sprintf (info->data + len,
			"Device sch.  Dev Type/Model CU  in use  PIM PAM POM CHPIDs\n");
	len += sprintf (info->data + len,
			"---------------------------------------------------------------------\n");

	/* One line per valid, operational I/O subchannel. */
	for (i = 0; i <= highest_subchannel; i++) {
		if (!((ioinfo[i] == NULL) || (ioinfo[i] == INVALID_STORAGE_AREA)
		      || (ioinfo[i]->st )|| !(ioinfo[i]->ui.flags.oper))) {
			len +=
			    sprintf (info->data + len, "%04X   %04X  ",
				     ioinfo[i]->schib.pmcw.dev, i);
			/* Device type 0 means sense id gave CU info only. */
			if (ioinfo[i]->senseid.dev_type != 0) {
				len += sprintf (info->data + len,
						"%04X/%02X   %04X/%02X",
						ioinfo[i]->senseid.dev_type,
						ioinfo[i]->senseid.dev_model,
						ioinfo[i]->senseid.cu_type,
						ioinfo[i]->senseid.cu_model);
			} else {
				len += sprintf (info->data + len,
						"          %04X/%02X",
						ioinfo[i]->senseid.cu_type,
						ioinfo[i]->senseid.cu_model);
			}
			if (ioinfo[i]->ui.flags.ready) {
				len += sprintf (info->data + len, "  yes ");
			} else {
				len += sprintf (info->data + len, "      ");
			}
			len += sprintf (info->data + len,
					"    %02X  %02X  %02X  ",
					ioinfo[i]->schib.pmcw.pim,
					ioinfo[i]->schib.pmcw.pam,
					ioinfo[i]->schib.pmcw.pom);
			/* 8 chpids, with a blank after the first four. */
			for (j = 0; j < 8; j++) {
				len += sprintf (info->data + len,
						"%02X",
						ioinfo[i]->schib.pmcw.chpid[j]);
				if (j == 3) {
					len += sprintf (info->data + len, " ");
				}
			}
			len += sprintf (info->data + len, "\n");
		}
	}
	info->len = len;

	return rc;
}
8318 
8319 static int
chan_subch_close(struct inode * inode,struct file * file)8320 chan_subch_close (struct inode *inode, struct file *file)
8321 {
8322 	int rc = 0;
8323 	tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8324 
8325 	if (p_info) {
8326 		if (p_info->data)
8327 			vfree (p_info->data);
8328 		vfree (p_info);
8329 	}
8330 
8331 	return rc;
8332 }
8333 
8334 static ssize_t
chan_subch_read(struct file * file,char * user_buf,size_t user_len,loff_t * offset)8335 chan_subch_read (struct file *file, char *user_buf, size_t user_len,
8336 		 loff_t * offset)
8337 {
8338 	loff_t len;
8339 	tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8340 	loff_t pos = *offset;
8341 
8342 	if (pos < 0 || pos >= p_info->len) {
8343 		return 0;
8344 	} else {
8345 		len = MIN (user_len, (p_info->len - pos));
8346 		if (copy_to_user (user_buf, &(p_info->data[pos]), len))
8347 			return -EFAULT;
8348 		*offset = pos + len;
8349 		return len;
8350 	}
8351 }
8352 
/* File operations for /proc/subchannels (snapshot built at open). */
static struct file_operations chan_subch_file_ops = {
	read:chan_subch_read, open:chan_subch_open, release:chan_subch_close,
};
8356 
8357 static int
chan_proc_init(void)8358 chan_proc_init (void)
8359 {
8360 	chan_subch_entry =
8361 	    create_proc_entry ("subchannels", S_IFREG | S_IRUGO, &proc_root);
8362 	chan_subch_entry->proc_fops = &chan_subch_file_ops;
8363 
8364 	return 1;
8365 }
8366 
8367 __initcall (chan_proc_init);
8368 
/*
 * Function: chan_proc_cleanup
 * Remove the /proc/subchannels entry created by chan_proc_init.
 */
void
chan_proc_cleanup (void)
{
	remove_proc_entry ("subchannels", &proc_root);
}
8374 
8375 /*
8376  * Display device specific information under /proc/deviceinfo/<devno>
8377  */ static struct proc_dir_entry *cio_procfs_deviceinfo_root = NULL;
8378 
8379 /*
8380  * cio_procfs_device_list holds all devno-specific procfs directories
8381  */
8382 
8383 typedef struct {
8384 	int devno;
8385 	struct proc_dir_entry *cio_device_entry;
8386 	struct proc_dir_entry *cio_sensedata_entry;
8387 	struct proc_dir_entry *cio_in_use_entry;
8388 	struct proc_dir_entry *cio_chpid_entry;
8389 } cio_procfs_entry_t;
8390 
8391 typedef struct _cio_procfs_device {
8392 	struct _cio_procfs_device *next;
8393 	cio_procfs_entry_t *entry;
8394 } cio_procfs_device_t;
8395 
8396 cio_procfs_device_t *cio_procfs_device_list = NULL;
8397 
8398 /*
8399  * File operations
8400  */
8401 
8402 static int
cio_device_entry_close(struct inode * inode,struct file * file)8403 cio_device_entry_close (struct inode *inode, struct file *file)
8404 {
8405 	int rc = 0;
8406 	tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8407 
8408 	if (p_info) {
8409 		if (p_info->data)
8410 			vfree (p_info->data);
8411 		vfree (p_info);
8412 	}
8413 
8414 	return rc;
8415 }
8416 
8417 static ssize_t
cio_device_entry_read(struct file * file,char * user_buf,size_t user_len,loff_t * offset)8418 cio_device_entry_read (struct file *file, char *user_buf, size_t user_len,
8419 		       loff_t * offset)
8420 {
8421 	loff_t len;
8422 	tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8423 	loff_t pos = *offset;
8424 
8425 	if (pos < 0 || pos >= p_info->len) {
8426 		return 0;
8427 	} else {
8428 		len = MIN (user_len, (p_info->len - pos));
8429 		if (copy_to_user (user_buf, &(p_info->data[pos]), len))
8430 			return -EFAULT;
8431 		*offset = pos + len;
8432 		return len;
8433 	}
8434 }
8435 
8436 static int
cio_sensedata_entry_open(struct inode * inode,struct file * file)8437 cio_sensedata_entry_open (struct inode *inode, struct file *file)
8438 {
8439 	int rc = 0;
8440 	int size = 1;
8441 	int len = 0;
8442 	tempinfo_t *info;
8443 	int irq;
8444 	int devno;
8445 	char *devno_str;
8446 
8447 	info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8448 	if (info == NULL) {
8449 		printk (KERN_WARNING "No memory available for data\n");
8450 		rc = -ENOMEM;
8451 	} else {
8452 		file->private_data = (void *) info;
8453 		size += 2 * 32;
8454 		info->data = (char *) vmalloc (size);
8455 		if (size && info->data == NULL) {
8456 			printk (KERN_WARNING "No memory available for data\n");
8457 			vfree (info);
8458 			rc = -ENOMEM;
8459 		} else {
8460 			devno_str = kmalloc (6 * sizeof (char), GFP_KERNEL);
8461 			memset (devno_str, 0, 6 * sizeof (char));
8462 			memcpy (devno_str,
8463 				file->f_dentry->d_parent->d_name.name,
8464 				strlen (file->f_dentry->d_parent->d_name.name) +
8465 				1);
8466 			devno = simple_strtoul (devno_str, &devno_str, 16);
8467 			irq = get_irq_by_devno (devno);
8468 			if (irq != -1) {
8469 				len +=
8470 				    sprintf (info->data + len,
8471 					     "Dev Type/Mod: ");
8472 				if (ioinfo[irq]->senseid.dev_type == 0) {
8473 					len +=
8474 					    sprintf (info->data + len,
8475 						     "%04X/%02X\n",
8476 						     ioinfo[irq]->senseid.
8477 						     cu_type,
8478 						     ioinfo[irq]->senseid.
8479 						     cu_model);
8480 				} else {
8481 					len +=
8482 					    sprintf (info->data + len,
8483 						     "%04X/%02X\n",
8484 						     ioinfo[irq]->senseid.
8485 						     dev_type,
8486 						     ioinfo[irq]->senseid.
8487 						     dev_model);
8488 					len +=
8489 					    sprintf (info->data + len,
8490 						     "CU Type/Mod:  %04X/%02X\n",
8491 						     ioinfo[irq]->senseid.
8492 						     cu_type,
8493 						     ioinfo[irq]->senseid.
8494 						     cu_model);
8495 				}
8496 			}
8497 			info->len = len;
8498 		}
8499 	}
8500 
8501 	return rc;
8502 }
8503 
8504 static int
cio_in_use_entry_open(struct inode * inode,struct file * file)8505 cio_in_use_entry_open (struct inode *inode, struct file *file)
8506 {
8507 	int rc = 0;
8508 	int size = 1;
8509 	int len = 0;
8510 	tempinfo_t *info;
8511 	int irq;
8512 	int devno;
8513 	char *devno_str;
8514 
8515 	info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8516 	if (info == NULL) {
8517 		printk (KERN_WARNING "No memory available for data\n");
8518 		rc = -ENOMEM;
8519 	} else {
8520 		file->private_data = (void *) info;
8521 		size += 8;
8522 		info->data = (char *) vmalloc (size);
8523 		if (size && info->data == NULL) {
8524 			printk (KERN_WARNING "No memory available for data\n");
8525 			vfree (info);
8526 			rc = -ENOMEM;
8527 		} else {
8528 			devno_str = kmalloc (6 * sizeof (char), GFP_KERNEL);
8529 			memset (devno_str, 0, 6 * sizeof (char));
8530 			memcpy (devno_str,
8531 				file->f_dentry->d_parent->d_name.name,
8532 				strlen (file->f_dentry->d_parent->d_name.name) +
8533 				1);
8534 			devno = simple_strtoul (devno_str, &devno_str, 16);
8535 			irq = get_irq_by_devno (devno);
8536 			if (irq != -1) {
8537 				len +=
8538 				    sprintf (info->data + len, "%s\n",
8539 					     ioinfo[irq]->ui.flags.
8540 					     ready ? "yes" : "no");
8541 			}
8542 			info->len = len;
8543 		}
8544 	}
8545 
8546 	return rc;
8547 }
8548 
8549 static int
cio_chpid_entry_open(struct inode * inode,struct file * file)8550 cio_chpid_entry_open (struct inode *inode, struct file *file)
8551 {
8552 	int rc = 0;
8553 	int size = 1;
8554 	int len = 0;
8555 	tempinfo_t *info;
8556 	int irq;
8557 	int devno;
8558 	int i;
8559 	char *devno_str;
8560 
8561 	info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8562 	if (info == NULL) {
8563 		printk (KERN_WARNING "No memory available for data\n");
8564 		rc = -ENOMEM;
8565 	} else {
8566 		file->private_data = (void *) info;
8567 		size += 8 * 16;
8568 		info->data = (char *) vmalloc (size);
8569 		if (size && info->data == NULL) {
8570 			printk (KERN_WARNING "No memory available for data\n");
8571 			vfree (info);
8572 			rc = -ENOMEM;
8573 		} else {
8574 			devno_str = kmalloc (6 * sizeof (char), GFP_KERNEL);
8575 			memset (devno_str, 0, 6 * sizeof (char));
8576 			memcpy (devno_str,
8577 				file->f_dentry->d_parent->d_name.name,
8578 				strlen (file->f_dentry->d_parent->d_name.name) +
8579 				1);
8580 			devno = simple_strtoul (devno_str, &devno_str, 16);
8581 			irq = get_irq_by_devno (devno);
8582 			if (irq != -1) {
8583 				for (i = 0; i < 8; i++) {
8584 					len +=
8585 					    sprintf (info->data + len,
8586 						     "CHPID[%d]: ", i);
8587 					len +=
8588 					    sprintf (info->data + len, "%02X\n",
8589 						     ioinfo[irq]->schib.pmcw.
8590 						     chpid[i]);
8591 				}
8592 			}
8593 			info->len = len;
8594 		}
8595 	}
8596 
8597 	return rc;
8598 }
8599 
/* File operations for /proc/deviceinfo/<devno>/sensedata. */
static struct file_operations cio_sensedata_entry_file_ops = {
	read:cio_device_entry_read, open:cio_sensedata_entry_open,
	release:cio_device_entry_close,
};

/* File operations for /proc/deviceinfo/<devno>/in_use. */
static struct file_operations cio_in_use_entry_file_ops = {
	read:cio_device_entry_read, open:cio_in_use_entry_open,
	release:cio_device_entry_close,
};

/* File operations for /proc/deviceinfo/<devno>/chpids. */
static struct file_operations cio_chpid_entry_file_ops = {
	read:cio_device_entry_read, open:cio_chpid_entry_open,
	release:cio_device_entry_close,
};
8614 
8615 /*
8616  * Function: cio_procfs_device_create
8617  * create procfs entry for given device number
8618  * and insert it into list
8619  */
8620 int
cio_procfs_device_create(int devno)8621 cio_procfs_device_create (int devno)
8622 {
8623 	cio_procfs_entry_t *entry;
8624 	cio_procfs_device_t *tmp;
8625 	cio_procfs_device_t *where;
8626 	char buf[8];
8627 	int i;
8628 	int rc = 0;
8629 
8630 	/* create the directory entry */
8631 	entry =
8632 	    (cio_procfs_entry_t *) kmalloc (sizeof (cio_procfs_entry_t),
8633 					    GFP_KERNEL);
8634 	if (entry) {
8635 		entry->devno = devno;
8636 		sprintf (buf, "%x", devno);
8637 		entry->cio_device_entry =
8638 		    proc_mkdir (buf, cio_procfs_deviceinfo_root);
8639 
8640 		if (entry->cio_device_entry) {
8641 			tmp = (cio_procfs_device_t *)
8642 			    kmalloc (sizeof (cio_procfs_device_t), GFP_KERNEL);
8643 			if (tmp) {
8644 				tmp->entry = entry;
8645 
8646 				if (cio_procfs_device_list == NULL) {
8647 					cio_procfs_device_list = tmp;
8648 					tmp->next = NULL;
8649 				} else {
8650 					where = cio_procfs_device_list;
8651 					i = where->entry->devno;
8652 					while ((devno > i)
8653 					       && (where->next != NULL)) {
8654 						where = where->next;
8655 						i = where->entry->devno;
8656 					}
8657 					if (where->next == NULL) {
8658 						where->next = tmp;
8659 						tmp->next = NULL;
8660 					} else {
8661 						tmp->next = where->next;
8662 						where->next = tmp;
8663 					}
8664 				}
8665 				/* create the different entries */
8666 				entry->cio_sensedata_entry =
8667 				    create_proc_entry ("sensedata",
8668 						       S_IFREG | S_IRUGO,
8669 						       entry->cio_device_entry);
8670 				entry->cio_sensedata_entry->proc_fops =
8671 				    &cio_sensedata_entry_file_ops;
8672 				entry->cio_in_use_entry =
8673 				    create_proc_entry ("in_use",
8674 						       S_IFREG | S_IRUGO,
8675 						       entry->cio_device_entry);
8676 				entry->cio_in_use_entry->proc_fops =
8677 				    &cio_in_use_entry_file_ops;
8678 				entry->cio_chpid_entry =
8679 				    create_proc_entry ("chpids",
8680 						       S_IFREG | S_IRUGO,
8681 						       entry->cio_device_entry);
8682 				entry->cio_chpid_entry->proc_fops =
8683 				    &cio_chpid_entry_file_ops;
8684 			} else {
8685 				printk (KERN_WARNING
8686 					"Error, could not allocate procfs structure!\n");
8687 				remove_proc_entry (buf,
8688 						   cio_procfs_deviceinfo_root);
8689 				kfree (entry);
8690 				rc = -ENOMEM;
8691 			}
8692 		} else {
8693 			printk (KERN_WARNING
8694 				"Error, could not allocate procfs structure!\n");
8695 			kfree (entry);
8696 			rc = -ENOMEM;
8697 		}
8698 
8699 	} else {
8700 		printk (KERN_WARNING
8701 			"Error, could not allocate procfs structure!\n");
8702 		rc = -ENOMEM;
8703 	}
8704 	return rc;
8705 }
8706 
8707 /*
8708  * Function: cio_procfs_device_remove
8709  * remove procfs entry for given device number
8710  */
8711 int
cio_procfs_device_remove(int devno)8712 cio_procfs_device_remove (int devno)
8713 {
8714 	int rc = 0;
8715 	cio_procfs_device_t *tmp;
8716 	cio_procfs_device_t *prev = NULL;
8717 
8718 	tmp = cio_procfs_device_list;
8719 	while (tmp) {
8720 		if (tmp->entry->devno == devno)
8721 			break;
8722 		prev = tmp;
8723 		tmp = tmp->next;
8724 	}
8725 	if (tmp) {
8726 		char buf[8];
8727 
8728 		remove_proc_entry ("sensedata", tmp->entry->cio_device_entry);
8729 		remove_proc_entry ("in_use", tmp->entry->cio_device_entry);
8730 		remove_proc_entry ("chpid", tmp->entry->cio_device_entry);
8731 		sprintf (buf, "%x", devno);
8732 		remove_proc_entry (buf, cio_procfs_deviceinfo_root);
8733 
8734 		if (tmp == cio_procfs_device_list) {
8735 			cio_procfs_device_list = tmp->next;
8736 		} else {
8737 			prev->next = tmp->next;
8738 		}
8739 		kfree (tmp->entry);
8740 		kfree (tmp);
8741 	} else {
8742 		rc = -ENODEV;
8743 	}
8744 
8745 	return rc;
8746 }
8747 
8748 /*
8749  * Function: cio_procfs_purge
8750  * purge /proc/deviceinfo of entries for gone devices
8751  */
8752 
8753 int
cio_procfs_device_purge(void)8754 cio_procfs_device_purge (void)
8755 {
8756 	int i;
8757 
8758 	for (i = 0; i <= highest_subchannel; i++) {
8759 		if (ioinfo[i] != INVALID_STORAGE_AREA) {
8760 			if (!ioinfo[i]->ui.flags.oper)
8761 				cio_procfs_device_remove (ioinfo[i]->devno);
8762 		}
8763 	}
8764 	return 0;
8765 }
8766 
8767 /*
8768  * Function: cio_procfs_create
8769  * create /proc/deviceinfo/ and subdirs for the devices
8770  */
8771 static int
cio_procfs_create(void)8772 cio_procfs_create (void)
8773 {
8774 	int irq;
8775 
8776 	if (cio_proc_devinfo) {
8777 
8778 		cio_procfs_deviceinfo_root =
8779 		    proc_mkdir ("deviceinfo", &proc_root);
8780 
8781 		if (highest_subchannel >= MAX_CIO_PROCFS_ENTRIES) {
8782 			printk (KERN_ALERT
8783 				"Warning: Not enough inodes for creating all "
8784 				"entries under /proc/deviceinfo/. "
8785 				"Not every device will get an entry.\n");
8786 		}
8787 
8788 		for (irq = 0; irq <= highest_subchannel; irq++) {
8789 			if (irq >= MAX_CIO_PROCFS_ENTRIES)
8790 				break;
8791 			if (ioinfo[irq] != INVALID_STORAGE_AREA) {
8792 				if (ioinfo[irq]->ui.flags.oper)
8793 					if (cio_procfs_device_create
8794 					    (ioinfo[irq]->devno) == -ENOMEM) {
8795 						printk (KERN_CRIT
8796 							"Out of memory while creating "
8797 							"entries in /proc/deviceinfo/, "
8798 							"not all devices might show up\n");
8799 						break;
8800 					}
8801 			}
8802 		}
8803 
8804 	}
8805 
8806 	return 1;
8807 }
8808 
8809 __initcall (cio_procfs_create);
8810 
8811 /*
8812  * Entry /proc/cio_ignore to display blacklisted ranges of devices.
8813  * un-ignore devices by piping to /proc/cio_ignore:
8814  * free all frees all blacklisted devices, free <range>,<range>,...
8815  * frees specified ranges of devnos
8816  * add <range>,<range>,... will add a range of devices to blacklist -
8817  * but only for devices not already known
8818  */
8819 
8820 static struct proc_dir_entry *cio_ignore_proc_entry;
8821 static int
cio_ignore_proc_open(struct inode * inode,struct file * file)8822 cio_ignore_proc_open (struct inode *inode, struct file *file)
8823 {
8824 	int rc = 0;
8825 	int size = 1;
8826 	int len = 0;
8827 	tempinfo_t *info;
8828 	long flags;
8829 	int i, j;
8830 
8831 	info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8832 	if (info == NULL) {
8833 		printk (KERN_WARNING "No memory available for data\n");
8834 		rc = -ENOMEM;
8835 	} else {
8836 		file->private_data = (void *) info;
8837 		size += nr_ignored * 6;
8838 		info->data = (char *) vmalloc (size);
8839 		if (size && info->data == NULL) {
8840 			printk (KERN_WARNING "No memory available for data\n");
8841 			vfree (info);
8842 			rc = -ENOMEM;
8843 		} else {
8844 			spin_lock_irqsave (&blacklist_lock, flags);
8845 			for (i = 0; i <= highest_ignored; i++)
8846 				if (test_bit (i, &bl_dev)) {
8847 					len +=
8848 					    sprintf (info->data + len, "%04x ",
8849 						     i);
8850 					for (j = i; (j <= highest_ignored)
8851 					     && (test_bit (j, &bl_dev)); j++) ;
8852 					j--;
8853 					if (i != j)
8854 						len +=
8855 						    sprintf (info->data + len,
8856 							     "- %04x", j);
8857 					len += sprintf (info->data + len, "\n");
8858 					i = j;
8859 				}
8860 			spin_unlock_irqrestore (&blacklist_lock, flags);
8861 			info->len = len;
8862 		}
8863 	}
8864 	return rc;
8865 }
8866 
8867 static int
cio_ignore_proc_close(struct inode * inode,struct file * file)8868 cio_ignore_proc_close (struct inode *inode, struct file *file)
8869 {
8870 	int rc = 0;
8871 	tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8872 
8873 	if (p_info) {
8874 		if (p_info->data)
8875 			vfree (p_info->data);
8876 		vfree (p_info);
8877 	}
8878 
8879 	return rc;
8880 }
8881 
8882 static ssize_t
cio_ignore_proc_read(struct file * file,char * user_buf,size_t user_len,loff_t * offset)8883 cio_ignore_proc_read (struct file *file, char *user_buf, size_t user_len,
8884 		      loff_t * offset)
8885 {
8886 	loff_t len;
8887 	tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8888 	loff_t pos = *offset;
8889 
8890 	if (pos < 0 || pos >= p_info->len) {
8891 		return 0;
8892 	} else {
8893 		len = MIN (user_len, (p_info->len - *offset));
8894 		if (copy_to_user (user_buf, &(p_info->data[*offset]), len))
8895 			return -EFAULT;
8896 		(*offset) = pos + len;
8897 		return len;
8898 	}
8899 }
8900 
8901 static ssize_t
cio_ignore_proc_write(struct file * file,const char * user_buf,size_t user_len,loff_t * offset)8902 cio_ignore_proc_write (struct file *file, const char *user_buf,
8903 		       size_t user_len, loff_t * offset)
8904 {
8905 	char *buffer;
8906 
8907 	if(user_len > 65536)
8908 		user_len = 65536;
8909 
8910 	buffer = vmalloc (user_len + 1);
8911 
8912 	if (buffer == NULL)
8913 		return -ENOMEM;
8914 	if (copy_from_user (buffer, user_buf, user_len)) {
8915 		vfree (buffer);
8916 		return -EFAULT;
8917 	}
8918 	buffer[user_len] = '\0';
8919 #ifdef CONFIG_DEBUG_IO
8920 	printk (KERN_DEBUG "/proc/cio_ignore: '%s'\n", buffer);
8921 #endif /* CONFIG_DEBUG_IO */
8922 
8923 	blacklist_parse_proc_parameters (buffer);
8924 
8925 	vfree (buffer);
8926 	return user_len;
8927 }
8928 
/* File operations for /proc/cio_ignore (read snapshot, writable). */
static struct file_operations cio_ignore_proc_file_ops = {
	read:cio_ignore_proc_read, open:cio_ignore_proc_open,
	write:cio_ignore_proc_write, release:cio_ignore_proc_close,
};
8933 
8934 static int
cio_ignore_proc_init(void)8935 cio_ignore_proc_init (void)
8936 {
8937 	cio_ignore_proc_entry =
8938 	    create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR,
8939 			       &proc_root);
8940 	cio_ignore_proc_entry->proc_fops = &cio_ignore_proc_file_ops;
8941 
8942 	return 1;
8943 }
8944 
8945 __initcall (cio_ignore_proc_init);
8946 
8947 /*
8948  * Entry /proc/irq_count
8949  * display how many irqs have occured per cpu...
8950  */
8951 
8952 static struct proc_dir_entry *cio_irq_proc_entry;
8953 
8954 static int
cio_irq_proc_open(struct inode * inode,struct file * file)8955 cio_irq_proc_open (struct inode *inode, struct file *file)
8956 {
8957 	int rc = 0;
8958 	int size = 1;
8959 	int len = 0;
8960 	tempinfo_t *info;
8961 	int i;
8962 
8963 	info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8964 	if (info == NULL) {
8965 		printk (KERN_WARNING "No memory available for data\n");
8966 		rc = -ENOMEM;
8967 	} else {
8968 		file->private_data = (void *) info;
8969 		size += NR_CPUS * 16;
8970 		info->data = (char *) vmalloc (size);
8971 		if (size && info->data == NULL) {
8972 			printk (KERN_WARNING "No memory available for data\n");
8973 			vfree (info);
8974 			rc = -ENOMEM;
8975 		} else {
8976 			for (i = 0; i < NR_CPUS; i++) {
8977 				if (s390_irq_count[i] != 0)
8978 					len +=
8979 					    sprintf (info->data + len, "%lx\n",
8980 						     s390_irq_count[i]);
8981 			}
8982 			info->len = len;
8983 		}
8984 	}
8985 	return rc;
8986 }
8987 
8988 static int
cio_irq_proc_close(struct inode * inode,struct file * file)8989 cio_irq_proc_close (struct inode *inode, struct file *file)
8990 {
8991 	int rc = 0;
8992 	tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8993 
8994 	if (p_info) {
8995 		if (p_info->data)
8996 			vfree (p_info->data);
8997 		vfree (p_info);
8998 	}
8999 
9000 	return rc;
9001 }
9002 
9003 static ssize_t
cio_irq_proc_read(struct file * file,char * user_buf,size_t user_len,loff_t * offset)9004 cio_irq_proc_read (struct file *file, char *user_buf, size_t user_len,
9005 		   loff_t * offset)
9006 {
9007 	loff_t len;
9008 	tempinfo_t *p_info = (tempinfo_t *) file->private_data;
9009 	loff_t pos = *offset;
9010 
9011 	if (pos < 0 || pos >= p_info->len) {
9012 		return 0;
9013 	} else {
9014 		len = MIN (user_len, (p_info->len - *offset));
9015 		if (copy_to_user (user_buf, &(p_info->data[*offset]), len))
9016 			return -EFAULT;
9017 		(*offset) = pos + len;
9018 		return len;
9019 	}
9020 }
9021 
/* File operations for /proc/irq_count (snapshot built at open). */
static struct file_operations cio_irq_proc_file_ops = {
	read:cio_irq_proc_read, open:cio_irq_proc_open,
	release:cio_irq_proc_close,
};
9026 
9027 static int
cio_irq_proc_init(void)9028 cio_irq_proc_init (void)
9029 {
9030 
9031 	int i;
9032 
9033 	if (cio_count_irqs) {
9034 		for (i = 0; i < NR_CPUS; i++)
9035 			s390_irq_count[i] = 0;
9036 		cio_irq_proc_entry =
9037 		    create_proc_entry ("irq_count", S_IFREG | S_IRUGO,
9038 				       &proc_root);
9039 		cio_irq_proc_entry->proc_fops = &cio_irq_proc_file_ops;
9040 	}
9041 
9042 	return 1;
9043 }
9044 
9045 __initcall (cio_irq_proc_init);
9046 
9047 
9048 #ifdef CONFIG_CHSC
9049 /*
9050  * /proc/chpids to display available chpids
9051  * vary chpids on/off by piping to it
9052  */
9053 
9054 static struct proc_dir_entry *cio_chpids_proc_entry;
9055 
9056 static int
cio_chpids_proc_open(struct inode * inode,struct file * file)9057 cio_chpids_proc_open(struct inode *inode, struct file *file)
9058 {
9059 	int rc = 0;
9060 	int size = 1;
9061 	int len = 0;
9062 	tempinfo_t *info;
9063 	int i;
9064 
9065 	if (!cio_chsc_desc_avail) {
9066 		/*
9067 		 * We have not yet retrieved the link addresses,
9068 		 * so we do it now.
9069 		 */
9070 		chsc_get_sch_descriptions();
9071 	}
9072 
9073 
9074 	info = (tempinfo_t *) vmalloc(sizeof(tempinfo_t));
9075 	if (info == NULL) {
9076 		printk( KERN_WARNING "No memory available for data\n");
9077 		rc = -ENOMEM;
9078 	} else {
9079 		file->private_data = (void *) info;
9080 		size += NR_CHPIDS * 16;
9081 		info->data = (char *) vmalloc(size);
9082 		if ( size && info->data == NULL) {
9083 			printk( KERN_WARNING "No memory available for data\n");
9084 			vfree (info);
9085 			rc = -ENOMEM;
9086 		} else {
9087 			/* update our stuff */
9088 			chsc_get_sch_descriptions();
9089 			if (!cio_chsc_desc_avail) {
9090 				len += sprintf(info->data+len, "no info available\n");
9091 				goto cont;
9092 			}
9093 
9094 			for (i=0;i<NR_CHPIDS;i++) {
9095 				if (test_bit(i, &chpids_known)) {
9096 					if (!test_bit(i, &chpids))
9097 						len += sprintf(info->data+len,
9098 							       "%02X n/a\n",
9099 							       i);
9100 					else if (test_bit(i, &chpids_logical))
9101 						len += sprintf(info->data+len,
9102 							       "%02X online\n",
9103 							       i);
9104 					else
9105 						len += sprintf(info->data+len,
9106 							       "%02X logically "
9107 							       "offline\n",
9108 							       i);
9109 				}
9110 
9111 			}
9112 		cont:
9113 			info->len = len;
9114 		}
9115 	}
9116 	return rc;
9117 }
9118 
9119 static int
cio_chpids_proc_close(struct inode * inode,struct file * file)9120 cio_chpids_proc_close(struct inode *inode, struct file *file)
9121 {
9122 	int rc = 0;
9123 	tempinfo_t *p_info = (tempinfo_t *) file->private_data;
9124 
9125      if (p_info) {
9126 	  if (p_info->data)
9127 	       vfree( p_info->data );
9128 	  vfree( p_info );
9129      }
9130 
9131      return rc;
9132 }
9133 
9134 static ssize_t
cio_chpids_proc_read(struct file * file,char * user_buf,size_t user_len,loff_t * offset)9135 cio_chpids_proc_read( struct file *file, char *user_buf, size_t user_len, loff_t * offset)
9136 {
9137      loff_t len;
9138      tempinfo_t *p_info = (tempinfo_t *) file->private_data;
9139      loff_t pos = *offset;
9140 
9141      if (pos < 0 || pos >= p_info->len) {
9142 	  return 0;
9143      } else {
9144 	  len = MIN(user_len, (p_info->len - pos));
9145 	  if (copy_to_user( user_buf, &(p_info->data[pos]), len))
9146 	       return -EFAULT;
9147 	  *offset = pos + len;
9148 	  return len;
9149      }
9150 }
9151 
9152 static ssize_t
cio_chpids_proc_write(struct file * file,const char * user_buf,size_t user_len,loff_t * offset)9153 cio_chpids_proc_write (struct file *file, const char *user_buf,
9154 		       size_t user_len, loff_t * offset)
9155 {
9156 	char *buffer;
9157 
9158 	if(user_len > 65536)
9159 		user_len = 65536;
9160 
9161 	buffer = vmalloc (user_len + 1);
9162 
9163 	if (buffer == NULL)
9164 		return -ENOMEM;
9165 	if (copy_from_user (buffer, user_buf, user_len)) {
9166 		vfree (buffer);
9167 		return -EFAULT;
9168 	}
9169 	buffer[user_len]='\0';
9170 #ifdef CIO_DEBUG_IO
9171 	printk("/proc/chpids: '%s'\n", buffer);
9172 #endif /* CIO_DEBUG_IO */
9173 
9174 	cio_parse_chpids_proc_parameters(buffer);
9175 
9176 	vfree (buffer);
9177 	return user_len;
9178 }
9179 
/* file_operations for /proc/chpids: readable snapshot, writable for vary. */
static struct file_operations cio_chpids_proc_file_ops =
{
	read:cio_chpids_proc_read,
	open:cio_chpids_proc_open,
	write:cio_chpids_proc_write,
	release:cio_chpids_proc_close,
};
9187 
9188 static int
cio_chpids_proc_init(void)9189 cio_chpids_proc_init(void)
9190 {
9191 
9192 	cio_chpids_proc_entry = create_proc_entry("chpids", S_IFREG|S_IRUGO|S_IWUSR, &proc_root);
9193 	cio_chpids_proc_entry->proc_fops = &cio_chpids_proc_file_ops;
9194 
9195 	return 1;
9196 
9197 
9198 }
9199 
9200 __initcall(cio_chpids_proc_init);
9201 #endif
9202 /* end of procfs stuff */
9203 #endif
9204 
9205 schib_t *
s390_get_schib(int irq)9206 s390_get_schib (int irq)
9207 {
9208 	if ((irq > highest_subchannel) || (irq < 0))
9209 		return NULL;
9210 	if (ioinfo[irq] == INVALID_STORAGE_AREA)
9211 		return NULL;
9212 	if (ioinfo[irq]->st)
9213 		return NULL;
9214 	return &ioinfo[irq]->schib;
9215 
9216 }
9217 
/*
 * Attach driver-private data to the subchannel for 'irq'.
 * Returns 0 on success.
 * NOTE(review): SANITY_CHECK is an opaque macro defined elsewhere in this
 * file; presumably it validates 'irq' and returns an error code from this
 * function on failure (the getter below performs equivalent checks inline)
 * -- confirm against its definition.
 */
int
s390_set_private_data(int irq, void *data)
{
	SANITY_CHECK(irq);

	ioinfo[irq]->private_data = data;

	return 0;
}
9227 
9228 void *
s390_get_private_data(int irq)9229 s390_get_private_data(int irq)
9230 {
9231 	if ((irq > highest_subchannel) || (irq < 0))
9232 		return NULL;
9233 	if (ioinfo[irq] == INVALID_STORAGE_AREA)
9234 		return NULL;
9235 	if (ioinfo[irq]->st)
9236 		return NULL;
9237 	return ioinfo[irq]->private_data;
9238 }
9239 
9240 EXPORT_SYMBOL (halt_IO);
9241 EXPORT_SYMBOL (clear_IO);
9242 EXPORT_SYMBOL (do_IO);
9243 EXPORT_SYMBOL (resume_IO);
9244 EXPORT_SYMBOL (ioinfo);
9245 EXPORT_SYMBOL (diag210);
9246 EXPORT_SYMBOL (get_dev_info_by_irq);
9247 EXPORT_SYMBOL (get_dev_info_by_devno);
9248 EXPORT_SYMBOL (get_irq_by_devno);
9249 EXPORT_SYMBOL (get_devno_by_irq);
9250 EXPORT_SYMBOL (get_irq_first);
9251 EXPORT_SYMBOL (get_irq_next);
9252 EXPORT_SYMBOL (read_conf_data);
9253 EXPORT_SYMBOL (read_dev_chars);
9254 EXPORT_SYMBOL (s390_request_irq_special);
9255 EXPORT_SYMBOL (s390_get_schib);
9256 EXPORT_SYMBOL (s390_register_adapter_interrupt);
9257 EXPORT_SYMBOL (s390_unregister_adapter_interrupt);
9258 EXPORT_SYMBOL (s390_set_private_data);
9259 EXPORT_SYMBOL (s390_get_private_data);
9260 EXPORT_SYMBOL (s390_trigger_resense);
9261