1 /*
2  * linux/drivers/s390/cio/qdio_main.c
3  *
4  * Linux for s390 qdio support, buffer handling, qdio API and module support.
5  *
6  * Copyright 2000,2008 IBM Corp.
7  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8  *	      Jan Glauber <jang@linux.vnet.ibm.com>
9  * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
10  */
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/timer.h>
15 #include <linux/delay.h>
16 #include <linux/gfp.h>
17 #include <linux/io.h>
18 #include <linux/atomic.h>
19 #include <asm/debug.h>
20 #include <asm/qdio.h>
21 #include <asm/ipl.h>
22 
23 #include "cio.h"
24 #include "css.h"
25 #include "device.h"
26 #include "qdio.h"
27 #include "qdio_debug.h"
28 
29 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
30 	"Jan Glauber <jang@linux.vnet.ibm.com>");
31 MODULE_DESCRIPTION("QDIO base support");
32 MODULE_LICENSE("GPL");
33 
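/* issue SIGA-sync to bring the adapter's and the host's view of the queues in sync */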
34 static inline int do_siga_sync(unsigned long schid,
35 			       unsigned int out_mask, unsigned int in_mask,
36 			       unsigned int fc)
37 {
38 	register unsigned long __fc asm ("0") = fc;
39 	register unsigned long __schid asm ("1") = schid;
40 	register unsigned long out asm ("2") = out_mask;
41 	register unsigned long in asm ("3") = in_mask;
42 	int cc;
43 
44 	asm volatile(
45 		"	siga	0\n"
46 		"	ipm	%0\n"
47 		"	srl	%0,28\n"
48 		: "=d" (cc)
49 		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
50 	return cc;
51 }
52 
53 static inline int do_siga_input(unsigned long schid, unsigned int mask,
54 				unsigned int fc)
55 {
56 	register unsigned long __fc asm ("0") = fc;
57 	register unsigned long __schid asm ("1") = schid;
58 	register unsigned long __mask asm ("2") = mask;
59 	int cc;
60 
61 	asm volatile(
62 		"	siga	0\n"
63 		"	ipm	%0\n"
64 		"	srl	%0,28\n"
65 		: "=d" (cc)
66 		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
67 	return cc;
68 }
69 
70 /**
71  * do_siga_output - perform SIGA-w/wt function
72  * @schid: subchannel id or in case of QEBSM the subchannel token
73  * @mask: which output queues to process
74  * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
75  * @fc: function code to perform
76  *
77  * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
78  * Note: For IQDC unicast queues only the highest priority queue is processed.
79  */
80 static inline int do_siga_output(unsigned long schid, unsigned long mask,
81 				 unsigned int *bb, unsigned int fc,
82 				 unsigned long aob)
83 {
84 	register unsigned long __fc asm("0") = fc;
85 	register unsigned long __schid asm("1") = schid;
86 	register unsigned long __mask asm("2") = mask;
87 	register unsigned long __aob asm("3") = aob;
88 	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
89 
90 	asm volatile(
91 		"	siga	0\n"
92 		"0:	ipm	%0\n"
93 		"	srl	%0,28\n"
94 		"1:\n"
95 		EX_TABLE(0b, 1b)
96 		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
97 		  "+d" (__aob)
98 		: : "cc", "memory");
99 	*bb = ((unsigned int) __fc) >> 31;
100 	return cc;
101 }
102 
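/*
 * Translate the condition code qualifier (CCQ) returned by EQBS/SQBS into a
 * driver result: 0 = done, 1 = nothing processed yet (retry), 2 = partially
 * processed, -EIO = unrecoverable error.
 */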
103 static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
104 {
105 	/* all done or next buffer state different */
106 	if (ccq == 0 || ccq == 32)
107 		return 0;
108 	/* no buffer processed */
109 	if (ccq == 97)
110 		return 1;
111 	/* not all buffers processed */
112 	if (ccq == 96)
113 		return 2;
114 	/* notify devices immediately */
115 	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
116 	return -EIO;
117 }
118 
119 /**
120  * qdio_do_eqbs - extract buffer states for QEBSM
121  * @q: queue to manipulate
122  * @state: state of the extracted buffers
123  * @start: buffer number to start at
124  * @count: count of buffers to examine
125  * @auto_ack: automatically acknowledge buffers
126  *
127  * Returns the number of successfully extracted equal buffer states.
128  * Stops processing if a state is different from the last buffer's state.
129  */
130 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
131 			int start, int count, int auto_ack)
132 {
133 	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
134 	unsigned int ccq = 0;
135 
136 	BUG_ON(!q->irq_ptr->sch_token);
137 	qperf_inc(q, eqbs);
138 
139 	if (!q->is_input_q)
140 		nr += q->irq_ptr->nr_input_qs;
141 again:
142 	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
143 		      auto_ack);
144 	rc = qdio_check_ccq(q, ccq);
145 	if (!rc)
146 		return count - tmp_count;
147 
148 	if (rc == 1) {
149 		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
150 		goto again;
151 	}
152 
153 	if (rc == 2) {
154 		BUG_ON(tmp_count == count);
155 		qperf_inc(q, eqbs_partial);
156 		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
157 			tmp_count);
158 		/*
159 		 * Retry once, if that fails bail out and process the
160 		 * extracted buffers before trying again.
161 		 */
162 		if (!retried++)
163 			goto again;
164 		else
165 			return count - tmp_count;
166 	}
167 
168 	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
169 	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
170 	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
171 		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
172 	return 0;
173 }
174 
175 /**
176  * qdio_do_sqbs - set buffer states for QEBSM
177  * @q: queue to manipulate
178  * @state: new state of the buffers
179  * @start: first buffer number to change
180  * @count: how many buffers to change
181  *
182  * Returns the number of successfully changed buffers.
183  * Does retrying until the specified count of buffer states is set or an
184  * error occurs.
185  */
186 static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
187 			int count)
188 {
189 	unsigned int ccq = 0;
190 	int tmp_count = count, tmp_start = start;
191 	int nr = q->nr;
192 	int rc;
193 
194 	if (!count)
195 		return 0;
196 
197 	BUG_ON(!q->irq_ptr->sch_token);
198 	qperf_inc(q, sqbs);
199 
200 	if (!q->is_input_q)
201 		nr += q->irq_ptr->nr_input_qs;
202 again:
203 	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
204 	rc = qdio_check_ccq(q, ccq);
205 	if (!rc) {
206 		WARN_ON(tmp_count);
207 		return count - tmp_count;
208 	}
209 
210 	if (rc == 1 || rc == 2) {
211 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
212 		qperf_inc(q, sqbs_partial);
213 		goto again;
214 	}
215 
216 	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
217 	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
218 	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
219 		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
220 	return 0;
221 }
222 
223 /* returns number of examined buffers and their common state in *state */
224 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
225 				 unsigned char *state, unsigned int count,
226 				 int auto_ack, int merge_pending)
227 {
228 	unsigned char __state = 0;
229 	int i;
230 
231 	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
232 	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
233 
234 	if (is_qebsm(q))
235 		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
236 
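	/*
	 * Scan the SLSB until a state differs from the first one seen; with
	 * merge_pending set, PENDING buffers are reported as EMPTY so both
	 * states can be merged into a single run.
	 */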
237 	for (i = 0; i < count; i++) {
238 		if (!__state) {
239 			__state = q->slsb.val[bufnr];
240 			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
241 				__state = SLSB_P_OUTPUT_EMPTY;
242 		} else if (merge_pending) {
243 			if ((q->slsb.val[bufnr] & __state) != __state)
244 				break;
245 		} else if (q->slsb.val[bufnr] != __state)
246 			break;
247 		bufnr = next_buf(bufnr);
248 	}
249 	*state = __state;
250 	return i;
251 }
252 
253 static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
254 				unsigned char *state, int auto_ack)
255 {
256 	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
257 }
258 
259 /* wrap-around safe setting of slsb states, returns number of changed buffers */
260 static inline int set_buf_states(struct qdio_q *q, int bufnr,
261 				 unsigned char state, int count)
262 {
263 	int i;
264 
265 	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
266 	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
267 
268 	if (is_qebsm(q))
269 		return qdio_do_sqbs(q, state, bufnr, count);
270 
271 	for (i = 0; i < count; i++) {
272 		xchg(&q->slsb.val[bufnr], state);
273 		bufnr = next_buf(bufnr);
274 	}
275 	return count;
276 }
277 
278 static inline int set_buf_state(struct qdio_q *q, int bufnr,
279 				unsigned char state)
280 {
281 	return set_buf_states(q, bufnr, state, 1);
282 }
283 
284 /* set slsb states to initial state */
285 static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
286 {
287 	struct qdio_q *q;
288 	int i;
289 
290 	for_each_input_queue(irq_ptr, q, i)
291 		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
292 			       QDIO_MAX_BUFFERS_PER_Q);
293 	for_each_output_queue(irq_ptr, q, i)
294 		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
295 			       QDIO_MAX_BUFFERS_PER_Q);
296 }
297 
298 static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
299 			  unsigned int input)
300 {
301 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
302 	unsigned int fc = QDIO_SIGA_SYNC;
303 	int cc;
304 
305 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
306 	qperf_inc(q, siga_sync);
307 
308 	if (is_qebsm(q)) {
309 		schid = q->irq_ptr->sch_token;
310 		fc |= QDIO_SIGA_QEBSM_FLAG;
311 	}
312 
313 	cc = do_siga_sync(schid, output, input, fc);
314 	if (unlikely(cc))
315 		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
316 	return cc;
317 }
318 
319 static inline int qdio_siga_sync_q(struct qdio_q *q)
320 {
321 	if (q->is_input_q)
322 		return qdio_siga_sync(q, 0, q->mask);
323 	else
324 		return qdio_siga_sync(q, q->mask, 0);
325 }
326 
327 static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
328 	unsigned long aob)
329 {
330 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
331 	unsigned int fc = QDIO_SIGA_WRITE;
332 	u64 start_time = 0;
333 	int retries = 0, cc;
334 	unsigned long laob = 0;
335 
336 	if (q->u.out.use_cq && aob != 0) {
337 		fc = QDIO_SIGA_WRITEQ;
338 		laob = aob;
339 	}
340 
341 	if (is_qebsm(q)) {
342 		schid = q->irq_ptr->sch_token;
343 		fc |= QDIO_SIGA_QEBSM_FLAG;
344 	}
345 again:
346 	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
347 		(aob && fc != QDIO_SIGA_WRITEQ));
348 	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
349 
350 	/* hipersocket busy condition */
351 	if (unlikely(*busy_bit)) {
352 		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
353 		retries++;
354 
355 		if (!start_time) {
356 			start_time = get_clock();
357 			goto again;
358 		}
359 		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
360 			goto again;
361 	}
362 	if (retries) {
363 		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
364 			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
365 		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
366 	}
367 	return cc;
368 }
369 
370 static inline int qdio_siga_input(struct qdio_q *q)
371 {
372 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
373 	unsigned int fc = QDIO_SIGA_READ;
374 	int cc;
375 
376 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
377 	qperf_inc(q, siga_read);
378 
379 	if (is_qebsm(q)) {
380 		schid = q->irq_ptr->sch_token;
381 		fc |= QDIO_SIGA_QEBSM_FLAG;
382 	}
383 
384 	cc = do_siga_input(schid, q->mask, fc);
385 	if (unlikely(cc))
386 		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
387 	return cc;
388 }
389 
390 #define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
391 #define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
392 
393 static inline void qdio_sync_queues(struct qdio_q *q)
394 {
395 	/* PCI capable outbound queues will also be scanned so sync them too */
396 	if (pci_out_supported(q))
397 		qdio_siga_sync_all(q);
398 	else
399 		qdio_siga_sync_q(q);
400 }
401 
402 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
403 			unsigned char *state)
404 {
405 	if (need_siga_sync(q))
406 		qdio_siga_sync_q(q);
407 	return get_buf_states(q, bufnr, state, 1, 0, 0);
408 }
409 
410 static inline void qdio_stop_polling(struct qdio_q *q)
411 {
412 	if (!q->u.in.polling)
413 		return;
414 
415 	q->u.in.polling = 0;
416 	qperf_inc(q, stop_polling);
417 
418 	/* show the card that we are not polling anymore */
419 	if (is_qebsm(q)) {
420 		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
421 			       q->u.in.ack_count);
422 		q->u.in.ack_count = 0;
423 	} else
424 		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
425 }
426 
427 static inline void account_sbals(struct qdio_q *q, int count)
428 {
429 	int pos = 0;
430 
431 	q->q_stats.nr_sbal_total += count;
432 	if (count == QDIO_MAX_BUFFERS_MASK) {
433 		q->q_stats.nr_sbals[7]++;
434 		return;
435 	}
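	/* bucket by log2: nr_sbals[n] counts runs of 2^n up to 2^(n+1) - 1 buffers */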
436 	while (count >>= 1)
437 		pos++;
438 	q->q_stats.nr_sbals[pos]++;
439 }
440 
441 static void process_buffer_error(struct qdio_q *q, int count)
442 {
443 	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
444 					SLSB_P_OUTPUT_NOT_INIT;
445 
446 	q->qdio_error |= QDIO_ERROR_SLSB_STATE;
447 
448 	/* special handling for no target buffer empty */
449 	if ((!q->is_input_q &&
450 	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
451 		qperf_inc(q, target_full);
452 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
453 			      q->first_to_check);
454 		goto set;
455 	}
456 
457 	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
458 	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
459 	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
460 	DBF_ERROR("F14:%2x F15:%2x",
461 		  q->sbal[q->first_to_check]->element[14].sflags,
462 		  q->sbal[q->first_to_check]->element[15].sflags);
463 
464 set:
465 	/*
466 	 * Interrupts may be avoided as long as the error is present
467 	 * so change the buffer state immediately to avoid starvation.
468 	 */
469 	set_buf_states(q, q->first_to_check, state, count);
470 }
471 
472 static inline void inbound_primed(struct qdio_q *q, int count)
473 {
474 	int new;
475 
476 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
477 
478 	/* for QEBSM the ACK was already set by EQBS */
479 	if (is_qebsm(q)) {
480 		if (!q->u.in.polling) {
481 			q->u.in.polling = 1;
482 			q->u.in.ack_count = count;
483 			q->u.in.ack_start = q->first_to_check;
484 			return;
485 		}
486 
487 		/* delete the previous ACK's */
488 		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
489 			       q->u.in.ack_count);
490 		q->u.in.ack_count = count;
491 		q->u.in.ack_start = q->first_to_check;
492 		return;
493 	}
494 
495 	/*
496 	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
497 	 * or by the next inbound run.
498 	 */
499 	new = add_buf(q->first_to_check, count - 1);
500 	if (q->u.in.polling) {
501 		/* reset the previous ACK but first set the new one */
502 		set_buf_state(q, new, SLSB_P_INPUT_ACK);
503 		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
504 	} else {
505 		q->u.in.polling = 1;
506 		set_buf_state(q, new, SLSB_P_INPUT_ACK);
507 	}
508 
509 	q->u.in.ack_start = new;
510 	count--;
511 	if (!count)
512 		return;
513 	/* need to change ALL buffers to get more interrupts */
514 	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
515 }
516 
517 static int get_inbound_buffer_frontier(struct qdio_q *q)
518 {
519 	int count, stop;
520 	unsigned char state = 0;
521 
522 	q->timestamp = get_clock_fast();
523 
524 	/*
525 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
526 	 * would return 0.
527 	 */
528 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
529 	stop = add_buf(q->first_to_check, count);
530 
531 	if (q->first_to_check == stop)
532 		goto out;
533 
534 	/*
535 	 * No siga-sync needed here: after a PCI or thin interrupt the
536 	 * queues have already been synced.
537 	 */
538 	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
539 	if (!count)
540 		goto out;
541 
542 	switch (state) {
543 	case SLSB_P_INPUT_PRIMED:
544 		inbound_primed(q, count);
545 		q->first_to_check = add_buf(q->first_to_check, count);
546 		if (atomic_sub(count, &q->nr_buf_used) == 0)
547 			qperf_inc(q, inbound_queue_full);
548 		if (q->irq_ptr->perf_stat_enabled)
549 			account_sbals(q, count);
550 		break;
551 	case SLSB_P_INPUT_ERROR:
552 		process_buffer_error(q, count);
553 		q->first_to_check = add_buf(q->first_to_check, count);
554 		atomic_sub(count, &q->nr_buf_used);
555 		if (q->irq_ptr->perf_stat_enabled)
556 			account_sbals_error(q, count);
557 		break;
558 	case SLSB_CU_INPUT_EMPTY:
559 	case SLSB_P_INPUT_NOT_INIT:
560 	case SLSB_P_INPUT_ACK:
561 		if (q->irq_ptr->perf_stat_enabled)
562 			q->q_stats.nr_sbal_nop++;
563 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
564 		break;
565 	default:
566 		BUG();
567 	}
568 out:
569 	return q->first_to_check;
570 }
571 
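/* returns 1 if the inbound frontier moved or an error is pending, 0 otherwise */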
572 static int qdio_inbound_q_moved(struct qdio_q *q)
573 {
574 	int bufnr;
575 
576 	bufnr = get_inbound_buffer_frontier(q);
577 
578 	if ((bufnr != q->last_move) || q->qdio_error) {
579 		q->last_move = bufnr;
580 		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
581 			q->u.in.timestamp = get_clock();
582 		return 1;
583 	} else
584 		return 0;
585 }
586 
587 static inline int qdio_inbound_q_done(struct qdio_q *q)
588 {
589 	unsigned char state = 0;
590 
591 	if (!atomic_read(&q->nr_buf_used))
592 		return 1;
593 
594 	if (need_siga_sync(q))
595 		qdio_siga_sync_q(q);
596 	get_buf_state(q, q->first_to_check, &state, 0);
597 
598 	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
599 		/* more work coming */
600 		return 0;
601 
602 	if (is_thinint_irq(q->irq_ptr))
603 		return 1;
604 
605 	/* don't poll under z/VM */
606 	if (MACHINE_IS_VM)
607 		return 1;
608 
609 	/*
610 	 * At this point we know that inbound first_to_check
611 	 * has (probably) not moved (see qdio_inbound_processing).
612 	 */
613 	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
614 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
615 			      q->first_to_check);
616 		return 1;
617 	} else
618 		return 0;
619 }
620 
621 static inline int contains_aobs(struct qdio_q *q)
622 {
623 	return !q->is_input_q && q->u.out.use_cq;
624 }
625 
626 static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
627 				int i, struct qaob *aob)
628 {
629 	int tmp;
630 
631 	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
632 			(unsigned long) virt_to_phys(aob));
633 	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
634 			(unsigned long) aob->res0[0]);
635 	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
636 			(unsigned long) aob->res0[1]);
637 	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
638 			(unsigned long) aob->res0[2]);
639 	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
640 			(unsigned long) aob->res0[3]);
641 	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
642 			(unsigned long) aob->res0[4]);
643 	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
644 			(unsigned long) aob->res0[5]);
645 	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
646 	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
647 	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
648 	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
649 	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
650 	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
651 	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
652 	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
653 		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
654 				(unsigned long) aob->sba[tmp]);
655 		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
656 				(unsigned long) q->sbal[i]->element[tmp].addr);
657 		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
658 		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
659 				q->sbal[i]->element[tmp].length);
660 	}
661 	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
662 	for (tmp = 0; tmp < 2; ++tmp) {
663 		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
664 			(unsigned long) aob->res4[tmp]);
665 	}
666 	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
667 	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
668 }
669 
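/* for buffers that completed asynchronously, propagate the PENDING state to the upper layer */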
670 static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
671 {
672 	unsigned char state = 0;
673 	int j, b = start;
674 
675 	if (!contains_aobs(q))
676 		return;
677 
678 	for (j = 0; j < count; ++j) {
679 		get_buf_state(q, b, &state, 0);
680 		if (state == SLSB_P_OUTPUT_PENDING) {
681 			struct qaob *aob = q->u.out.aobs[b];
682 			if (aob == NULL)
683 				continue;
684 
685 			BUG_ON(q->u.out.sbal_state == NULL);
686 			q->u.out.sbal_state[b].flags |=
687 				QDIO_OUTBUF_STATE_FLAG_PENDING;
688 			q->u.out.aobs[b] = NULL;
689 		} else if (state == SLSB_P_OUTPUT_EMPTY) {
690 			BUG_ON(q->u.out.sbal_state == NULL);
691 			q->u.out.sbal_state[b].aob = NULL;
692 		}
693 		b = next_buf(b);
694 	}
695 }
696 
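/*
 * Set up an asynchronous operation block (QAOB) for an outbound buffer on a
 * queue that uses the completion queue; returns the AOB's physical address,
 * or 0 if no AOB is used.
 */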
697 static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
698 					int bufnr)
699 {
700 	unsigned long phys_aob = 0;
701 
702 	if (!q->use_cq)
703 		goto out;
704 
705 	if (!q->aobs[bufnr]) {
706 		struct qaob *aob = qdio_allocate_aob();
707 		q->aobs[bufnr] = aob;
708 	}
709 	if (q->aobs[bufnr]) {
710 		BUG_ON(q->sbal_state == NULL);
711 		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
712 		q->sbal_state[bufnr].aob = q->aobs[bufnr];
713 		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
714 		phys_aob = virt_to_phys(q->aobs[bufnr]);
715 		BUG_ON(phys_aob & 0xFF);
716 	}
717 
718 out:
719 	return phys_aob;
720 }
721 
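/* call the upper-layer handler for all buffers between first_to_kick and first_to_check */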
722 static void qdio_kick_handler(struct qdio_q *q)
723 {
724 	int start = q->first_to_kick;
725 	int end = q->first_to_check;
726 	int count;
727 
728 	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
729 		return;
730 
731 	count = sub_buf(end, start);
732 
733 	if (q->is_input_q) {
734 		qperf_inc(q, inbound_handler);
735 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
736 	} else {
737 		qperf_inc(q, outbound_handler);
738 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
739 			      start, count);
740 	}
741 
742 	qdio_handle_aobs(q, start, count);
743 
744 	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
745 		   q->irq_ptr->int_parm);
746 
747 	/* for the next time */
748 	q->first_to_kick = end;
749 	q->qdio_error = 0;
750 }
751 
752 static void __qdio_inbound_processing(struct qdio_q *q)
753 {
754 	qperf_inc(q, tasklet_inbound);
755 
756 	if (!qdio_inbound_q_moved(q))
757 		return;
758 
759 	qdio_kick_handler(q);
760 
761 	if (!qdio_inbound_q_done(q)) {
762 		/* means poll time is not yet over */
763 		qperf_inc(q, tasklet_inbound_resched);
764 		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
765 			tasklet_schedule(&q->tasklet);
766 			return;
767 		}
768 	}
769 
770 	qdio_stop_polling(q);
771 	/*
772 	 * We need to check again to not lose initiative after
773 	 * resetting the ACK state.
774 	 */
775 	if (!qdio_inbound_q_done(q)) {
776 		qperf_inc(q, tasklet_inbound_resched2);
777 		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
778 			tasklet_schedule(&q->tasklet);
779 	}
780 }
781 
782 void qdio_inbound_processing(unsigned long data)
783 {
784 	struct qdio_q *q = (struct qdio_q *)data;
785 	__qdio_inbound_processing(q);
786 }
787 
788 static int get_outbound_buffer_frontier(struct qdio_q *q)
789 {
790 	int count, stop;
791 	unsigned char state = 0;
792 
793 	q->timestamp = get_clock_fast();
794 
795 	if (need_siga_sync(q))
796 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
797 		    !pci_out_supported(q)) ||
798 		    (queue_type(q) == QDIO_IQDIO_QFMT &&
799 		    multicast_outbound(q)))
800 			qdio_siga_sync_q(q);
801 
802 	/*
803 	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
804 	 * would return 0.
805 	 */
806 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
807 	stop = add_buf(q->first_to_check, count);
808 	if (q->first_to_check == stop)
809 		goto out;
810 
811 	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
812 	if (!count)
813 		goto out;
814 
815 	switch (state) {
816 	case SLSB_P_OUTPUT_PENDING:
817 		BUG();
818 	case SLSB_P_OUTPUT_EMPTY:
819 		/* the adapter got it */
820 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
821 			"out empty:%1d %02x", q->nr, count);
822 
823 		atomic_sub(count, &q->nr_buf_used);
824 		q->first_to_check = add_buf(q->first_to_check, count);
825 		if (q->irq_ptr->perf_stat_enabled)
826 			account_sbals(q, count);
827 
828 		break;
829 	case SLSB_P_OUTPUT_ERROR:
830 		process_buffer_error(q, count);
831 		q->first_to_check = add_buf(q->first_to_check, count);
832 		atomic_sub(count, &q->nr_buf_used);
833 		if (q->irq_ptr->perf_stat_enabled)
834 			account_sbals_error(q, count);
835 		break;
836 	case SLSB_CU_OUTPUT_PRIMED:
837 		/* the adapter has not fetched the output yet */
838 		if (q->irq_ptr->perf_stat_enabled)
839 			q->q_stats.nr_sbal_nop++;
840 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
841 			      q->nr);
842 		break;
843 	case SLSB_P_OUTPUT_NOT_INIT:
844 	case SLSB_P_OUTPUT_HALTED:
845 		break;
846 	default:
847 		BUG();
848 	}
849 
850 out:
851 	return q->first_to_check;
852 }
853 
854 /* all buffers processed? */
855 static inline int qdio_outbound_q_done(struct qdio_q *q)
856 {
857 	return atomic_read(&q->nr_buf_used) == 0;
858 }
859 
860 static inline int qdio_outbound_q_moved(struct qdio_q *q)
861 {
862 	int bufnr;
863 
864 	bufnr = get_outbound_buffer_frontier(q);
865 
866 	if ((bufnr != q->last_move) || q->qdio_error) {
867 		q->last_move = bufnr;
868 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
869 		return 1;
870 	} else
871 		return 0;
872 }
873 
874 static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
875 {
876 	int retries = 0, cc;
877 	unsigned int busy_bit;
878 
879 	if (!need_siga_out(q))
880 		return 0;
881 
882 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
883 retry:
884 	qperf_inc(q, siga_write);
885 
886 	cc = qdio_siga_output(q, &busy_bit, aob);
887 	switch (cc) {
888 	case 0:
889 		break;
890 	case 2:
891 		if (busy_bit) {
892 			while (++retries < QDIO_BUSY_BIT_RETRIES) {
893 				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
894 				goto retry;
895 			}
896 			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
897 			cc |= QDIO_ERROR_SIGA_BUSY;
898 		} else
899 			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
900 		break;
901 	case 1:
902 	case 3:
903 		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
904 		break;
905 	}
906 	if (retries) {
907 		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
908 		DBF_ERROR("count:%u", retries);
909 	}
910 	return cc;
911 }
912 
913 static void __qdio_outbound_processing(struct qdio_q *q)
914 {
915 	qperf_inc(q, tasklet_outbound);
916 	BUG_ON(atomic_read(&q->nr_buf_used) < 0);
917 
918 	if (qdio_outbound_q_moved(q))
919 		qdio_kick_handler(q);
920 
921 	if (queue_type(q) == QDIO_ZFCP_QFMT)
922 		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
923 			goto sched;
924 
925 	if (q->u.out.pci_out_enabled)
926 		return;
927 
928 	/*
929 	 * Now we know that queue type is either qeth without pci enabled
930 	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
931 	 * is noticed and outbound_handler is called after some time.
932 	 */
933 	if (qdio_outbound_q_done(q))
934 		del_timer(&q->u.out.timer);
935 	else
936 		if (!timer_pending(&q->u.out.timer))
937 			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
938 	return;
939 
940 sched:
941 	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
942 		return;
943 	tasklet_schedule(&q->tasklet);
944 }
945 
946 /* outbound tasklet */
947 void qdio_outbound_processing(unsigned long data)
948 {
949 	struct qdio_q *q = (struct qdio_q *)data;
950 	__qdio_outbound_processing(q);
951 }
952 
953 void qdio_outbound_timer(unsigned long data)
954 {
955 	struct qdio_q *q = (struct qdio_q *)data;
956 
957 	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
958 		return;
959 	tasklet_schedule(&q->tasklet);
960 }
961 
962 static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
963 {
964 	struct qdio_q *out;
965 	int i;
966 
967 	if (!pci_out_supported(q))
968 		return;
969 
970 	for_each_output_queue(q->irq_ptr, out, i)
971 		if (!qdio_outbound_q_done(out))
972 			tasklet_schedule(&out->tasklet);
973 }
974 
975 static void __tiqdio_inbound_processing(struct qdio_q *q)
976 {
977 	qperf_inc(q, tasklet_inbound);
978 	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
979 		qdio_sync_queues(q);
980 
981 	/*
982 	 * The interrupt could be caused by a PCI request. Check the
983 	 * PCI capable outbound queues.
984 	 */
985 	qdio_check_outbound_after_thinint(q);
986 
987 	if (!qdio_inbound_q_moved(q))
988 		return;
989 
990 	qdio_kick_handler(q);
991 
992 	if (!qdio_inbound_q_done(q)) {
993 		qperf_inc(q, tasklet_inbound_resched);
994 		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
995 			tasklet_schedule(&q->tasklet);
996 			return;
997 		}
998 	}
999 
1000 	qdio_stop_polling(q);
1001 	/*
1002 	 * We need to check again to not lose initiative after
1003 	 * resetting the ACK state.
1004 	 */
1005 	if (!qdio_inbound_q_done(q)) {
1006 		qperf_inc(q, tasklet_inbound_resched2);
1007 		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
1008 			tasklet_schedule(&q->tasklet);
1009 	}
1010 }
1011 
1012 void tiqdio_inbound_processing(unsigned long data)
1013 {
1014 	struct qdio_q *q = (struct qdio_q *)data;
1015 	__tiqdio_inbound_processing(q);
1016 }
1017 
1018 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
1019 				  enum qdio_irq_states state)
1020 {
1021 	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
1022 
1023 	irq_ptr->state = state;
1024 	mb();
1025 }
1026 
1027 static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
1028 {
1029 	if (irb->esw.esw0.erw.cons) {
1030 		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
1031 		DBF_ERROR_HEX(irb, 64);
1032 		DBF_ERROR_HEX(irb->ecw, 64);
1033 	}
1034 }
1035 
1036 /* PCI interrupt handler */
1037 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
1038 {
1039 	int i;
1040 	struct qdio_q *q;
1041 
1042 	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
1043 		return;
1044 
1045 	for_each_input_queue(irq_ptr, q, i) {
1046 		if (q->u.in.queue_start_poll) {
1047 			/* skip if polling is enabled or already in work */
1048 			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1049 				     &q->u.in.queue_irq_state)) {
1050 				qperf_inc(q, int_discarded);
1051 				continue;
1052 			}
1053 			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
1054 						 q->irq_ptr->int_parm);
1055 		} else {
1056 			tasklet_schedule(&q->tasklet);
1057 		}
1058 	}
1059 
1060 	if (!pci_out_supported(q))
1061 		return;
1062 
1063 	for_each_output_queue(irq_ptr, q, i) {
1064 		if (qdio_outbound_q_done(q))
1065 			continue;
1066 		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
1067 			qdio_siga_sync_q(q);
1068 		tasklet_schedule(&q->tasklet);
1069 	}
1070 }
1071 
1072 static void qdio_handle_activate_check(struct ccw_device *cdev,
1073 				unsigned long intparm, int cstat, int dstat)
1074 {
1075 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1076 	struct qdio_q *q;
1077 	int count;
1078 
1079 	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
1080 	DBF_ERROR("intp :%lx", intparm);
1081 	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
1082 
1083 	if (irq_ptr->nr_input_qs) {
1084 		q = irq_ptr->input_qs[0];
1085 	} else if (irq_ptr->nr_output_qs) {
1086 		q = irq_ptr->output_qs[0];
1087 	} else {
1088 		dump_stack();
1089 		goto no_handler;
1090 	}
1091 
1092 	count = sub_buf(q->first_to_check, q->first_to_kick);
1093 	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
1094 		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
1095 no_handler:
1096 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1097 	/*
1098 	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
1099 	 * Therefore we call the LGR detection function here.
1100 	 */
1101 	lgr_info_log();
1102 }
1103 
1104 static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
1105 				      int dstat)
1106 {
1107 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1108 
1109 	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
1110 
1111 	if (cstat)
1112 		goto error;
1113 	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
1114 		goto error;
1115 	if (!(dstat & DEV_STAT_DEV_END))
1116 		goto error;
1117 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
1118 	return;
1119 
1120 error:
1121 	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
1122 	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
1123 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1124 }
1125 
1126 /* qdio interrupt handler */
1127 void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1128 		      struct irb *irb)
1129 {
1130 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1131 	int cstat, dstat;
1132 
1133 	if (!intparm || !irq_ptr) {
1134 		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
1135 		return;
1136 	}
1137 
1138 	if (irq_ptr->perf_stat_enabled)
1139 		irq_ptr->perf_stat.qdio_int++;
1140 
1141 	if (IS_ERR(irb)) {
1142 		switch (PTR_ERR(irb)) {
1143 		case -EIO:
1144 			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
1145 			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1146 			wake_up(&cdev->private->wait_q);
1147 			return;
1148 		default:
1149 			WARN_ON(1);
1150 			return;
1151 		}
1152 	}
1153 	qdio_irq_check_sense(irq_ptr, irb);
1154 	cstat = irb->scsw.cmd.cstat;
1155 	dstat = irb->scsw.cmd.dstat;
1156 
1157 	switch (irq_ptr->state) {
1158 	case QDIO_IRQ_STATE_INACTIVE:
1159 		qdio_establish_handle_irq(cdev, cstat, dstat);
1160 		break;
1161 	case QDIO_IRQ_STATE_CLEANUP:
1162 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1163 		break;
1164 	case QDIO_IRQ_STATE_ESTABLISHED:
1165 	case QDIO_IRQ_STATE_ACTIVE:
1166 		if (cstat & SCHN_STAT_PCI) {
1167 			qdio_int_handler_pci(irq_ptr);
1168 			return;
1169 		}
1170 		if (cstat || dstat)
1171 			qdio_handle_activate_check(cdev, intparm, cstat,
1172 						   dstat);
1173 		break;
1174 	case QDIO_IRQ_STATE_STOPPED:
1175 		break;
1176 	default:
1177 		WARN_ON(1);
1178 	}
1179 	wake_up(&cdev->private->wait_q);
1180 }
1181 
1182 /**
1183  * qdio_get_ssqd_desc - get qdio subchannel description
1184  * @cdev: ccw device to get description for
1185  * @data: where to store the ssqd
1186  *
1187  * Returns 0 or an error code. The results of the chsc are stored in the
1188  * specified structure.
1189  */
1190 int qdio_get_ssqd_desc(struct ccw_device *cdev,
1191 		       struct qdio_ssqd_desc *data)
1192 {
1193 
1194 	if (!cdev || !cdev->private)
1195 		return -EINVAL;
1196 
1197 	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
1198 	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
1199 }
1200 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1201 
1202 static void qdio_shutdown_queues(struct ccw_device *cdev)
1203 {
1204 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1205 	struct qdio_q *q;
1206 	int i;
1207 
1208 	for_each_input_queue(irq_ptr, q, i)
1209 		tasklet_kill(&q->tasklet);
1210 
1211 	for_each_output_queue(irq_ptr, q, i) {
1212 		del_timer(&q->u.out.timer);
1213 		tasklet_kill(&q->tasklet);
1214 	}
1215 }
1216 
1217 /**
1218  * qdio_shutdown - shut down a qdio subchannel
1219  * @cdev: associated ccw device
1220  * @how: use halt or clear to shutdown
1221  */
1222 int qdio_shutdown(struct ccw_device *cdev, int how)
1223 {
1224 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1225 	int rc;
1226 	unsigned long flags;
1227 
1228 	if (!irq_ptr)
1229 		return -ENODEV;
1230 
1231 	BUG_ON(irqs_disabled());
1232 	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
1233 
1234 	mutex_lock(&irq_ptr->setup_mutex);
1235 	/*
1236 	 * Subchannel was already shot down. We cannot prevent being called
1237 	 * twice since cio may trigger a shutdown asynchronously.
1238 	 */
1239 	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1240 		mutex_unlock(&irq_ptr->setup_mutex);
1241 		return 0;
1242 	}
1243 
1244 	/*
1245 	 * Indicate that the device is going down. Scheduling the queue
1246 	 * tasklets is forbidden from here on.
1247 	 */
1248 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1249 
1250 	tiqdio_remove_input_queues(irq_ptr);
1251 	qdio_shutdown_queues(cdev);
1252 	qdio_shutdown_debug_entries(irq_ptr, cdev);
1253 
1254 	/* cleanup subchannel */
1255 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1256 
1257 	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
1258 		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
1259 	else
1260 		/* default behaviour is halt */
1261 		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
1262 	if (rc) {
1263 		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
1264 		DBF_ERROR("rc:%4d", rc);
1265 		goto no_cleanup;
1266 	}
1267 
1268 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
1269 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1270 	wait_event_interruptible_timeout(cdev->private->wait_q,
1271 		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
1272 		irq_ptr->state == QDIO_IRQ_STATE_ERR,
1273 		10 * HZ);
1274 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1275 
1276 no_cleanup:
1277 	qdio_shutdown_thinint(irq_ptr);
1278 
1279 	/* restore interrupt handler */
1280 	if ((void *)cdev->handler == (void *)qdio_int_handler)
1281 		cdev->handler = irq_ptr->orig_handler;
1282 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1283 
1284 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1285 	mutex_unlock(&irq_ptr->setup_mutex);
1286 	if (rc)
1287 		return rc;
1288 	return 0;
1289 }
1290 EXPORT_SYMBOL_GPL(qdio_shutdown);
1291 
1292 /**
1293  * qdio_free - free data structures for a qdio subchannel
1294  * @cdev: associated ccw device
1295  */
1296 int qdio_free(struct ccw_device *cdev)
1297 {
1298 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1299 
1300 	if (!irq_ptr)
1301 		return -ENODEV;
1302 
1303 	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
1304 	mutex_lock(&irq_ptr->setup_mutex);
1305 
1306 	if (irq_ptr->debug_area != NULL) {
1307 		debug_unregister(irq_ptr->debug_area);
1308 		irq_ptr->debug_area = NULL;
1309 	}
1310 	cdev->private->qdio_data = NULL;
1311 	mutex_unlock(&irq_ptr->setup_mutex);
1312 
1313 	qdio_release_memory(irq_ptr);
1314 	return 0;
1315 }
1316 EXPORT_SYMBOL_GPL(qdio_free);
1317 
1318 /**
1319  * qdio_allocate - allocate qdio queues and associated data
1320  * @init_data: initialization data
1321  */
1322 int qdio_allocate(struct qdio_initialize *init_data)
1323 {
1324 	struct qdio_irq *irq_ptr;
1325 
1326 	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
1327 
1328 	if ((init_data->no_input_qs && !init_data->input_handler) ||
1329 	    (init_data->no_output_qs && !init_data->output_handler))
1330 		return -EINVAL;
1331 
1332 	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
1333 	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
1334 		return -EINVAL;
1335 
1336 	if ((!init_data->input_sbal_addr_array) ||
1337 	    (!init_data->output_sbal_addr_array))
1338 		return -EINVAL;
1339 
1340 	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
1341 	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1342 	if (!irq_ptr)
1343 		goto out_err;
1344 
1345 	mutex_init(&irq_ptr->setup_mutex);
1346 	qdio_allocate_dbf(init_data, irq_ptr);
1347 
1348 	/*
1349 	 * Allocate a page for the chsc calls in qdio_establish.
1350 	 * Must be pre-allocated since a zfcp recovery will call
1351 	 * qdio_establish. In case of low memory and swap on a zfcp disk
1352 	 * we may not be able to allocate memory otherwise.
1353 	 */
1354 	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
1355 	if (!irq_ptr->chsc_page)
1356 		goto out_rel;
1357 
1358 	/* qdr is used in ccw1.cda which is u32 */
1359 	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1360 	if (!irq_ptr->qdr)
1361 		goto out_rel;
1362 	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
1363 
1364 	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
1365 			     init_data->no_output_qs))
1366 		goto out_rel;
1367 
1368 	init_data->cdev->private->qdio_data = irq_ptr;
1369 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1370 	return 0;
1371 out_rel:
1372 	qdio_release_memory(irq_ptr);
1373 out_err:
1374 	return -ENOMEM;
1375 }
1376 EXPORT_SYMBOL_GPL(qdio_allocate);
1377 
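/*
 * Use asynchronous completions (completion queue) for HiperSockets devices
 * that provide a second input queue; otherwise disable async operation on
 * the output queues.
 */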
1378 static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
1379 {
1380 	struct qdio_q *q = irq_ptr->input_qs[0];
1381 	int i, use_cq = 0;
1382 
1383 	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
1384 		use_cq = 1;
1385 
1386 	for_each_output_queue(irq_ptr, q, i) {
1387 		if (use_cq) {
1388 			if (qdio_enable_async_operation(&q->u.out) < 0) {
1389 				use_cq = 0;
1390 				continue;
1391 			}
1392 		} else
1393 			qdio_disable_async_operation(&q->u.out);
1394 	}
1395 	DBF_EVENT("use_cq:%d", use_cq);
1396 }
1397 
1398 /**
1399  * qdio_establish - establish queues on a qdio subchannel
1400  * @init_data: initialization data
1401  */
1402 int qdio_establish(struct qdio_initialize *init_data)
1403 {
1404 	struct qdio_irq *irq_ptr;
1405 	struct ccw_device *cdev = init_data->cdev;
1406 	unsigned long saveflags;
1407 	int rc;
1408 
1409 	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
1410 
1411 	irq_ptr = cdev->private->qdio_data;
1412 	if (!irq_ptr)
1413 		return -ENODEV;
1414 
1415 	if (cdev->private->state != DEV_STATE_ONLINE)
1416 		return -EINVAL;
1417 
1418 	mutex_lock(&irq_ptr->setup_mutex);
1419 	qdio_setup_irq(init_data);
1420 
1421 	rc = qdio_establish_thinint(irq_ptr);
1422 	if (rc) {
1423 		mutex_unlock(&irq_ptr->setup_mutex);
1424 		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1425 		return rc;
1426 	}
1427 
1428 	/* establish q */
1429 	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
1430 	irq_ptr->ccw.flags = CCW_FLAG_SLI;
1431 	irq_ptr->ccw.count = irq_ptr->equeue.count;
1432 	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
1433 
1434 	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1435 	ccw_device_set_options_mask(cdev, 0);
1436 
1437 	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
1438 	if (rc) {
1439 		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
1440 		DBF_ERROR("rc:%4x", rc);
1441 	}
1442 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1443 
1444 	if (rc) {
1445 		mutex_unlock(&irq_ptr->setup_mutex);
1446 		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1447 		return rc;
1448 	}
1449 
1450 	wait_event_interruptible_timeout(cdev->private->wait_q,
1451 		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
1452 		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
1453 
1454 	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
1455 		mutex_unlock(&irq_ptr->setup_mutex);
1456 		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1457 		return -EIO;
1458 	}
1459 
1460 	qdio_setup_ssqd_info(irq_ptr);
1461 
1462 	qdio_detect_hsicq(irq_ptr);
1463 
1464 	/* qebsm is now setup if available, initialize buffer states */
1465 	qdio_init_buf_states(irq_ptr);
1466 
1467 	mutex_unlock(&irq_ptr->setup_mutex);
1468 	qdio_print_subchannel_info(irq_ptr, cdev);
1469 	qdio_setup_debug_entries(irq_ptr, cdev);
1470 	return 0;
1471 }
1472 EXPORT_SYMBOL_GPL(qdio_establish);
1473 
1474 /**
1475  * qdio_activate - activate queues on a qdio subchannel
1476  * @cdev: associated cdev
1477  */
1478 int qdio_activate(struct ccw_device *cdev)
1479 {
1480 	struct qdio_irq *irq_ptr;
1481 	int rc;
1482 	unsigned long saveflags;
1483 
1484 	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
1485 
1486 	irq_ptr = cdev->private->qdio_data;
1487 	if (!irq_ptr)
1488 		return -ENODEV;
1489 
1490 	if (cdev->private->state != DEV_STATE_ONLINE)
1491 		return -EINVAL;
1492 
1493 	mutex_lock(&irq_ptr->setup_mutex);
1494 	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1495 		rc = -EBUSY;
1496 		goto out;
1497 	}
1498 
1499 	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
1500 	irq_ptr->ccw.flags = CCW_FLAG_SLI;
1501 	irq_ptr->ccw.count = irq_ptr->aqueue.count;
1502 	irq_ptr->ccw.cda = 0;
1503 
1504 	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1505 	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
1506 
1507 	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
1508 			      0, DOIO_DENY_PREFETCH);
1509 	if (rc) {
1510 		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
1511 		DBF_ERROR("rc:%4x", rc);
1512 	}
1513 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1514 
1515 	if (rc)
1516 		goto out;
1517 
1518 	if (is_thinint_irq(irq_ptr))
1519 		tiqdio_add_input_queues(irq_ptr);
1520 
1521 	/* wait for subchannel to become active */
1522 	msleep(5);
1523 
1524 	switch (irq_ptr->state) {
1525 	case QDIO_IRQ_STATE_STOPPED:
1526 	case QDIO_IRQ_STATE_ERR:
1527 		rc = -EIO;
1528 		break;
1529 	default:
1530 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
1531 		rc = 0;
1532 	}
1533 out:
1534 	mutex_unlock(&irq_ptr->setup_mutex);
1535 	return rc;
1536 }
1537 EXPORT_SYMBOL_GPL(qdio_activate);
1538 
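/* check whether bufnr lies within count buffers starting at start, handling wrap-around */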
1539 static inline int buf_in_between(int bufnr, int start, int count)
1540 {
1541 	int end = add_buf(start, count);
1542 
1543 	if (end > start) {
1544 		if (bufnr >= start && bufnr < end)
1545 			return 1;
1546 		else
1547 			return 0;
1548 	}
1549 
1550 	/* wrap-around case */
1551 	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1552 	    (bufnr < end))
1553 		return 1;
1554 	else
1555 		return 0;
1556 }
1557 
1558 /**
1559  * handle_inbound - reset processed input buffers
1560  * @q: queue containing the buffers
1561  * @callflags: flags
1562  * @bufnr: first buffer to process
1563  * @count: how many buffers are emptied
1564  */
1565 static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1566 			  int bufnr, int count)
1567 {
1568 	int used, diff;
1569 
1570 	qperf_inc(q, inbound_call);
1571 
1572 	if (!q->u.in.polling)
1573 		goto set;
1574 
1575 	/* protect against stop polling setting an ACK for an emptied slsb */
1576 	if (count == QDIO_MAX_BUFFERS_PER_Q) {
1577 		/* overwriting everything, just delete polling status */
1578 		q->u.in.polling = 0;
1579 		q->u.in.ack_count = 0;
1580 		goto set;
1581 	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
1582 		if (is_qebsm(q)) {
1583 			/* partial overwrite, just update ack_start */
1584 			diff = add_buf(bufnr, count);
1585 			diff = sub_buf(diff, q->u.in.ack_start);
1586 			q->u.in.ack_count -= diff;
1587 			if (q->u.in.ack_count <= 0) {
1588 				q->u.in.polling = 0;
1589 				q->u.in.ack_count = 0;
1590 				goto set;
1591 			}
1592 			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
1593 		}
1594 		else
1595 			/* the only ACK will be deleted, so stop polling */
1596 			q->u.in.polling = 0;
1597 	}
1598 
1599 set:
1600 	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1601 
1602 	used = atomic_add_return(count, &q->nr_buf_used) - count;
1603 	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
1604 
1605 	if (need_siga_in(q))
1606 		return qdio_siga_input(q);
1607 
1608 	return 0;
1609 }
1610 
1611 /**
1612  * handle_outbound - process filled outbound buffers
1613  * @q: queue containing the buffers
1614  * @callflags: flags
1615  * @bufnr: first buffer to process
1616  * @count: how many buffers are filled
1617  */
1618 static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1619 			   int bufnr, int count)
1620 {
1621 	unsigned char state = 0;
1622 	int used, rc = 0;
1623 
1624 	qperf_inc(q, outbound_call);
1625 
1626 	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1627 	used = atomic_add_return(count, &q->nr_buf_used);
1628 	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1629 
1630 	if (used == QDIO_MAX_BUFFERS_PER_Q)
1631 		qperf_inc(q, outbound_queue_full);
1632 
1633 	if (callflags & QDIO_FLAG_PCI_OUT) {
1634 		q->u.out.pci_out_enabled = 1;
1635 		qperf_inc(q, pci_request_int);
1636 	} else
1637 		q->u.out.pci_out_enabled = 0;
1638 
1639 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
1640 		unsigned long phys_aob = 0;
1641 
1642 		/* One SIGA-W per buffer required for unicast HSI */
1643 		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1644 
1645 		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
1646 
1647 		rc = qdio_kick_outbound_q(q, phys_aob);
1648 	} else if (need_siga_sync(q)) {
1649 		rc = qdio_siga_sync_q(q);
1650 	} else {
1651 		/* try to fast requeue buffers */
1652 		get_buf_state(q, prev_buf(bufnr), &state, 0);
1653 		if (state != SLSB_CU_OUTPUT_PRIMED)
1654 			rc = qdio_kick_outbound_q(q, 0);
1655 		else
1656 			qperf_inc(q, fast_requeue);
1657 	}
1658 
1659 	/* in case of SIGA errors we must process the error immediately */
1660 	if (used >= q->u.out.scan_threshold || rc)
1661 		tasklet_schedule(&q->tasklet);
1662 	else
1663 		/* free the SBALs in case of no further traffic */
1664 		if (!timer_pending(&q->u.out.timer))
1665 			mod_timer(&q->u.out.timer, jiffies + HZ);
1666 	return rc;
1667 }
1668 
1669 /**
1670  * do_QDIO - process input or output buffers
1671  * @cdev: associated ccw_device for the qdio subchannel
1672  * @callflags: input or output and special flags from the program
1673  * @q_nr: queue number
1674  * @bufnr: buffer number
1675  * @count: how many buffers to process
1676  */
1677 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1678 	    int q_nr, unsigned int bufnr, unsigned int count)
1679 {
1680 	struct qdio_irq *irq_ptr;
1681 
1682 
1683 	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
1684 		return -EINVAL;
1685 
1686 	irq_ptr = cdev->private->qdio_data;
1687 	if (!irq_ptr)
1688 		return -ENODEV;
1689 
1690 	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
1691 		      "do%02x b:%02x c:%02x", callflags, bufnr, count);
1692 
1693 	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1694 		return -EBUSY;
1695 	if (!count)
1696 		return 0;
1697 	if (callflags & QDIO_FLAG_SYNC_INPUT)
1698 		return handle_inbound(irq_ptr->input_qs[q_nr],
1699 				      callflags, bufnr, count);
1700 	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1701 		return handle_outbound(irq_ptr->output_qs[q_nr],
1702 				       callflags, bufnr, count);
1703 	return -EINVAL;
1704 }
1705 EXPORT_SYMBOL_GPL(do_QDIO);
1706 
1707 /**
1708  * qdio_start_irq - process input buffers
1709  * @cdev: associated ccw_device for the qdio subchannel
1710  * @nr: input queue number
1711  *
1712  * Return codes
1713  *   0 - success
1714  *   1 - irqs not started since new data is available
1715  */
1716 int qdio_start_irq(struct ccw_device *cdev, int nr)
1717 {
1718 	struct qdio_q *q;
1719 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1720 
1721 	if (!irq_ptr)
1722 		return -ENODEV;
1723 	q = irq_ptr->input_qs[nr];
1724 
1725 	WARN_ON(queue_irqs_enabled(q));
1726 
1727 	clear_nonshared_ind(irq_ptr);
1728 	qdio_stop_polling(q);
1729 	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1730 
1731 	/*
1732 	 * We need to check again to not lose initiative after
1733 	 * resetting the ACK state.
1734 	 */
1735 	if (test_nonshared_ind(irq_ptr))
1736 		goto rescan;
1737 	if (!qdio_inbound_q_done(q))
1738 		goto rescan;
1739 	return 0;
1740 
1741 rescan:
1742 	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1743 			     &q->u.in.queue_irq_state))
1744 		return 0;
1745 	else
1746 		return 1;
1747 
1748 }
1749 EXPORT_SYMBOL(qdio_start_irq);
1750 
1751 /**
1752  * qdio_get_next_buffers - process input buffers
1753  * @cdev: associated ccw_device for the qdio subchannel
1754  * @nr: input queue number
1755  * @bufnr: first filled buffer number
1756  * @error: buffers are in error state
1757  *
1758  * Return codes
1759  *   < 0 - error
1760  *   = 0 - no new buffers found
1761  *   > 0 - number of processed buffers
1762  */
1763 int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1764 			  int *error)
1765 {
1766 	struct qdio_q *q;
1767 	int start, end;
1768 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1769 
1770 	if (!irq_ptr)
1771 		return -ENODEV;
1772 	q = irq_ptr->input_qs[nr];
1773 	WARN_ON(queue_irqs_enabled(q));
1774 
1775 	/*
1776 	 * Cannot rely on automatic sync after interrupt since queues may
1777 	 * also be examined without interrupt.
1778 	 */
1779 	if (need_siga_sync(q))
1780 		qdio_sync_queues(q);
1781 
1782 	/* check the PCI capable outbound queues. */
1783 	qdio_check_outbound_after_thinint(q);
1784 
1785 	if (!qdio_inbound_q_moved(q))
1786 		return 0;
1787 
1788 	/* Note: upper-layer MUST stop processing immediately here ... */
1789 	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1790 		return -EIO;
1791 
1792 	start = q->first_to_kick;
1793 	end = q->first_to_check;
1794 	*bufnr = start;
1795 	*error = q->qdio_error;
1796 
1797 	/* for the next time */
1798 	q->first_to_kick = end;
1799 	q->qdio_error = 0;
1800 	return sub_buf(end, start);
1801 }
1802 EXPORT_SYMBOL(qdio_get_next_buffers);
1803 
1804 /**
1805  * qdio_stop_irq - disable interrupt processing for the device
1806  * @cdev: associated ccw_device for the qdio subchannel
1807  * @nr: input queue number
1808  *
1809  * Return codes
1810  *   0 - interrupts were already disabled
1811  *   1 - interrupts successfully disabled
1812  */
1813 int qdio_stop_irq(struct ccw_device *cdev, int nr)
1814 {
1815 	struct qdio_q *q;
1816 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1817 
1818 	if (!irq_ptr)
1819 		return -ENODEV;
1820 	q = irq_ptr->input_qs[nr];
1821 
1822 	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1823 			     &q->u.in.queue_irq_state))
1824 		return 0;
1825 	else
1826 		return 1;
1827 }
1828 EXPORT_SYMBOL(qdio_stop_irq);
1829 
1830 static int __init init_QDIO(void)
1831 {
1832 	int rc;
1833 
1834 	rc = qdio_debug_init();
1835 	if (rc)
1836 		return rc;
1837 	rc = qdio_setup_init();
1838 	if (rc)
1839 		goto out_debug;
1840 	rc = tiqdio_allocate_memory();
1841 	if (rc)
1842 		goto out_cache;
1843 	rc = tiqdio_register_thinints();
1844 	if (rc)
1845 		goto out_ti;
1846 	return 0;
1847 
1848 out_ti:
1849 	tiqdio_free_memory();
1850 out_cache:
1851 	qdio_setup_exit();
1852 out_debug:
1853 	qdio_debug_exit();
1854 	return rc;
1855 }
1856 
1857 static void __exit exit_QDIO(void)
1858 {
1859 	tiqdio_unregister_thinints();
1860 	tiqdio_free_memory();
1861 	qdio_setup_exit();
1862 	qdio_debug_exit();
1863 }
1864 
1865 module_init(init_QDIO);
1866 module_exit(exit_QDIO);
1867