/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
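/*
 * The condition code and busy bit are evaluated by the callers below:
 * cc 0 means the SIGA-w completed, cc 2 together with *bb set indicates a
 * busy HiperSockets target, and cc 1 or 3 indicate an error.
 */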

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
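/*
 * Example: if EQBS is asked for count = 20 buffers and comes back with
 * ccq 96 and tmp_count = 15, five buffers of equal state were extracted;
 * qdio_do_eqbs() then returns 5 and the remaining buffers are extracted
 * by a later call.
 */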

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}
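/*
 * Example: if the three buffers starting at bufnr are PRIMED, PRIMED, EMPTY,
 * a call with count = 3 returns 2 and stores the PRIMED state in *state;
 * the scan stops as soon as a buffer's state differs from the first one.
 */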

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}
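/*
 * Example of the wrap-around: set_buf_states(q, 126, state, 4) changes
 * buffers 126, 127, 0 and 1, since next_buf() advances modulo the 128
 * buffers of a queue.
 */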

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int cc;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}
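/*
 * The busy-bit loop above keeps retrying the SIGA-w until
 * QDIO_BUSY_BIT_PATIENCE TOD-clock units have passed since the first busy
 * indication; a target that is still busy afterwards is reported back to
 * qdio_kick_outbound_q(), which turns it into QDIO_ERROR_SIGA_BUSY.
 */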

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}
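/*
 * account_sbals() keeps a log2 histogram: a count of 5 lands in bucket 2,
 * a count of 64 in bucket 6, and the special value QDIO_MAX_BUFFERS_MASK
 * (127) is counted in the last bucket, 7.
 */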

static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);

	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
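/*
 * Example for the non-QEBSM path: with first_to_check = 10 and count = 4,
 * buffer 13 (the newest one) is marked SLSB_P_INPUT_ACK and buffers 10-12
 * are set back to SLSB_P_INPUT_NOT_INIT so that further interrupts are
 * raised for them.
 */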

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga-sync needed here: after a PCI interrupt or a thin
	 * interrupt the queues have already been synced.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{

	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
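/*
 * Example: with start = 120 and count = 16 the range wraps around and end
 * becomes 8, so buf_in_between() reports buffers 120-127 as well as 0-7 as
 * "in between", while e.g. buffer 50 is not.
 */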

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		/* One SIGA-W per buffer required for unicast HiperSockets. */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		rc = qdio_kick_outbound_q(q);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
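
/*
 * A rough sketch of how an upper-layer driver typically drives this API;
 * error handling is omitted, handler and array names are illustrative only
 * and not taken from a real driver:
 *
 *	struct qdio_initialize init_data = {
 *		.cdev			= cdev,
 *		.no_input_qs		= 1,
 *		.no_output_qs		= 1,
 *		.input_handler		= my_inbound_handler,
 *		.output_handler		= my_outbound_handler,
 *		.input_sbal_addr_array	= in_sbals,
 *		.output_sbal_addr_array	= out_sbals,
 *	};
 *
 *	qdio_allocate(&init_data);
 *	qdio_establish(&init_data);
 *	qdio_activate(cdev);
 *
 *	// return an emptied input buffer to the adapter:
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
 *	// hand a filled output buffer to the adapter:
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, bufnr, 1);
 *
 *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(cdev);
 */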

/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	if (!shared_ind(q->irq_ptr->dsci))
		xchg(q->irq_ptr->dsci, 0);

	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
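
/*
 * Sketch of the intended polling protocol, assuming a driver that registered
 * a queue_start_poll callback:
 *
 *	- qdio_int_handler_pci() marks queue interrupts as disabled and invokes
 *	  the driver's queue_start_poll callback, which typically schedules a
 *	  poll routine.
 *	- The poll routine calls qdio_get_next_buffers() in a loop and processes
 *	  the returned buffers.
 *	- When no more work is found, the driver calls qdio_start_irq(); a
 *	  return value of 1 means new data arrived in the meantime and polling
 *	  continues, otherwise queue interrupts are enabled again.
 *	- qdio_stop_irq() disables interrupt processing again, e.g. before the
 *	  driver switches back to polling on its own.
 */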

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);