/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/jiffies.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */

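/*
 * Start the VL15 watchdog timer when the first VL15 request is queued;
 * subsequent enqueues only bump the reference count.
 */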
static void vl15_watchdog_enq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
		unsigned long interval = (HZ + 19) / 20;
		dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
		add_timer(&dd->ipath_sdma_vl15_timer);
	}
}

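/*
 * Drop one VL15 request from the watchdog's count.  If requests are
 * still outstanding, push the timeout forward; otherwise stop the
 * timer entirely.
 */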
static void vl15_watchdog_deq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
		unsigned long interval = (HZ + 19) / 20;
		mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
	} else {
		del_timer(&dd->ipath_sdma_vl15_timer);
	}
}

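/*
 * Timer handler: a VL15 request has sat on the SDMA queue for the full
 * watchdog interval, so cancel sends and force head-of-line recovery.
 */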
static void vl15_watchdog_timeout(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
		ipath_dbg("vl15 watchdog timeout - clearing\n");
		ipath_cancel_sends(dd, 1);
		ipath_hol_down(dd);
	} else {
		ipath_dbg("vl15 watchdog timeout - "
			  "condition already cleared\n");
	}
}

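/*
 * Decode the DMA address and byte length (dword count * 4) from the
 * two descriptor qwords at 'head' and undo the dma_map_single() done
 * when the descriptor was queued.
 */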
static void unmap_desc(struct ipath_devdata *dd, unsigned head)
{
	__le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

/*
 * ipath_sdma_lock should be locked before calling this.
 */
int ipath_sdma_make_progress(struct ipath_devdata *dd)
{
	struct list_head *lp = NULL;
	struct ipath_sdma_txreq *txp = NULL;
	u16 dmahead;
	u16 start_idx = 0;
	int progress = 0;

	if (!list_empty(&dd->ipath_sdma_activelist)) {
		lp = dd->ipath_sdma_activelist.next;
		txp = list_entry(lp, struct ipath_sdma_txreq, list);
		start_idx = txp->start_idx;
	}

	/*
	 * Read the SDMA head register in order to know that the
	 * interrupt clear has been written to the chip.
	 * Otherwise, we may not get an interrupt for the last
	 * descriptor in the queue.
	 */
	dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
	/* sanity check return value for error handling (chip reset, etc.) */
	if (dmahead >= dd->ipath_sdma_descq_cnt)
		goto done;

	while (dd->ipath_sdma_descq_head != dmahead) {
		if (txp && (txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC) &&
		    dd->ipath_sdma_descq_head == start_idx) {
			unmap_desc(dd, dd->ipath_sdma_descq_head);
			start_idx++;
			if (start_idx == dd->ipath_sdma_descq_cnt)
				start_idx = 0;
		}

		/* increment free count and head */
		dd->ipath_sdma_descq_removed++;
		if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
			dd->ipath_sdma_descq_head = 0;

		if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
			/* move to notify list */
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(lp, &dd->ipath_sdma_notifylist);
			if (!list_empty(&dd->ipath_sdma_activelist)) {
				lp = dd->ipath_sdma_activelist.next;
				txp = list_entry(lp, struct ipath_sdma_txreq,
						 list);
				start_idx = txp->start_idx;
			} else {
				lp = NULL;
				txp = NULL;
			}
		}
		progress = 1;
	}

	if (progress)
		tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

done:
	return progress;
}

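/*
 * Run the completion callback, if any, for each request on 'list',
 * removing the requests from the list as we go.
 */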
static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
{
	struct ipath_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, list, list) {
		list_del_init(&txp->list);

		if (txp->callback)
			(*txp->callback)(txp->callback_cookie,
					 txp->callback_status);
	}
}

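/*
 * Splice the notify list out from under the lock, then invoke the
 * completion callbacks without holding ipath_sdma_lock.
 */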
static void sdma_notify_taskbody(struct ipath_devdata *dd)
{
	unsigned long flags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	list_splice_init(&dd->ipath_sdma_notifylist, &list);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	ipath_sdma_notify(dd, &list);

	/*
	 * The IB verbs layer needs to see the callback before getting
	 * the call to ipath_ib_piobufavail() because the callback
	 * handles releasing resources the next send will need.
	 * Otherwise, we could do these calls in
	 * ipath_sdma_make_progress().
	 */
	ipath_ib_piobufavail(dd->verbs_dev);
}

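/* Tasklet wrapper: skip notification work once shutdown has begun. */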
static void sdma_notify_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		sdma_notify_taskbody(dd);
}

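/* Log the SDMA-related chip registers at VERBOSE debug level. */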
static void dump_sdma_state(struct ipath_devdata *dd)
{
	unsigned long reg;

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
	ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
	ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
	ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
	ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
}

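/*
 * Tasklet that drives SDMA abort/restart.  It reschedules itself while
 * the engine drains, then moves all in-flight requests to the notify
 * list, resets the software queue state, disables the engine, and
 * restarts it if the link is still ACTIVE.
 */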
static void sdma_abort_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
	u64 status;
	unsigned long flags;

	if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		return;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;

	/* nothing to do */
	if (status == IPATH_SDMA_ABORT_NONE)
		goto unlock;

	/* ipath_sdma_abort() is done, waiting for interrupt */
	if (status == IPATH_SDMA_ABORT_DISARMED) {
		if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
			goto resched_noprint;
		/* give up, intr got lost somewhere */
		ipath_dbg("give up waiting for SDMADISABLED intr\n");
		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		status = IPATH_SDMA_ABORT_ABORTED;
	}
	/* everything is stopped, time to clean up and restart */
	if (status == IPATH_SDMA_ABORT_ABORTED) {
		struct ipath_sdma_txreq *txp, *txpnext;
		u64 hwstatus;
		int notify = 0;

		hwstatus = ipath_read_kreg64(dd,
				dd->ipath_kregs->kr_senddmastatus);

		if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG |
				 IPATH_SDMA_STATUS_ABORT_IN_PROG	     |
				 IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) ||
		    !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) {
			if (dd->ipath_sdma_reset_wait > 0) {
				/* not done shutting down sdma */
				--dd->ipath_sdma_reset_wait;
				goto resched;
			}
			ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
				"status after SDMA reset, continuing\n");
			dump_sdma_state(dd);
		}

		/* dequeue all "sent" requests */
		list_for_each_entry_safe(txp, txpnext,
					 &dd->ipath_sdma_activelist, list) {
			txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
			notify = 1;
		}
		if (notify)
			tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

		/* reset our notion of head and tail */
		dd->ipath_sdma_descq_tail = 0;
		dd->ipath_sdma_descq_head = 0;
		dd->ipath_sdma_head_dma[0] = 0;
		dd->ipath_sdma_generation = 0;
		dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;

		/* Reset SendDmaLenGen */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
			(u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));

		/* done with sdma state for a bit */
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

		/*
		 * Don't restart sdma here (with the exception
		 * below). Wait until link is up to ACTIVE.  VL15 MADs
		 * used to bring the link up use PIO, and multiple link
		 * transitions otherwise cause the sdma engine to be
		 * stopped and started multiple times.
		 * The disable is done here, including the shadow,
		 * so the state is kept consistent.
		 * See ipath_restart_sdma() for the actual starting
		 * of sdma.
		 */
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

		/* make sure I see next message */
		dd->ipath_sdma_abort_jiffies = 0;

		/*
		 * Not everything that takes SDMA offline is a link
		 * status change.  If the link was up, restart SDMA.
		 */
		if (dd->ipath_flags & IPATH_LINKACTIVE)
			ipath_restart_sdma(dd);

		goto done;
	}

resched:
	/*
	 * For now, keep spinning.
	 * JAG - it is bad for the default case to be a loop with no
	 * state change.
	 */
	if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
		ipath_dbg("looping with status 0x%08lx\n",
			  dd->ipath_sdma_status);
		dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
	}
resched_noprint:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
	return;

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
done:
	return;
}

/*
 * This is called from interrupt context.
 */
void ipath_sdma_intr(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	(void) ipath_sdma_make_progress(dd);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
}

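/*
 * Allocate the coherent descriptor queue and the page the chip DMAs
 * its head index into, and set up the VL15 watchdog timer.
 */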
static int alloc_sdma(struct ipath_devdata *dd)
{
	int ret = 0;

	/* Allocate memory for SendDMA descriptor FIFO */
	dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
		SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
			"FIFO memory\n");
		ret = -ENOMEM;
		goto done;
	}

	dd->ipath_sdma_descq_cnt =
		SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);

	/* Allocate memory for DMA of head register to memory */
	dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
		PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
	if (!dd->ipath_sdma_head_dma) {
		ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
		ret = -ENOMEM;
		goto cleanup_descq;
	}
	dd->ipath_sdma_head_dma[0] = 0;

	init_timer(&dd->ipath_sdma_vl15_timer);
	dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
	dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
	atomic_set(&dd->ipath_sdma_vl15_count, 0);

	goto done;

cleanup_descq:
	dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
		(void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
	dd->ipath_sdma_descq = NULL;
	dd->ipath_sdma_descq_phys = 0;
done:
	return ret;
}

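/*
 * One-time SDMA initialization: allocate the queues, program the
 * chip's base/length/tail/head-address registers, reserve the former
 * kernel PIO buffers for SDMA use, and enable the SDMA interrupt.
 * The engine itself is enabled later, by ipath_restart_sdma(), once
 * the link goes ACTIVE.
 */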
int setup_sdma(struct ipath_devdata *dd)
{
	int ret = 0;
	unsigned i, n;
	u64 tmp64;
	u64 senddmabufmask[3] = { 0 };
	unsigned long flags;

	ret = alloc_sdma(dd);
	if (ret)
		goto done;

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "SendDMA memory not allocated\n");
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * Set initial status as if we had been up, then gone down.
	 * This lets initial start on transition to ACTIVE be the
	 * same as restart after link flap.
	 */
	dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
	dd->ipath_sdma_abort_jiffies = 0;
	dd->ipath_sdma_generation = 0;
	dd->ipath_sdma_descq_tail = 0;
	dd->ipath_sdma_descq_head = 0;
	dd->ipath_sdma_descq_removed = 0;
	dd->ipath_sdma_descq_added = 0;

	/* Set SendDmaBase */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
			 dd->ipath_sdma_descq_phys);
	/* Set SendDmaLenGen */
	tmp64 = dd->ipath_sdma_descq_cnt;
	tmp64 |= 1ULL << 18; /* enable generation checking */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
	/* Set SendDmaTail */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
			 dd->ipath_sdma_descq_tail);
	/* Set SendDmaHeadAddr */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
			 dd->ipath_sdma_head_phys);

	/*
	 * Reserve all the former "kernel" piobufs, using high number range
	 * so we get as many 4K buffers as possible
	 */
	n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
	ipath_chg_pioavailkernel(dd, i, n - i, 0);
	for (; i < n; ++i) {
		unsigned word = i / 64;
		unsigned bit = i & 63;
		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
			 senddmabufmask[0]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
			 senddmabufmask[1]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
			 senddmabufmask[2]);

	INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
	INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);

	tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
		     (unsigned long) dd);
	tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
		     (unsigned long) dd);

	/*
	 * No use turning on SDMA here, as the link is probably not
	 * ACTIVE yet.  Just mark it RUNNING and enable the interrupt,
	 * and let ipath_restart_sdma(), on the link transition to
	 * ACTIVE, actually enable it.
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	__set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

done:
	return ret;
}

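/*
 * Shut SDMA down for good: mark the status so the tasklets become
 * no-ops, kill them, disable the engine, complete all outstanding
 * requests with a SHUTDOWN status, clear the chip registers, and
 * free the coherent allocations.
 */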
void teardown_sdma(struct ipath_devdata *dd)
{
	struct ipath_sdma_txreq *txp, *txpnext;
	unsigned long flags;
	dma_addr_t sdma_head_phys = 0;
	dma_addr_t sdma_descq_phys = 0;
	void *sdma_descq = NULL;
	void *sdma_head_dma = NULL;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	__clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	tasklet_kill(&dd->ipath_sdma_abort_task);
	tasklet_kill(&dd->ipath_sdma_notify_task);

	/* turn off sdma */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
		dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	/* dequeue all "sent" requests */
	list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
				 list) {
		txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
		if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
			vl15_watchdog_deq(dd);
		list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	sdma_notify_taskbody(dd);

	del_timer_sync(&dd->ipath_sdma_vl15_timer);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	dd->ipath_sdma_abort_jiffies = 0;

	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);

	if (dd->ipath_sdma_head_dma) {
		sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
		sdma_head_phys = dd->ipath_sdma_head_phys;
		dd->ipath_sdma_head_dma = NULL;
		dd->ipath_sdma_head_phys = 0;
	}

	if (dd->ipath_sdma_descq) {
		sdma_descq = dd->ipath_sdma_descq;
		sdma_descq_phys = dd->ipath_sdma_descq_phys;
		dd->ipath_sdma_descq = NULL;
		dd->ipath_sdma_descq_phys = 0;
	}

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	if (sdma_head_dma)
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  sdma_head_dma, sdma_head_phys);

	if (sdma_descq)
		dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
				  sdma_descq, sdma_descq_phys);

/*
 * [Re]start SDMA, if we use it, and it's not already OK.
 * This is called on transition to link ACTIVE, either the first or
 * subsequent times.
 */
void ipath_restart_sdma(struct ipath_devdata *dd)
{
	unsigned long flags;
	int needed = 1;

	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
		goto bail;

	/*
	 * First, make sure we should, which is to say,
	 * check that we are "RUNNING" (not in teardown)
	 * and not "SHUTDOWN".
	 */
	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status) ||
	    test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) {
		needed = 0;
	} else {
		__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!needed) {
		ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
			dd->ipath_sdma_status);
		goto bail;
	}
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	/*
	 * First clear, just to be safe. Enable is only done
	 * in chip on 0->1 transition.
	 */
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	/* notify upper layers */
	ipath_ib_piobufavail(dd->verbs_dev);

bail:
	return;
}

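/*
 * Build the two 64-bit words of a hardware SDMA descriptor from a
 * dword-aligned DMA address, a length in dwords, and a buffer offset
 * in dwords; the field layout is noted inline below.
 */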
static inline void make_sdma_desc(struct ipath_devdata *dd,
	u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
{
	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int ipath_sdma_verbs_send(struct ipath_devdata *dd,
	struct ipath_sge_state *ss, u32 dwords,
	struct ipath_verbs_txreq *tx)
{
	unsigned long flags;
	struct ipath_sge *sge;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	if ((tx->map_len + (dwords << 2)) > dd->ipath_ibmaxlen) {
		ipath_dbg("packet size %X > ibmax %X, fail\n",
			tx->map_len + (dwords << 2), dd->ipath_ibmaxlen);
		ret = -EMSGSIZE;
		goto fail;
	}

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

retry:
	if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
		ret = -EBUSY;
		goto unlock;
	}

	if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
		if (ipath_sdma_make_progress(dd))
			goto retry;
		ret = -ENOBUFS;
		goto unlock;
	}

	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
			      tx->map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, addr))
		goto ioerr;

	dwoffset = tx->map_len >> 2;
	make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);

	/* SDmaFirstDesc */
	sdmadesc[0] |= 1ULL << 12;
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= 1ULL << 14;	/* SDmaUseLargeBuf */

	/* write to the descq */
	tail = dd->ipath_sdma_descq_tail;
	descqp = &dd->ipath_sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
		tx->txreq.start_idx = tail;

	/* increment the tail */
	if (++tail == dd->ipath_sdma_descq_cnt) {
		tail = 0;
		descqp = &dd->ipath_sdma_descq[0].qw[0];
		++dd->ipath_sdma_generation;
	}

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&dd->pcidev->dev, addr))
			goto unmap;
		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= 1ULL << 14;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == dd->ipath_sdma_descq_cnt) {
			tail = 0;
			descqp = &dd->ipath_sdma_descq[0].qw[0];
			++dd->ipath_sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	if (!tail)
		descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
	descqp -= 2;
	/* SDmaLastDesc */
	descqp[0] |= cpu_to_le64(1ULL << 11);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
		/* SDmaIntReq */
		descqp[0] |= cpu_to_le64(1ULL << 15);
	}

	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);

	tx->txreq.next_descq_idx = tail;
	tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
	dd->ipath_sdma_descq_tail = tail;
	dd->ipath_sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
		vl15_watchdog_enq(dd);
	goto unlock;

unmap:
	while (tail != dd->ipath_sdma_descq_tail) {
		if (!tail)
			tail = dd->ipath_sdma_descq_cnt - 1;
		else
			tail--;
		unmap_desc(dd, tail);
	}
ioerr:
	ret = -EIO;
unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
fail:
	return ret;
}