/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2


/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
        struct b43_dmadesc32 *desc;

        *meta = &(ring->meta[slot]);
        desc = ring->descbase;
        desc = &(desc[slot]);

        return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
        struct b43_dmadesc32 *descbase = ring->descbase;
        int slot;
        u32 ctl;
        u32 addr;
        u32 addrext;

        slot = (int)(&(desc->dma32) - descbase);
        B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

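        /* Split the DMA address: the low bits go straight into the
         * descriptor address field, while the top bits (covered by
         * SSB_DMA_TRANSLATION_MASK) are passed via the ADDREXT field
         * and replaced by the backplane translation bits below. */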
        addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
            >> SSB_DMA_TRANSLATION_SHIFT;
        addr |= ssb_dma_translation(ring->dev->dev);
        ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
        if (slot == ring->nr_slots - 1)
                ctl |= B43_DMA32_DCTL_DTABLEEND;
        if (start)
                ctl |= B43_DMA32_DCTL_FRAMESTART;
        if (end)
                ctl |= B43_DMA32_DCTL_FRAMEEND;
        if (irq)
                ctl |= B43_DMA32_DCTL_IRQ;
        ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
            & B43_DMA32_DCTL_ADDREXT_MASK;

        desc->dma32.control = cpu_to_le32(ctl);
        desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA32_TXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
                      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
        u32 val;

        val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
        val &= B43_DMA32_RXDPTR;

        return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA32_RXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
        .idx2desc = op32_idx2desc,
        .fill_descriptor = op32_fill_descriptor,
        .poke_tx = op32_poke_tx,
        .tx_suspend = op32_tx_suspend,
        .tx_resume = op32_tx_resume,
        .get_current_rxslot = op32_get_current_rxslot,
        .set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
                                          int slot,
                                          struct b43_dmadesc_meta **meta)
{
        struct b43_dmadesc64 *desc;

        *meta = &(ring->meta[slot]);
        desc = ring->descbase;
        desc = &(desc[slot]);

        return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
                                 struct b43_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
        struct b43_dmadesc64 *descbase = ring->descbase;
        int slot;
        u32 ctl0 = 0, ctl1 = 0;
        u32 addrlo, addrhi;
        u32 addrext;

        slot = (int)(&(desc->dma64) - descbase);
        B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

        addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
        addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
            >> SSB_DMA_TRANSLATION_SHIFT;
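        /* Merge the backplane translation bits into the high address
         * word, analogous to what op32_fill_descriptor does for the
         * low word in the 32-bit case. */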
        addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
        if (slot == ring->nr_slots - 1)
                ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
        if (start)
                ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
        if (end)
                ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
        if (irq)
                ctl0 |= B43_DMA64_DCTL0_IRQ;
        ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
        ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
            & B43_DMA64_DCTL1_ADDREXT_MASK;

        desc->dma64.control0 = cpu_to_le32(ctl0);
        desc->dma64.control1 = cpu_to_le32(ctl1);
        desc->dma64.address_low = cpu_to_le32(addrlo);
        desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA64_TXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
        b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
                      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
        u32 val;

        val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
        val &= B43_DMA64_RXSTATDPTR;

        return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
        b43_dma_write(ring, B43_DMA64_RXINDEX,
                      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
        .idx2desc = op64_idx2desc,
        .fill_descriptor = op64_fill_descriptor,
        .poke_tx = op64_poke_tx,
        .tx_suspend = op64_tx_suspend,
        .tx_resume = op64_tx_resume,
        .get_current_rxslot = op64_get_current_rxslot,
        .set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
        B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
        B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
                                  int current_used_slots)
{
        if (current_used_slots <= ring->max_used_slots)
                return;
        ring->max_used_slots = current_used_slots;
        if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
                b43dbg(ring->dev->wl,
                       "max_used_slots increased to %d on %s ring %d\n",
                       ring->max_used_slots,
                       ring->tx ? "TX" : "RX", ring->index);
        }
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
        int slot;

        B43_WARN_ON(!ring->tx);
        B43_WARN_ON(ring->stopped);
        B43_WARN_ON(free_slots(ring) == 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        update_max_used_slots(ring, ring->used_slots);

        return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
        static const u16 map64[] = {
                B43_MMIO_DMA64_BASE0,
                B43_MMIO_DMA64_BASE1,
                B43_MMIO_DMA64_BASE2,
                B43_MMIO_DMA64_BASE3,
                B43_MMIO_DMA64_BASE4,
                B43_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                B43_MMIO_DMA32_BASE0,
                B43_MMIO_DMA32_BASE1,
                B43_MMIO_DMA32_BASE2,
                B43_MMIO_DMA32_BASE3,
                B43_MMIO_DMA32_BASE4,
                B43_MMIO_DMA32_BASE5,
        };

        if (type == B43_DMA_64BIT) {
                B43_WARN_ON(!(controller_idx >= 0 &&
                              controller_idx < ARRAY_SIZE(map64)));
                return map64[controller_idx];
        }
        B43_WARN_ON(!(controller_idx >= 0 &&
                      controller_idx < ARRAY_SIZE(map32)));
        return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
                          unsigned char *buf, size_t len, int tx)
{
        dma_addr_t dmaaddr;

        if (tx) {
                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
                                         buf, len, DMA_TO_DEVICE);
        } else {
                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
                                         buf, len, DMA_FROM_DEVICE);
        }

        return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
                      dma_addr_t addr, size_t len, int tx)
{
        if (tx) {
                dma_unmap_single(ring->dev->dev->dma_dev,
                                 addr, len, DMA_TO_DEVICE);
        } else {
                dma_unmap_single(ring->dev->dev->dma_dev,
                                 addr, len, DMA_FROM_DEVICE);
        }
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
                             dma_addr_t addr, size_t len)
{
        B43_WARN_ON(ring->tx);
        dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
                                addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
                                dma_addr_t addr, size_t len)
{
        B43_WARN_ON(ring->tx);
        dma_sync_single_for_device(ring->dev->dev->dma_dev,
                                   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
                            struct b43_dmadesc_meta *meta)
{
        if (meta->skb) {
                dev_kfree_skb_any(meta->skb);
                meta->skb = NULL;
        }
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
        gfp_t flags = GFP_KERNEL;

        /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
         * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
         * has shown that 4K is sufficient for the latter as long as the buffer
         * does not cross an 8K boundary.
         *
         * For unknown reasons - possibly a hardware error - the BCM4311 rev
         * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
         * which accounts for the GFP_DMA flag below.
         *
         * The flags here must match the flags in free_ringmemory below!
         */
        if (ring->type == B43_DMA_64BIT)
                flags |= GFP_DMA;
        ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
                                            B43_DMA_RINGMEMSIZE,
                                            &(ring->dmabase), flags);
        if (!ring->descbase) {
                b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
                return -ENOMEM;
        }
        memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

        return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
        dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
                                      enum b43_dmatype type)
{
        int i;
        u32 value;
        u16 offset;

        might_sleep();

        offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
        b43_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
                offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
                                                   B43_DMA32_RXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_RXSTAT;
                        if (value == B43_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= B43_DMA32_RXSTATE;
                        if (value == B43_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                msleep(1);
        }
        if (i != -1) {
                b43err(dev->wl, "DMA RX reset timed out\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
                                      enum b43_dmatype type)
{
        int i;
        u32 value;
        u16 offset;

        might_sleep();

        for (i = 0; i < 10; i++) {
                offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
                                                   B43_DMA32_TXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_TXSTAT;
                        if (value == B43_DMA64_TXSTAT_DISABLED ||
                            value == B43_DMA64_TXSTAT_IDLEWAIT ||
                            value == B43_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= B43_DMA32_TXSTATE;
                        if (value == B43_DMA32_TXSTAT_DISABLED ||
                            value == B43_DMA32_TXSTAT_IDLEWAIT ||
                            value == B43_DMA32_TXSTAT_STOPPED)
                                break;
                }
                msleep(1);
        }
        offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
        b43_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
                offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
                                                   B43_DMA32_TXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
                if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_TXSTAT;
                        if (value == B43_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= B43_DMA32_TXSTATE;
                        if (value == B43_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                msleep(1);
        }
        if (i != -1) {
                b43err(dev->wl, "DMA TX reset timed out\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        msleep(1);

        return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
                                  dma_addr_t addr,
                                  size_t buffersize, bool dma_to_device)
{
        if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
                return 1;

        switch (ring->type) {
        case B43_DMA_30BIT:
                if ((u64)addr + buffersize > (1ULL << 30))
                        goto address_error;
                break;
        case B43_DMA_32BIT:
                if ((u64)addr + buffersize > (1ULL << 32))
                        goto address_error;
                break;
        case B43_DMA_64BIT:
                /* Currently we can't have addresses beyond
                 * 64bit in the kernel. */
                break;
        }

        /* The address is OK. */
        return 0;

address_error:
        /* We can't support this address. Unmap it again. */
        unmap_descbuffer(ring, addr, buffersize, dma_to_device);

        return 1;
}

static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring,
                                      struct sk_buff *skb)
{
        unsigned char *f = skb->data + ring->frameoffset;

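        /* The poison pattern is eight 0xFF bytes; ANDing them together
         * yields 0xFF only if every byte is still untouched. */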
        return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
        struct b43_rxhdr_fw4 *rxhdr;
        unsigned char *frame;

        /* This poisons the RX buffer to detect DMA failures. */

        rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
        rxhdr->frame_len = 0;

        B43_WARN_ON(ring->rx_buffersize < ring->frameoffset +
                    sizeof(struct b43_plcp_hdr6) + 2);
        frame = skb->data + ring->frameoffset;
        memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
                               struct b43_dmadesc_generic *desc,
                               struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        B43_WARN_ON(ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        b43_poison_rx_buffer(ring, skb);
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
                /* ugh. try to realloc in zone_dma */
                gfp_flags |= GFP_DMA;

                dev_kfree_skb_any(skb);

                skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
                if (unlikely(!skb))
                        return -ENOMEM;
                b43_poison_rx_buffer(ring, skb);
                dmaaddr = map_descbuffer(ring, skb->data,
                                         ring->rx_buffersize, 0);
                if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
                        b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
                        dev_kfree_skb_any(skb);
                        return -EIO;
                }
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        ring->ops->fill_descriptor(ring, desc, dmaaddr,
                                   ring->rx_buffersize, 0, 0, 0);

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err) {
                        b43err(ring->dev->wl,
                               "Failed to allocate initial descbuffers\n");
                        goto err_unwind;
                }
        }
        mb();
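        /* The memory barrier makes sure all descriptor writes are
         * visible before the ring is considered fully populated. */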
        ring->used_slots = ring->nr_slots;
        err = 0;
out:
        return err;

err_unwind:
        for (i--; i >= 0; i--) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;
        u32 trans = ssb_dma_translation(ring->dev->dev);

        if (ring->tx) {
                if (ring->type == B43_DMA_64BIT) {
                        u64 ringbase = (u64) (ring->dmabase);

                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = B43_DMA64_TXENABLE;
                        value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
                            & B43_DMA64_TXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA64_TXCTL, value);
                        b43_dma_write(ring, B43_DMA64_TXRINGLO,
                                      (ringbase & 0xFFFFFFFF));
                        b43_dma_write(ring, B43_DMA64_TXRINGHI,
                                      ((ringbase >> 32) &
                                       ~SSB_DMA_TRANSLATION_MASK)
                                      | (trans << 1));
                } else {
                        u32 ringbase = (u32) (ring->dmabase);

                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = B43_DMA32_TXENABLE;
                        value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
                            & B43_DMA32_TXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA32_TXCTL, value);
                        b43_dma_write(ring, B43_DMA32_TXRING,
                                      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                                      | trans);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->type == B43_DMA_64BIT) {
                        u64 ringbase = (u64) (ring->dmabase);

                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
                        value |= B43_DMA64_RXENABLE;
                        value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
                            & B43_DMA64_RXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA64_RXCTL, value);
                        b43_dma_write(ring, B43_DMA64_RXRINGLO,
                                      (ringbase & 0xFFFFFFFF));
                        b43_dma_write(ring, B43_DMA64_RXRINGHI,
                                      ((ringbase >> 32) &
                                       ~SSB_DMA_TRANSLATION_MASK)
                                      | (trans << 1));
                        b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
                                      sizeof(struct b43_dmadesc64));
                } else {
                        u32 ringbase = (u32) (ring->dmabase);

                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                            >> SSB_DMA_TRANSLATION_SHIFT;
                        value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
                        value |= B43_DMA32_RXENABLE;
                        value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
                            & B43_DMA32_RXADDREXT_MASK;
                        b43_dma_write(ring, B43_DMA32_RXCTL, value);
                        b43_dma_write(ring, B43_DMA32_RXRING,
                                      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
                                      | trans);
                        b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
                                      sizeof(struct b43_dmadesc32));
                }
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
        if (ring->tx) {
                b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
                                           ring->type);
                if (ring->type == B43_DMA_64BIT) {
                        b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
                        b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
                } else
                        b43_dma_write(ring, B43_DMA32_TXRING, 0);
        } else {
                b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
                                           ring->type);
                if (ring->type == B43_DMA_64BIT) {
                        b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
                        b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
                } else
                        b43_dma_write(ring, B43_DMA32_RXRING, 0);
        }
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
                        B43_WARN_ON(!ring->tx);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, meta);
        }
}
static u64 supported_dma_mask(struct b43_wldev *dev)
{
        u32 tmp;
        u16 mmio_base;

        tmp = b43_read32(dev, SSB_TMSHIGH);
        if (tmp & SSB_TMSHIGH_DMA64)
                return DMA_BIT_MASK(64);
        mmio_base = b43_dmacontroller_base(0, 0);
        b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
        tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
        if (tmp & B43_DMA32_TXADDREXT_MASK)
                return DMA_BIT_MASK(32);

        return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
        if (dmamask == DMA_BIT_MASK(30))
                return B43_DMA_30BIT;
        if (dmamask == DMA_BIT_MASK(32))
                return B43_DMA_32BIT;
        if (dmamask == DMA_BIT_MASK(64))
                return B43_DMA_64BIT;
        B43_WARN_ON(1);
        return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                      int controller_index,
                                      int for_tx,
                                      enum b43_dmatype type)
{
        struct b43_dmaring *ring;
        int i, err;
        dma_addr_t dma_test;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        ring->nr_slots = B43_RXRING_SLOTS;
        if (for_tx)
                ring->nr_slots = B43_TXRING_SLOTS;

        ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;
        for (i = 0; i < ring->nr_slots; i++)
                ring->meta[i].skb = B43_DMA_PTR_POISON;

        ring->type = type;
        ring->dev = dev;
        ring->mmio_base = b43_dmacontroller_base(type, controller_index);
        ring->index = controller_index;
        if (type == B43_DMA_64BIT)
                ring->ops = &dma64_ops;
        else
                ring->ops = &dma32_ops;
        if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
                        ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
                        ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
                } else
                        B43_WARN_ON(1);
        }
#ifdef CONFIG_B43_DEBUG
        ring->last_injected_overflow = jiffies;
#endif

        if (for_tx) {
                /* Assumption: B43_TXRING_SLOTS is divisible by TX_SLOTS_PER_FRAME */
                BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

                ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
                                            b43_txhdr_size(dev),
                                            GFP_KERNEL);
                if (!ring->txhdr_cache)
                        goto err_kfree_meta;

                /* test for ability to dma to txhdr_cache */
                dma_test = dma_map_single(dev->dev->dma_dev,
                                          ring->txhdr_cache,
                                          b43_txhdr_size(dev),
                                          DMA_TO_DEVICE);

                if (b43_dma_mapping_error(ring, dma_test,
                                          b43_txhdr_size(dev), 1)) {
                        /* ugh realloc */
                        kfree(ring->txhdr_cache);
                        ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
                                                    b43_txhdr_size(dev),
                                                    GFP_KERNEL | GFP_DMA);
                        if (!ring->txhdr_cache)
                                goto err_kfree_meta;

                        dma_test = dma_map_single(dev->dev->dma_dev,
                                                  ring->txhdr_cache,
                                                  b43_txhdr_size(dev),
                                                  DMA_TO_DEVICE);

                        if (b43_dma_mapping_error(ring, dma_test,
                                                  b43_txhdr_size(dev), 1)) {

                                b43err(dev->wl,
                                       "TXHDR DMA allocation failed\n");
                                goto err_kfree_txhdr_cache;
                        }
                }

                dma_unmap_single(dev->dev->dma_dev,
                                 dma_test, b43_txhdr_size(dev),
                                 DMA_TO_DEVICE);
        }

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_txhdr_cache;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;

out:
        return ring;

err_free_ringmemory:
        free_ringmemory(ring);
err_kfree_txhdr_cache:
        kfree(ring->txhdr_cache);
err_kfree_meta:
        kfree(ring->meta);
err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

#define divide(a, b)    ({      \
        typeof(a) __a = a;      \
        do_div(__a, b);         \
        __a;                    \
  })

#define modulo(a, b)    ({      \
        typeof(a) __a = a;      \
        do_div(__a, b);         \
  })
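
/* do_div() divides the 64-bit dividend in place and returns the remainder,
 * so divide() evaluates to the quotient and modulo() to the remainder.
 * This avoids a direct 64-bit division, which 32-bit kernels lack. */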

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
                                const char *ringname)
{
        if (!ring)
                return;

#ifdef CONFIG_B43_DEBUG
        {
                /* Print some statistics. */
                u64 failed_packets = ring->nr_failed_tx_packets;
                u64 succeed_packets = ring->nr_succeed_tx_packets;
                u64 nr_packets = failed_packets + succeed_packets;
                u64 permille_failed = 0, average_tries = 0;

                if (nr_packets)
                        permille_failed = divide(failed_packets * 1000, nr_packets);
                if (nr_packets)
                        average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

                b43dbg(ring->dev->wl, "DMA-%u %s: "
                       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
                       "Average tries %llu.%02llu\n",
                       (unsigned int)(ring->type), ringname,
                       ring->max_used_slots,
                       ring->nr_slots,
                       (unsigned long long)failed_packets,
                       (unsigned long long)nr_packets,
                       (unsigned long long)divide(permille_failed, 10),
                       (unsigned long long)modulo(permille_failed, 10),
                       (unsigned long long)divide(average_tries, 100),
                       (unsigned long long)modulo(average_tries, 100));
        }
#endif /* DEBUG */

        /* Device IRQs are disabled prior to entering this function,
         * so there is no need to take care of concurrency with the
         * RX handler. */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->txhdr_cache);
        kfree(ring->meta);
        kfree(ring);
}

#define destroy_ring(dma, ring) do {                            \
        b43_destroy_dmaring((dma)->ring, __stringify(ring));    \
        (dma)->ring = NULL;                                     \
  } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
        struct b43_dma *dma;

        if (b43_using_pio_transfers(dev))
                return;
        dma = &dev->dma;

        destroy_ring(dma, rx_ring);
        destroy_ring(dma, tx_ring_AC_BK);
        destroy_ring(dma, tx_ring_AC_BE);
        destroy_ring(dma, tx_ring_AC_VI);
        destroy_ring(dma, tx_ring_AC_VO);
        destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
        u64 orig_mask = mask;
        bool fallback = 0;
        int err;

        /* Try to set the DMA mask. If it fails, try falling back to a
         * lower mask, as we can always also support a lower one. */
        while (1) {
                err = dma_set_mask(dev->dev->dma_dev, mask);
                if (!err) {
                        err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
                        if (!err)
                                break;
                }
                if (mask == DMA_BIT_MASK(64)) {
                        mask = DMA_BIT_MASK(32);
                        fallback = 1;
                        continue;
                }
                if (mask == DMA_BIT_MASK(32)) {
                        mask = DMA_BIT_MASK(30);
                        fallback = 1;
                        continue;
                }
                b43err(dev->wl, "The machine/kernel does not support "
                       "the required %u-bit DMA mask\n",
                       (unsigned int)dma_mask_to_engine_type(orig_mask));
                return -EOPNOTSUPP;
        }
        if (fallback) {
                b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
                        (unsigned int)dma_mask_to_engine_type(orig_mask),
                        (unsigned int)dma_mask_to_engine_type(mask));
        }

        return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
        struct b43_dma *dma = &dev->dma;
        int err;
        u64 dmamask;
        enum b43_dmatype type;

        dmamask = supported_dma_mask(dev);
        type = dma_mask_to_engine_type(dmamask);
        err = b43_dma_set_mask(dev, dmamask);
        if (err)
                return err;

        err = -ENOMEM;
        /* setup TX DMA channels. */
        dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
        if (!dma->tx_ring_AC_BK)
                goto out;

        dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
        if (!dma->tx_ring_AC_BE)
                goto err_destroy_bk;

        dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
        if (!dma->tx_ring_AC_VI)
                goto err_destroy_be;

        dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
        if (!dma->tx_ring_AC_VO)
                goto err_destroy_vi;

        dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
        if (!dma->tx_ring_mcast)
                goto err_destroy_vo;

        /* setup RX DMA channel. */
        dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
        if (!dma->rx_ring)
                goto err_destroy_mcast;

        /* No support for the TX status DMA ring. */
        B43_WARN_ON(dev->dev->id.revision < 5);

        b43dbg(dev->wl, "%u-bit DMA initialized\n",
               (unsigned int)type);
        err = 0;
out:
        return err;

err_destroy_mcast:
        destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
        destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
        destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
        destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
        destroy_ring(dma, tx_ring_AC_BK);
        return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
        u16 cookie;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in the RX path.
         * It can also not be 0xFFFF because that is special
         * for multicast frames.
         */
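        /* Example: slot 5 on the AC_BE ring (index 1) yields
         * cookie 0x2005, which parse_cookie() maps back. */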
        cookie = (((u16)ring->index + 1) << 12);
        B43_WARN_ON(slot & ~0x0FFF);
        cookie |= (u16)slot;

        return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
        struct b43_dma *dma = &dev->dma;
        struct b43_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0x1000:
                ring = dma->tx_ring_AC_BK;
                break;
        case 0x2000:
                ring = dma->tx_ring_AC_BE;
                break;
        case 0x3000:
                ring = dma->tx_ring_AC_VI;
                break;
        case 0x4000:
                ring = dma->tx_ring_AC_VO;
                break;
        case 0x5000:
                ring = dma->tx_ring_mcast;
                break;
        }
        *slot = (cookie & 0x0FFF);
        if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
                b43dbg(dev->wl, "TX-status contains "
                       "invalid cookie: 0x%04X\n", cookie);
                return NULL;
        }

        return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
                           struct sk_buff *skb)
{
        const struct b43_dma_ops *ops = ring->ops;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
        u8 *header;
        int slot, old_top_slot, old_used_slots;
        int err;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        struct b43_dmadesc_meta *meta_hdr;
        u16 cookie;
        size_t hdrsize = b43_txhdr_size(ring->dev);

        /* Important note: If the number of used DMA slots per TX frame
         * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
         * the file has to be updated, too!
         */

        old_top_slot = ring->current_slot;
        old_used_slots = ring->used_slots;

        /* Get a slot for the header. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta_hdr);
        memset(meta_hdr, 0, sizeof(*meta_hdr));

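        /* Each frame occupies TX_SLOTS_PER_FRAME slots, so the header
         * cache is indexed by frame, not by slot. */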
        header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
        cookie = generate_cookie(ring, slot);
        err = b43_generate_txhdr(ring->dev, header,
                                 skb, info, cookie);
        if (unlikely(err)) {
                ring->current_slot = old_top_slot;
                ring->used_slots = old_used_slots;
                return err;
        }

        meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                           hdrsize, 1);
        if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
                ring->current_slot = old_top_slot;
                ring->used_slots = old_used_slots;
                return -EIO;
        }
        ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
                             hdrsize, 1, 0, 0);

        /* Get a slot for the payload. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta);
        memset(meta, 0, sizeof(*meta));

        meta->skb = skb;
        meta->is_last_fragment = 1;
        priv_info->bouncebuffer = NULL;

        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
        if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
                priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
                                                  GFP_ATOMIC | GFP_DMA);
                if (!priv_info->bouncebuffer) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
                        err = -ENOMEM;
                        goto out_unmap_hdr;
                }

                meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
                if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
                        kfree(priv_info->bouncebuffer);
                        priv_info->bouncebuffer = NULL;
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
                        err = -EIO;
                        goto out_unmap_hdr;
                }
        }

        ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /* Tell the firmware about the cookie of the last
                 * mcast frame, so it can clear the more-data bit in it. */
                b43_shm_write16(ring->dev, B43_SHM_SHARED,
                                B43_SHM_SH_MCASTCOOKIE, cookie);
        }
        /* Now transfer the whole frame. */
        wmb();
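        /* The write barrier orders the descriptor writes before the
         * TX index poke that hands the frame to the device. */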
        ops->poke_tx(ring, next_slot(ring, slot));
        return 0;

out_unmap_hdr:
        unmap_descbuffer(ring, meta_hdr->dmaaddr,
                         hdrsize, 1);
        return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
        if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
                /* Check if we should inject another ringbuffer overflow
                 * to test handling of this situation in the stack. */
                unsigned long next_overflow;

                next_overflow = ring->last_injected_overflow + HZ;
                if (time_after(jiffies, next_overflow)) {
                        ring->last_injected_overflow = jiffies;
                        b43dbg(ring->dev->wl,
                               "Injecting TX ring overflow on "
                               "DMA controller %d\n", ring->index);
                        return 1;
                }
        }
#endif /* CONFIG_B43_DEBUG */
        return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
                                                   u8 queue_prio)
{
        struct b43_dmaring *ring;

        if (dev->qos_enabled) {
                /* 0 = highest priority */
                switch (queue_prio) {
                default:
                        B43_WARN_ON(1);
                        /* fallthrough */
                case 0:
                        ring = dev->dma.tx_ring_AC_VO;
                        break;
                case 1:
                        ring = dev->dma.tx_ring_AC_VI;
                        break;
                case 2:
                        ring = dev->dma.tx_ring_AC_BE;
                        break;
                case 3:
                        ring = dev->dma.tx_ring_AC_BK;
                        break;
                }
        } else
                ring = dev->dma.tx_ring_AC_BE;

        return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
        struct b43_dmaring *ring;
        struct ieee80211_hdr *hdr;
        int err = 0;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        hdr = (struct ieee80211_hdr *)skb->data;
        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                /* The multicast ring will be sent after the DTIM. */
                ring = dev->dma.tx_ring_mcast;
                /* Set the more-data bit. Ucode will clear it on
                 * the last frame for us. */
                hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
        } else {
                /* Decide by priority where to put this frame. */
                ring = select_ring_by_priority(dev, skb_get_queue_mapping(skb));
        }

        B43_WARN_ON(!ring->tx);

        if (unlikely(ring->stopped)) {
                /* We get here only because of a bug in mac80211.
                 * Because of a race, one packet may be queued after
                 * the queue is stopped, thus we got called when we shouldn't.
                 * For now, just refuse the transmit. */
                if (b43_debug(dev, B43_DBG_DMAVERBOSE))
                        b43err(dev->wl, "Packet after queue stopped\n");
                err = -ENOSPC;
                goto out;
        }

        if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
                /* If we get here, we have a real error with the queue
                 * full, but queues not stopped. */
                b43err(dev->wl, "DMA queue overflow\n");
                err = -ENOSPC;
                goto out;
        }

        /* Assign the queue number to the ring (if not already done before)
         * so TX status handling can use it. The queue to ring mapping is
         * static, so we don't need to store it per frame. */
        ring->queue_prio = skb_get_queue_mapping(skb);

        err = dma_tx_fragment(ring, skb);
        if (unlikely(err == -ENOKEY)) {
                /* Drop this packet, as we don't have the encryption key
                 * anymore and must not transmit it unencrypted. */
                dev_kfree_skb_any(skb);
                err = 0;
                goto out;
        }
        if (unlikely(err)) {
                b43err(dev->wl, "DMA tx mapping failure\n");
                goto out;
        }
        if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
            should_inject_overflow(ring)) {
                /* This TX ring is full. */
                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
                ring->stopped = 1;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
                }
        }
out:
        return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
                             const struct b43_txstatus *status)
{
        const struct b43_dma_ops *ops;
        struct b43_dmaring *ring;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        int slot, firstused;
        bool frame_succeed;

        ring = parse_cookie(dev, status->cookie, &slot);
        if (unlikely(!ring))
                return;
        B43_WARN_ON(!ring->tx);

        /* Sanity check: TX packets are processed in-order on one ring.
         * Check if the slot deduced from the cookie really is the first
         * used slot. */
        firstused = ring->current_slot - ring->used_slots + 1;
        if (firstused < 0)
                firstused = ring->nr_slots + firstused;
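        /* A negative intermediate value means the span of used slots
         * wraps around the end of the ring. */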
        if (unlikely(slot != firstused)) {
                /* This possibly is a firmware bug and will result in
                 * malfunction, memory leaks and/or stall of DMA functionality. */
                b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
                       "Expected %d, but got %d\n",
                       ring->index, firstused, slot);
                return;
        }

        ops = ring->ops;
        while (1) {
                B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
                desc = ops->idx2desc(ring, slot, &meta);

                if (b43_dma_ptr_is_poisoned(meta->skb)) {
                        b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
                               "on ring %d\n",
                               slot, firstused, ring->index);
                        break;
                }
                if (meta->skb) {
                        struct b43_private_tx_info *priv_info =
                                b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

                        unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
                        kfree(priv_info->bouncebuffer);
                        priv_info->bouncebuffer = NULL;
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         b43_txhdr_size(dev), 1);
                }

                if (meta->is_last_fragment) {
                        struct ieee80211_tx_info *info;

                        if (unlikely(!meta->skb)) {
                                /* This is the last fragment of the frame, so
                                 * the skb pointer must not be NULL. */
                                b43dbg(dev->wl, "TX status unexpected NULL skb "
                                       "at slot %d (first=%d) on ring %d\n",
                                       slot, firstused, ring->index);
                                break;
                        }

                        info = IEEE80211_SKB_CB(meta->skb);

                        /*
                         * Call back to inform the ieee80211 subsystem about
                         * the status of the transmission.
                         */
                        frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
                        if (frame_succeed)
                                ring->nr_succeed_tx_packets++;
                        else
                                ring->nr_failed_tx_packets++;
                        ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
                        ieee80211_tx_status(dev->wl->hw, meta->skb);

                        /* skb will be freed by ieee80211_tx_status().
                         * Poison our pointer. */
                        meta->skb = B43_DMA_PTR_POISON;
                } else {
                        /* No need to call free_descriptor_buffer here, as
                         * this is only the txhdr, which is not allocated.
                         */
                        if (unlikely(meta->skb)) {
                                b43dbg(dev->wl, "TX status unexpected non-NULL skb "
                                       "at slot %d (first=%d) on ring %d\n",
                                       slot, firstused, ring->index);
                                break;
                        }
                }

                /* Everything unmapped and free'd. So it's not used anymore. */
                ring->used_slots--;

                if (meta->is_last_fragment) {
                        /* This is the last scatter-gather
                         * fragment of the frame. We are done. */
                        break;
                }
                slot = next_slot(ring, slot);
        }
        if (ring->stopped) {
                B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
                ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
                ring->stopped = 0;
                if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                        b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
                }
        }
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
        const struct b43_dma_ops *ops = ring->ops;
        struct b43_dmadesc_generic *desc;
        struct b43_dmadesc_meta *meta;
        struct b43_rxhdr_fw4 *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = ops->idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
        len = le16_to_cpu(rxhdr->frame_len);
        if (len == 0) {
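                /* The device may raise the RX interrupt before the length
                 * field reaches memory; poll briefly for it to show up. */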
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_len);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        dmaaddr = meta->dmaaddr;
                        goto drop_recycle_buffer;
                }
        }
        if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
                /* Something went wrong with the DMA.
                 * The device did not touch the buffer and did not overwrite the poison. */
                b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
                dmaaddr = meta->dmaaddr;
                goto drop_recycle_buffer;
        }
        if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = ops->idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        b43_poison_rx_buffer(ring, meta->skb);
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                b43err(ring->dev->wl, "DMA RX buffer too small "
                       "(len: %u, buffer: %u, nr-dropped: %d)\n",
                       len, ring->rx_buffersize, cnt);
                goto drop;
        }

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
                goto drop_recycle_buffer;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        b43_rx(ring->dev, skb, rxhdr);
drop:
        return;

drop_recycle_buffer:
        /* Poison and recycle the RX buffer. */
        b43_poison_rx_buffer(ring, skb);
        sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
        const struct b43_dma_ops *ops = ring->ops;
        int slot, current_slot;
        int used_slots = 0;

        B43_WARN_ON(ring->tx);
        current_slot = ops->get_current_rxslot(ring);
        B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

        slot = ring->current_slot;
        for (; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
                update_max_used_slots(ring, ++used_slots);
        }
        ops->set_current_rxslot(ring, slot);
        ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
        B43_WARN_ON(!ring->tx);
        ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
        B43_WARN_ON(!ring->tx);
        ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
        b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
        b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
        b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
        b43_power_saving_ctl_bits(dev, 0);
}

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
                           u16 mmio_base, bool enable)
{
        u32 ctl;

        if (type == B43_DMA_64BIT) {
                ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
                ctl &= ~B43_DMA64_RXDIRECTFIFO;
                if (enable)
                        ctl |= B43_DMA64_RXDIRECTFIFO;
                b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
        } else {
                ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
                ctl &= ~B43_DMA32_RXDIRECTFIFO;
                if (enable)
                        ctl |= B43_DMA32_RXDIRECTFIFO;
                b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
        }
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
                            unsigned int engine_index, bool enable)
{
        enum b43_dmatype type;
        u16 mmio_base;

        type = dma_mask_to_engine_type(supported_dma_mask(dev));

        mmio_base = b43_dmacontroller_base(type, engine_index);
        direct_fifo_rx(dev, type, mmio_base, enable);
}
1671