/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _BRCM_DMA_H_
#define _BRCM_DMA_H_

#include <linux/delay.h>
#include <linux/skbuff.h>
#include "types.h"		/* forward structure declarations */

/* map/unmap direction */
#define	DMA_TX	1		/* TX direction for DMA */
#define	DMA_RX	2		/* RX direction for DMA */

/* DMA structure:
 * supports two DMA engines: 32-bit or 64-bit addressing.
 * The basic DMA register set is per channel (transmit or receive);
 * a pair of channels is defined for convenience.
 */

/* 32-bit addressing */

struct dma32diag {		/* diag access */
	u32 fifoaddr;		/* diag address */
	u32 fifodatalow;	/* low 32bits of data */
	u32 fifodatahigh;	/* high 32bits of data */
	u32 pad;		/* reserved */
};

/* 64-bit addressing */

/* dma registers per channel (xmt or rcv) */
struct dma64regs {
	u32 control;	/* enable, et al */
	u32 ptr;	/* last descriptor posted to chip */
	u32 addrlow;	/* desc ring base address low 32-bits (8K aligned) */
	u32 addrhigh;	/* desc ring base address bits 63:32 (8K aligned) */
	u32 status0;	/* current descriptor, xmt state */
	u32 status1;	/* active descriptor, xmt error */
};

/* range param for dma_getnexttxp() and dma_txreclaim() */
enum txd_range {
	DMA_RANGE_ALL = 1,
	DMA_RANGE_TRANSMITTED,
	DMA_RANGE_TRANSFERED
};
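
/*
 * Note (per dma_getnexttxp() in dma.c): DMA_RANGE_TRANSMITTED reclaims only
 * descriptors the hardware has reported as transmitted, DMA_RANGE_TRANSFERED
 * those already transferred to the DMA engine, and DMA_RANGE_ALL every
 * descriptor posted to the ring regardless of hardware state.
 */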

/*
 * Exported data structure (read-only)
 */
struct dma_pub {
	uint txavail;		/* # free tx descriptors */
	uint dmactrlflags;	/* dma control flags */

	/* rx error counters */
	uint rxgiants;		/* rx giant frames */
	uint rxnobuf;		/* rx out of dma descriptors */
	/* tx error counters */
	uint txnobuf;		/* tx out of dma descriptors */
};
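
/*
 * Note: dma.c embeds struct dma_pub at the start of its private per-engine
 * state and returns a pointer to it from dma_attach(); callers may read
 * these fields but must not modify them.
 */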

struct dma_pub *dma_attach(char *name, struct si_pub *sih,
			   struct bcma_device *d11core,
			   uint txregbase, uint rxregbase,
			   uint ntxd, uint nrxd,
			   uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset, uint *msg_level);

void dma_rxinit(struct dma_pub *pub);
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
bool dma_rxfill(struct dma_pub *pub);
bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub);
void dma_txinit(struct dma_pub *pub);
int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit);
void dma_txsuspend(struct dma_pub *pub);
bool dma_txsuspended(struct dma_pub *pub);
void dma_txresume(struct dma_pub *pub);
void dma_txreclaim(struct dma_pub *pub, enum txd_range range);
void dma_rxreclaim(struct dma_pub *pub);
void dma_detach(struct dma_pub *pub);
unsigned long dma_getvar(struct dma_pub *pub, const char *name);
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range);
void dma_counterreset(struct dma_pub *pub);

void dma_walk_packets(struct dma_pub *dmah,
		      void (*callback_fnc)(void *pkt, void *arg_a),
		      void *arg_a);
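
/*
 * Hypothetical usage sketch (illustrative only; the descriptor counts,
 * buffer sizes and variable names below are assumptions, not requirements
 * of this API):
 *
 *	struct dma_pub *di;
 *	struct sk_buff *skb;
 *	struct sk_buff_head recv_list;
 *
 *	di = dma_attach("wl0", sih, d11core, txregbase, rxregbase,
 *			256, 256, 2048, -1, 128, 0, &msg_level);
 *	if (di == NULL)
 *		return -ENOMEM;
 *
 *	dma_txinit(di);
 *	dma_rxinit(di);
 *	dma_rxfill(di);
 *
 *	dma_txfast(di, skb, true);
 *
 *	skb_queue_head_init(&recv_list);
 *	dma_rx(di, &recv_list);
 *	dma_rxfill(di);
 *	while ((skb = dma_getnexttxp(di, DMA_RANGE_TRANSMITTED)) != NULL)
 *		dev_kfree_skb(skb);
 *
 *	dma_detach(di);
 */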

/*
 * A DMA bug on bcm47xx chips: the engine can signal that a packet is ready
 * before it has written the packet length to the buffer. The workaround is
 * to spin until the length becomes non-zero, staying off the bus so the
 * DMA engine can complete the update.
 */
static inline void dma_spin_for_len(uint len, struct sk_buff *head)
{
#if defined(CONFIG_BCM47XX)
	if (!len) {
		while (!(len = *(u16 *) KSEG1ADDR(head->data)))
			udelay(1);

		*(u16 *) (head->data) = cpu_to_le16((u16) len);
	}
#endif				/* defined(CONFIG_BCM47XX) */
}
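
/*
 * The receive path (dma_rx() in dma.c) is expected to call the helper above
 * on the head buffer right after reading the frame length and before any
 * further processing of the frame.
 */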

#endif				/* _BRCM_DMA_H_ */