/*
 * Copyright (C) 2002 Intersil Americas Inc.
 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/gfp.h>

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <asm/byteorder.h>

#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_eth.h"
#include "islpci_mgt.h"
#include "oid_mgt.h"

/******************************************************************************
        Network Interface functions
******************************************************************************/
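/*
 * Reclaim transmitted frames: walk the TX data low queue from the driver's
 * free pointer up to the device's current fragment pointer, unmapping the
 * DMA buffers and freeing the associated sk_buffs.
 */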
void
islpci_eth_cleanup_transmit(islpci_private *priv,
                            isl38xx_control_block *control_block)
{
        struct sk_buff *skb;
        u32 index;

        /* compare the control block read pointer with the free pointer */
        while (priv->free_data_tx !=
               le32_to_cpu(control_block->
                           device_curr_frag[ISL38XX_CB_TX_DATA_LQ])) {
                /* read the index of the first fragment to be freed */
                index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;

                /* check for holes in the arrays caused by multi fragment frames
                 * searching for the last fragment of a frame */
                if (priv->pci_map_tx_address[index]) {
                        /* entry is the last fragment of a frame
                         * free the skb structure and unmap pci memory */
                        skb = priv->data_low_tx[index];

#if VERBOSE > SHOW_ERROR_MESSAGES
                        DEBUG(SHOW_TRACING,
                              "cleanup skb %p skb->data %p skb->len %u truesize %u\n ",
                              skb, skb->data, skb->len, skb->truesize);
#endif

                        pci_unmap_single(priv->pdev,
                                         priv->pci_map_tx_address[index],
                                         skb->len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_irq(skb);
                        skb = NULL;
                }
                /* increment the free data low queue pointer */
                priv->free_data_tx++;
        }
}

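/*
 * Queue one frame for transmission on the data low queue. The payload is
 * re-aligned (and prefixed with a 6-byte WDS address field when WDS is
 * enabled), mapped for DMA and handed to the device through the control
 * block.
 */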
netdev_tx_t
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
        islpci_private *priv = netdev_priv(ndev);
        isl38xx_control_block *cb = priv->control_block;
        u32 index;
        dma_addr_t pci_map_address;
        int frame_size;
        isl38xx_fragment *fragment;
        int offset;
        struct sk_buff *newskb;
        int newskb_offset;
        unsigned long flags;
        unsigned char wds_mac[6];
        u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif

        /* lock the driver code */
        spin_lock_irqsave(&priv->slock, flags);

        /* check whether the destination queue has a free fragment slot for the frame */
        curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
        if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
                printk(KERN_ERR "%s: transmit device queue full when awake\n",
                       ndev->name);
                netif_stop_queue(ndev);

                /* trigger the device */
                isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
                                  ISL38XX_DEV_INT_REG);
                udelay(ISL38XX_WRITEIO_DELAY);
                goto drop_free;
        }
        /* Check alignment and WDS frame formatting. The start of the packet should
         * be aligned on a 4-byte boundary. If WDS is enabled, add another 6 bytes
         * for the WDS address information */
        if (likely(((long) skb->data & 0x03) | init_wds)) {
                /* get the number of bytes to add and re-align */
                offset = (4 - (long) skb->data) & 0x03;
                offset += init_wds ? 6 : 0;

                /* check whether the current skb can be used */
                if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
                        unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
                        DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
                              init_wds);
#endif

                        /* align the buffer on 4-byte boundary */
                        skb_reserve(skb, (4 - (long) skb->data) & 0x03);
                        if (init_wds) {
                                /* wds requires an additional address field of 6 bytes */
                                skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
                                printk("islpci_eth_transmit:wds_mac\n");
#endif
                                memmove(skb->data + 6, src, skb->len);
                                skb_copy_to_linear_data(skb, wds_mac, 6);
                        } else {
                                memmove(skb->data, src, skb->len);
                        }

#if VERBOSE > SHOW_ERROR_MESSAGES
                        DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
                              src, skb->len);
#endif
                } else {
                        newskb =
                            dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
                        if (unlikely(newskb == NULL)) {
                                printk(KERN_ERR "%s: Cannot allocate skb\n",
                                       ndev->name);
                                goto drop_free;
                        }
                        newskb_offset = (4 - (long) newskb->data) & 0x03;

                        /* Check if newskb->data is aligned */
                        if (newskb_offset)
                                skb_reserve(newskb, newskb_offset);

                        skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
                        if (init_wds) {
                                skb_copy_from_linear_data(skb,
                                                          newskb->data + 6,
                                                          skb->len);
                                skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
                                printk("islpci_eth_transmit:wds_mac\n");
#endif
                        } else
                                skb_copy_from_linear_data(skb, newskb->data,
                                                          skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
                        DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
                              newskb->data, skb->data, skb->len, init_wds);
#endif

                        newskb->dev = skb->dev;
                        dev_kfree_skb_irq(skb);
                        skb = newskb;
                }
        }
        /* display the buffer contents for debugging */
#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
        display_buffer((char *) skb->data, skb->len);
#endif

        /* map the skb buffer to pci memory for DMA operation */
        pci_map_address = pci_map_single(priv->pdev,
                                         (void *) skb->data, skb->len,
                                         PCI_DMA_TODEVICE);
        if (unlikely(pci_map_address == 0)) {
                printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
                       ndev->name);
                goto drop_free;
        }
        /* Place the fragment in the control block structure. */
        index = curr_frag % ISL38XX_CB_TX_QSIZE;
        fragment = &cb->tx_data_low[index];

        priv->pci_map_tx_address[index] = pci_map_address;
        /* store the skb address for future freeing */
        priv->data_low_tx[index] = skb;
        /* set the proper fragment start address and size information */
        frame_size = skb->len;
        fragment->size = cpu_to_le16(frame_size);
        fragment->flags = cpu_to_le16(0);       /* set to 1 if more fragments */
        fragment->address = cpu_to_le32(pci_map_address);
        curr_frag++;

        /* The fragment address in the control block must have been
         * written before announcing the frame buffer to device. */
        wmb();
        cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);

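        /* When fewer than ISL38XX_MIN_QTHRESHOLD free slots remain in the TX
         * low queue, stop the upper layers and mark the queue full until
         * transmitted entries have been reclaimed. */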
        if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
            > ISL38XX_CB_TX_QSIZE) {
                /* stop sends from upper layers */
                netif_stop_queue(ndev);

                /* set the full flag for the transmission queue */
                priv->data_low_tx_full = 1;
        }

        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += skb->len;

        /* trigger the device */
        islpci_trigger(priv);

        /* unlock the driver code */
        spin_unlock_irqrestore(&priv->slock, flags);

        return NETDEV_TX_OK;

      drop_free:
        ndev->stats.tx_dropped++;
        spin_unlock_irqrestore(&priv->slock, flags);
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

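/*
 * Prepare a frame received in monitor mode for delivery: strip the
 * proprietary rfmon header and, when the interface uses the Prism/AVS
 * ARPHRD type, prepend an AVS capture header built from it. Returns -1
 * if the frame is flagged as corrupt and must be dropped, 0 otherwise.
 */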
static inline int
islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
{
        /* The card reports full 802.11 packets but with a 20-byte
         * header and without the FCS. But there is a bit that
         * indicates if the packet is corrupted :-) */
        struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;

        if (hdr->flags & 0x01)
                /* This one is bad. Drop it ! */
                return -1;
        if (priv->ndev->type == ARPHRD_IEEE80211_PRISM) {
                struct avs_80211_1_header *avs;
                /* extract the relevant data from the header */
                u32 clock = le32_to_cpu(hdr->clock);
                u8 rate = hdr->rate;
                u16 freq = le16_to_cpu(hdr->freq);
                u8 rssi = hdr->rssi;

                skb_pull(*skb, sizeof (struct rfmon_header));

                if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) {
                        struct sk_buff *newskb = skb_copy_expand(*skb,
                                                                 sizeof (struct
                                                                         avs_80211_1_header),
                                                                 0, GFP_ATOMIC);
                        if (newskb) {
                                dev_kfree_skb_irq(*skb);
                                *skb = newskb;
                        } else
                                return -1;
                        /* This behavior is not very subtle... */
                }

                /* make room for the new header and fill it. */
                avs =
                    (struct avs_80211_1_header *) skb_push(*skb,
                                                           sizeof (struct
                                                                   avs_80211_1_header));

                avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
                avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
                avs->mactime = cpu_to_be64(clock);
                avs->hosttime = cpu_to_be64(jiffies);
                avs->phytype = cpu_to_be32(6);  /* OFDM: 6 for (g), 8 for (a) */
                avs->channel = cpu_to_be32(channel_of_freq(freq));
                avs->datarate = cpu_to_be32(rate * 5);
                avs->antenna = cpu_to_be32(0);  /* unknown */
                avs->priority = cpu_to_be32(0); /* unknown */
                avs->ssi_type = cpu_to_be32(3); /* 2: dBm, 3: raw RSSI */
                avs->ssi_signal = cpu_to_be32(rssi & 0x7f);
                avs->ssi_noise = cpu_to_be32(priv->local_iwstatistics.qual.noise);     /* better than 'undefined', I assume */
                avs->preamble = cpu_to_be32(0); /* unknown */
                avs->encoding = cpu_to_be32(0); /* unknown */
        } else
                skb_pull(*skb, sizeof (struct rfmon_header));

        (*skb)->protocol = htons(ETH_P_802_2);
        skb_reset_mac_header(*skb);
        (*skb)->pkt_type = PACKET_OTHERHOST;

        return 0;
}

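/*
 * Pass the frame at the head of the RX data low queue up to the network
 * stack (or through the monitor-mode path), then refill the queue with
 * freshly allocated, DMA-mapped sk_buffs and advance the driver read
 * pointer.
 */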
int
islpci_eth_receive(islpci_private *priv)
{
        struct net_device *ndev = priv->ndev;
        isl38xx_control_block *control_block = priv->control_block;
        struct sk_buff *skb;
        u16 size;
        u32 index, offset;
        unsigned char *src;
        int discard = 0;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
#endif

        /* the device has written an Ethernet frame in the data area
         * of the sk_buff without updating the structure, do it now */
        index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
        size = le16_to_cpu(control_block->rx_data_low[index].size);
        skb = priv->data_low_rx[index];
        offset = ((unsigned long)
                  le32_to_cpu(control_block->rx_data_low[index].address) -
                  (unsigned long) skb->data) & 3;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_TRACING,
              "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ",
              control_block->rx_data_low[priv->free_data_rx].address, skb->data,
              skb->len, offset, skb->truesize);
#endif

        /* delete the streaming DMA mapping before processing the skb */
        pci_unmap_single(priv->pdev,
                         priv->pci_map_rx_address[index],
                         MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);

        /* update the skb structure and align the buffer */
        skb_put(skb, size);
        if (offset) {
                /* shift the buffer by the alignment offset to get the right frame */
                skb_pull(skb, 2);
                skb_put(skb, 2);
        }
#if VERBOSE > SHOW_ERROR_MESSAGES
        /* display the buffer contents for debugging */
        DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
        display_buffer((char *) skb->data, skb->len);
#endif

        /* check whether WDS is enabled and whether the data frame is a WDS frame */

        if (init_wds) {
                /* WDS enabled, remove the wds address occupying the first 6 bytes of the buffer */
                src = skb->data + 6;
                memmove(skb->data, src, skb->len - 6);
                skb_trim(skb, skb->len - 6);
        }
#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
        DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);

        /* display the buffer contents for debugging */
        DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
        display_buffer((char *) skb->data, skb->len);
#endif
        /* take care of monitor mode and spy monitoring. */
        if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
                skb->dev = ndev;
                discard = islpci_monitor_rx(priv, &skb);
        } else {
                if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
                        /* The packet has a rx_annex. Read it for spy monitoring, then
                         * remove it, while keeping the 2 leading MAC addresses.
                         */
                        struct iw_quality wstats;
                        struct rx_annex_header *annex =
                            (struct rx_annex_header *) skb->data;
                        wstats.level = annex->rfmon.rssi;
                        /* The noise value can be a bit outdated if nobody's
                         * reading wireless stats... */
                        wstats.noise = priv->local_iwstatistics.qual.noise;
                        wstats.qual = wstats.level - wstats.noise;
                        wstats.updated = 0x07;
                        /* Update spy records */
                        wireless_spy_update(ndev, annex->addr2, &wstats);

                        skb_copy_from_linear_data(skb,
                                                  (skb->data +
                                                   sizeof(struct rfmon_header)),
                                                  2 * ETH_ALEN);
                        skb_pull(skb, sizeof (struct rfmon_header));
                }
                skb->protocol = eth_type_trans(skb, ndev);
        }
        skb->ip_summed = CHECKSUM_NONE;
        ndev->stats.rx_packets++;
        ndev->stats.rx_bytes += size;

        /* deliver the skb to the network layer */
#ifdef ISLPCI_ETH_DEBUG
        printk
            ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
             skb->data[0], skb->data[1], skb->data[2], skb->data[3],
             skb->data[4], skb->data[5]);
#endif
        if (unlikely(discard)) {
                dev_kfree_skb_irq(skb);
                skb = NULL;
        } else
                netif_rx(skb);

        /* increment the read index for the rx data low queue */
        priv->free_data_rx++;

        /* add one or more sk_buff structures */
        while (index =
               le32_to_cpu(control_block->
                           driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
               index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
                /* allocate an sk_buff for received data frame storage,
                 * including any required alignment operations */
                skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
                if (unlikely(skb == NULL)) {
                        /* error allocating an sk_buff structure */
                        DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
                        break;
                }
                skb_reserve(skb, (4 - (long) skb->data) & 0x03);
                /* store the new skb structure pointer */
                index = index % ISL38XX_CB_RX_QSIZE;
                priv->data_low_rx[index] = skb;

#if VERBOSE > SHOW_ERROR_MESSAGES
                DEBUG(SHOW_TRACING,
                      "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ",
                      skb, skb->data, skb->len, index, skb->truesize);
#endif

                /* set the streaming DMA mapping for proper PCI bus operation */
                priv->pci_map_rx_address[index] =
                    pci_map_single(priv->pdev, (void *) skb->data,
                                   MAX_FRAGMENT_SIZE_RX + 2,
                                   PCI_DMA_FROMDEVICE);
                if (unlikely(!priv->pci_map_rx_address[index])) {
                        /* error mapping the buffer to a device-accessible memory address */
                        DEBUG(SHOW_ERROR_MESSAGES,
                              "Error mapping DMA address\n");

                        /* free the sk_buff structure before aborting */
                        dev_kfree_skb_irq(skb);
                        skb = NULL;
                        break;
                }
                /* update the fragment address */
                control_block->rx_data_low[index].address =
                        cpu_to_le32((u32)priv->pci_map_rx_address[index]);
                wmb();

                /* increment the driver read pointer */
                le32_add_cpu(&control_block->
                             driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
        }

        /* trigger the device */
        islpci_trigger(priv);

        return 0;
}

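/*
 * Worker for the reset task scheduled from the tx_timeout handler: reset
 * the device and wake the transmit queue once the reset has completed.
 */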
void
islpci_do_reset_and_wake(struct work_struct *work)
{
        islpci_private *priv = container_of(work, islpci_private, reset_task);

        islpci_reset(priv, 1);
        priv->reset_task_pending = 0;
        smp_wmb();
        netif_wake_queue(priv->ndev);
}

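/*
 * Transmit watchdog: count the error and schedule the reset worker unless
 * a reset is already pending.
 */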
void
islpci_eth_tx_timeout(struct net_device *ndev)
{
        islpci_private *priv = netdev_priv(ndev);

        /* increment the transmit error counter */
        ndev->stats.tx_errors++;

        if (!priv->reset_task_pending) {
                printk(KERN_WARNING
                       "%s: tx_timeout, scheduling reset\n", ndev->name);
                netif_stop_queue(ndev);
                priv->reset_task_pending = 1;
                schedule_work(&priv->reset_task);
        } else {
                printk(KERN_WARNING
                       "%s: tx_timeout, waiting for reset\n", ndev->name);
        }
}