/*****************************************************************************
 *
 * File: cxgb2.c
 * $Revision: 1.25 $
 * $Date: 2005/06/22 00:43:25 $
 * Description:
 *  Chelsio 10Gb Ethernet Driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * http://www.chelsio.com
 *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Maintainers: maintainers@chelsio.com
 *
 * Authors: Dimitrios Michailidis <dm@chelsio.com>
 *          Tina Yang <tainay@chelsio.com>
 *          Felix Marti <felix@chelsio.com>
 *          Scott Bardone <sbardone@chelsio.com>
 *          Kurt Ottaway <kottaway@chelsio.com>
 *          Frank DiMambro <frank@chelsio.com>
 *
 * History:
 *
 ****************************************************************************/

#include "common.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"

#include <linux/workqueue.h>

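/*
 * Helpers for the periodic MAC statistics task.  schedule_delayed_work()
 * takes its delay in jiffies, hence the interval in seconds is scaled by HZ.
 */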
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
        schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
        cancel_delayed_work(&ap->stats_update_task);
}

#define MAX_CMDQ_ENTRIES        16384
#define MAX_CMDQ1_ENTRIES       1024
#define MAX_RX_BUFFERS          16384
#define MAX_RX_JUMBO_BUFFERS    16384
#define MAX_TX_BUFFERS_HIGH     16384U
#define MAX_TX_BUFFERS_LOW      1536U
#define MAX_TX_BUFFERS          1460U
#define MIN_FL_ENTRIES          32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;     /* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

static const char pci_speed[][4] = {
        "33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct t1_rx_mode rm;

        rm.dev = dev;
        mac->ops->set_rx_mode(mac, &rm);
}

static void link_report(struct port_info *p)
{
        if (!netif_carrier_ok(p->dev))
                printk(KERN_INFO "%s: link down\n", p->dev->name);
        else {
                const char *s = "10Mbps";

                switch (p->link_config.speed) {
                case SPEED_10000: s = "10Gbps"; break;
                case SPEED_1000:  s = "1000Mbps"; break;
                case SPEED_100:   s = "100Mbps"; break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
                       p->dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct port_info *p = &adapter->port[port_id];

        if (link_stat != netif_carrier_ok(p->dev)) {
                if (link_stat)
                        netif_carrier_on(p->dev);
                else
                        netif_carrier_off(p->dev);
                link_report(p);

                /* On multi-port adapters, inform the TOE scheduler of the
                 * new link speed. */
                if ((speed > 0) && (adapter->params.nports > 1)) {
                        unsigned int sched_speed = 10;
                        switch (speed) {
                        case SPEED_1000:
                                sched_speed = 1000;
                                break;
                        case SPEED_100:
                                sched_speed = 100;
                                break;
                        case SPEED_10:
                                sched_speed = 10;
                                break;
                        }
                        t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
                }
        }
}

static void link_start(struct port_info *p)
{
        struct cmac *mac = p->mac;

        mac->ops->reset(mac);
        if (mac->ops->macaddress_set)
                mac->ops->macaddress_set(mac, p->dev->dev_addr);
        t1_set_rxmode(p->dev);
        t1_link_start(p->phy, mac, &p->link_config);
        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static void enable_hw_csum(struct adapter *adapter)
{
        if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
                t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
        t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
        int err = 0;

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = t1_init_hw_modules(adapter);
                if (err)
                        goto out_err;

                enable_hw_csum(adapter);
                adapter->flags |= FULL_INIT_DONE;
        }

        t1_interrupts_clear(adapter);

        /* pci_enable_msi() returns 0 on success, so has_msi is set only
         * when MSI is both permitted and actually enabled. */
        adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
        err = request_irq(adapter->pdev->irq, t1_interrupt,
                          adapter->params.has_msi ? 0 : IRQF_SHARED,
                          adapter->name, adapter);
        if (err) {
                if (adapter->params.has_msi)
                        pci_disable_msi(adapter->pdev);

                goto out_err;
        }

        t1_sge_start(adapter->sge);
        t1_interrupts_enable(adapter);
out_err:
        return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
        free_irq(adapter->pdev->irq, adapter);
        if (adapter->params.has_msi)
                pci_disable_msi(adapter->pdev);
}

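/*
 * Bring up a port.  The first open of any port also performs the one-time
 * adapter-wide initialization in cxgb_up(); open_device_map tracks which
 * ports are currently active.
 */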
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->ml_priv;
        int other_ports = adapter->open_device_map & PORT_MASK;

        napi_enable(&adapter->napi);
        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
                napi_disable(&adapter->napi);
                return err;
        }

        __set_bit(dev->if_port, &adapter->open_device_map);
        link_start(&adapter->port[dev->if_port]);
        netif_start_queue(dev);
        if (!other_ports && adapter->params.stats_update_period)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);

        t1_vlan_mode(adapter, dev->features);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct cmac *mac = p->mac;

        netif_stop_queue(dev);
        napi_disable(&adapter->napi);
        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
        netif_carrier_off(dev);

        clear_bit(dev->if_port, &adapter->open_device_map);
        if (adapter->params.stats_update_period &&
            !(adapter->open_device_map & PORT_MASK)) {
                /* Stop statistics accumulation.  The empty lock/unlock pair
                 * makes the cleared bit visible to, and waits out, a
                 * concurrently running mac_stats_task() before the delayed
                 * work is cancelled. */
                smp_mb__after_clear_bit();
                spin_lock(&adapter->work_lock);         /* sync with update task */
                spin_unlock(&adapter->work_lock);
                cancel_mac_stats_update(adapter);
        }

        if (!adapter->open_device_map)
                cxgb_down(adapter);
        return 0;
}

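/*
 * ndo_get_stats handler: fold the hardware MAC counters into the generic
 * net_device_stats structure.  A full (rather than incremental) counter
 * read is requested so the returned values are current.
 */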
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct net_device_stats *ns = &p->netstats;
        const struct cmac_statistics *pstats;

        /* Do a full update of the MAC stats */
        pstats = p->mac->ops->statistics_update(p->mac,
                                                MAC_STATS_UPDATE_FULL);

        ns->tx_packets = pstats->TxUnicastFramesOK +
                pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

        ns->rx_packets = pstats->RxUnicastFramesOK +
                pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

        ns->tx_bytes = pstats->TxOctetsOK;
        ns->rx_bytes = pstats->RxOctetsOK;

        ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
                pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
        ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
                pstats->RxFCSErrors + pstats->RxAlignErrors +
                pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
                pstats->RxSymbolErrors + pstats->RxRuntErrors;

        ns->multicast = pstats->RxMulticastFramesOK;
        ns->collisions = pstats->TxTotalCollisions;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->RxFrameTooLongErrors +
                pstats->RxJabberErrors;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->RxFCSErrors;
        ns->rx_frame_errors = pstats->RxAlignErrors;
        ns->rx_fifo_errors = 0;
        ns->rx_missed_errors = 0;

        /* detailed tx_errors */
        ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->TxUnderrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = pstats->TxLateCollisions;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->ml_priv;

        adapter->msg_enable = val;
}

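/*
 * Statistic names reported through "ethtool -S".  The order here must match
 * the order in which get_stats() below fills in the data array.
 */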
static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames",
        "TxFramesWithDeferredXmissions",
        "TxLateCollisions",
        "TxTotalCollisions",
        "TxFramesAbortedDueToXSCollisions",
        "TxUnderrun",
        "TxLengthErrors",
        "TxInternalMACXmitError",
        "TxFramesWithExcessiveDeferral",
        "TxFCSErrors",
        "TxJumboFramesOk",
        "TxJumboOctetsOk",

        "RxOctetsOK",
        "RxOctetsBad",
        "RxUnicastFramesOK",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames",
        "RxFCSErrors",
        "RxAlignErrors",
        "RxSymbolErrors",
        "RxDataErrors",
        "RxSequenceErrors",
        "RxRuntErrors",
        "RxJabberErrors",
        "RxInternalMACRcvError",
        "RxInRangeLengthErrors",
        "RxOutOfRangeLengthField",
        "RxFrameTooLongErrors",
        "RxJumboFramesOk",
        "RxJumboOctetsOk",

        /* Port stats */
        "RxCsumGood",
        "TxCsumOffload",
        "TxTso",
        "RxVlan",
        "TxVlan",
        "TxNeedHeadroom",

        /* Interrupt stats */
        "rx drops",
        "pure_rsps",
        "unhandled irqs",
        "respQ_empty",
        "respQ_overflow",
        "freelistQ_empty",
        "pkt_too_big",
        "pkt_mismatch",
        "cmdQ_full0",
        "cmdQ_full1",

        "espi_DIP2ParityErr",
        "espi_DIP4Err",
        "espi_RxDrops",
        "espi_TxDrops",
        "espi_RxOvfl",
        "espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = dev->ml_priv;

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
}

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        const struct cmac_statistics *s;
        const struct sge_intr_counts *t;
        struct sge_port_stats ss;

        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
        t = t1_sge_get_intr_counts(adapter->sge);
        t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

        *data++ = s->TxOctetsOK;
        *data++ = s->TxOctetsBad;
        *data++ = s->TxUnicastFramesOK;
        *data++ = s->TxMulticastFramesOK;
        *data++ = s->TxBroadcastFramesOK;
        *data++ = s->TxPauseFrames;
        *data++ = s->TxFramesWithDeferredXmissions;
        *data++ = s->TxLateCollisions;
        *data++ = s->TxTotalCollisions;
        *data++ = s->TxFramesAbortedDueToXSCollisions;
        *data++ = s->TxUnderrun;
        *data++ = s->TxLengthErrors;
        *data++ = s->TxInternalMACXmitError;
        *data++ = s->TxFramesWithExcessiveDeferral;
        *data++ = s->TxFCSErrors;
        *data++ = s->TxJumboFramesOK;
        *data++ = s->TxJumboOctetsOK;

        *data++ = s->RxOctetsOK;
        *data++ = s->RxOctetsBad;
        *data++ = s->RxUnicastFramesOK;
        *data++ = s->RxMulticastFramesOK;
        *data++ = s->RxBroadcastFramesOK;
        *data++ = s->RxPauseFrames;
        *data++ = s->RxFCSErrors;
        *data++ = s->RxAlignErrors;
        *data++ = s->RxSymbolErrors;
        *data++ = s->RxDataErrors;
        *data++ = s->RxSequenceErrors;
        *data++ = s->RxRuntErrors;
        *data++ = s->RxJabberErrors;
        *data++ = s->RxInternalMACRcvError;
        *data++ = s->RxInRangeLengthErrors;
        *data++ = s->RxOutOfRangeLengthField;
        *data++ = s->RxFrameTooLongErrors;
        *data++ = s->RxJumboFramesOK;
        *data++ = s->RxJumboOctetsOK;

        *data++ = ss.rx_cso_good;
        *data++ = ss.tx_cso;
        *data++ = ss.tx_tso;
        *data++ = ss.vlan_xtract;
        *data++ = ss.vlan_insert;
        *data++ = ss.tx_need_hdrroom;

        *data++ = t->rx_drops;
        *data++ = t->pure_rsps;
        *data++ = t->unhandled_irqs;
        *data++ = t->respQ_empty;
        *data++ = t->respQ_overflow;
        *data++ = t->freelistQ_empty;
        *data++ = t->pkt_too_big;
        *data++ = t->pkt_mismatch;
        *data++ = t->cmdQ_full[0];
        *data++ = t->cmdQ_full[1];

        if (adapter->espi) {
                const struct espi_intr_counts *e;

                e = t1_espi_get_intr_counts(adapter->espi);
                *data++ = e->DIP2_parity_err;
                *data++ = e->DIP4_err;
                *data++ = e->rx_drops;
                *data++ = e->tx_drops;
                *data++ = e->rx_ovflw;
                *data++ = e->parity_err;
        }
}

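/*
 * Copy a contiguous block of 32-bit registers, given as inclusive byte
 * offsets [start, end], into the register dump buffer at the same offsets.
 */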
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->ml_priv;

        /*
         * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
         */
        regs->version = 2;

        memset(buf, 0, T2_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
        reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
        reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
        reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
        reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
        reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
        reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
        reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
        reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
        reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                ethtool_cmd_speed_set(cmd, p->link_config.speed);
                cmd->duplex = p->link_config.duplex;
        } else {
                ethtool_cmd_speed_set(cmd, -1);
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy->mdio.prtad;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                         ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                u32 speed = ethtool_cmd_speed(cmd);
                int cap = speed_duplex_to_caps(speed, cmd->duplex);

                /* 1Gbps cannot be forced, only autonegotiated. */
                if (!(lc->supported & cap) || (speed == SPEED_1000))
                        return -EINVAL;
                lc->requested_speed = speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                /* If more than one bit remains set, fall back to
                 * advertising everything the port supports. */
                if (cmd->advertising & (cmd->advertising - 1))
                        cmd->advertising = lc->supported;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t1_link_start(p->phy, p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t1_link_start(p->phy, p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
                                                         lc->fc);
        }
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->ml_priv;
        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_CMDQ_ENTRIES;

        e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
        e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
        e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

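/*
 * Validate and store new ring sizes.  The command queue must at least be
 * able to hold one maximally fragmented packet per port plus one, hence
 * the (nports + 1) * (MAX_SKB_FRAGS + 1) lower bound on tx_pending.
 */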
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->ml_priv;
        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_CMDQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
        adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
        adapter->params.sge.cmdQ_size[0] = e->tx_pending;
        adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
                MAX_CMDQ1_ENTRIES : e->tx_pending;
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->ml_priv;

        adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
        adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
        adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
        t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->ml_priv;

        c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
        c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
        c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
        return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
        struct adapter *adapter = dev->ml_priv;

        return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}

#define EEPROM_MAGIC(ap) \
        (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i;
        u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
        struct adapter *adapter = dev->ml_priv;

        e->magic = EEPROM_MAGIC(adapter);
        /* The EEPROM is read in 32-bit words: start at the aligned word
         * containing e->offset, read until the requested window is covered,
         * then copy out exactly the bytes asked for. */
        for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
                t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
        memcpy(data, buf + e->offset, e->len);
        return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
        .get_settings      = get_settings,
        .set_settings      = set_settings,
        .get_drvinfo       = get_drvinfo,
        .get_msglevel      = get_msglevel,
        .set_msglevel      = set_msglevel,
        .get_ringparam     = get_sge_param,
        .set_ringparam     = set_sge_param,
        .get_coalesce      = get_coalesce,
        .set_coalesce      = set_coalesce,
        .get_eeprom_len    = get_eeprom_len,
        .get_eeprom        = get_eeprom,
        .get_pauseparam    = get_pauseparam,
        .set_pauseparam    = set_pauseparam,
        .get_link          = ethtool_op_get_link,
        .get_strings       = get_strings,
        .get_sset_count    = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len      = get_regs_len,
        .get_regs          = get_regs,
};

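/* MII ioctls (SIOCGMIIREG and friends) pass straight through to the
 * port's MDIO layer. */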
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        struct adapter *adapter = dev->ml_priv;
        struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;

        return mdio_mii_ioctl(mdio, if_mii(req), cmd);
}

static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;

        if (!mac->ops->set_mtu)
                return -EOPNOTSUPP;
        if (new_mtu < 68)
                return -EINVAL;
        if ((ret = mac->ops->set_mtu(mac, new_mtu)))
                return ret;
        dev->mtu = new_mtu;
        return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
        struct adapter *adapter = dev->ml_priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct sockaddr *addr = p;

        if (!mac->ops->macaddress_set)
                return -EOPNOTSUPP;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        mac->ops->macaddress_set(mac, dev->dev_addr);
        return 0;
}

static netdev_features_t t1_fix_features(struct net_device *dev,
                                         netdev_features_t features)
{
        /*
         * Since there is no support for separate rx/tx vlan accel
         * enable/disable make sure tx flag is always in same state as rx.
         */
        if (features & NETIF_F_HW_VLAN_RX)
                features |= NETIF_F_HW_VLAN_TX;
        else
                features &= ~NETIF_F_HW_VLAN_TX;

        return features;
}

static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
        netdev_features_t changed = dev->features ^ features;
        struct adapter *adapter = dev->ml_priv;

        if (changed & NETIF_F_HW_VLAN_RX)
                t1_vlan_mode(adapter, features);

        return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
        unsigned long flags;
        struct adapter *adapter = dev->ml_priv;

        local_irq_save(flags);
        t1_interrupt(adapter->pdev->irq, adapter);
        local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
        int i;
        struct adapter *adapter =
                container_of(work, struct adapter, stats_update_task.work);

        for_each_port(adapter, i) {
                struct port_info *p = &adapter->port[i];

                if (netif_running(p->dev))
                        p->mac->ops->statistics_update(p->mac,
                                                       MAC_STATS_UPDATE_FAST);
        }

        /* Schedule the next statistics update if any port is active. */
        spin_lock(&adapter->work_lock);
        if (adapter->open_device_map & PORT_MASK)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);
        spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
        struct adapter *adapter =
                container_of(work, struct adapter, ext_intr_handler_task);

        t1_elmer0_ext_intr_handler(adapter);

        /* Now reenable external interrupts */
        spin_lock_irq(&adapter->async_lock);
        adapter->slow_intr_mask |= F_PL_INTR_EXT;
        writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
               adapter->regs + A_PL_ENABLE);
        spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
        /*
         * Schedule a task to handle external interrupts as we require
         * a process context.  We disable EXT interrupts in the interim
         * and let the task reenable them when it's done.
         */
        adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
               adapter->regs + A_PL_ENABLE);
        schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
        if (adapter->flags & FULL_INIT_DONE) {
                t1_sge_stop(adapter->sge);
                t1_interrupts_disable(adapter);
        }
        pr_alert("%s: encountered fatal error, operation suspended\n",
                 adapter->name);
}

static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open            = cxgb_open,
        .ndo_stop            = cxgb_close,
        .ndo_start_xmit      = t1_start_xmit,
        .ndo_get_stats       = t1_get_stats,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_rx_mode     = t1_set_rxmode,
        .ndo_do_ioctl        = t1_ioctl,
        .ndo_change_mtu      = t1_change_mtu,
        .ndo_set_mac_address = t1_set_mac_addr,
        .ndo_fix_features    = t1_fix_features,
        .ndo_set_features    = t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = t1_netpoll,
#endif
};

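/*
 * PCI probe, called once per adapter: allocate one net_device per port
 * (the adapter state itself is carved out of the first port's netdev
 * private area), map the register BAR, and register the interfaces.
 */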
static int __devinit init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        static int version_printed;

        int i, err, pci_using_dac = 0;
        unsigned long mmio_start, mmio_len;
        const struct board_info *bi;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        if (!version_printed) {
                printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
                       DRV_VERSION);
                ++version_printed;
        }

        err = pci_enable_device(pdev);
        if (err)
                return err;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                pr_err("%s: cannot find PCI device memory base address\n",
                       pci_name(pdev));
                err = -ENODEV;
                goto out_disable_pdev;
        }

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;

                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
                        pr_err("%s: unable to obtain 64-bit DMA for "
                               "consistent allocations\n", pci_name(pdev));
                        err = -ENODEV;
                        goto out_disable_pdev;
                }

        } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
                pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        pci_set_master(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        bi = t1_get_board_info(ent->driver_data);

        for (i = 0; i < bi->port_number; ++i) {
                struct net_device *netdev;

                /* The adapter structure is allocated together with the
                 * first port's net_device; later ports get a plain one. */
                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                if (!adapter) {
                        adapter = netdev_priv(netdev);
                        adapter->pdev = pdev;
                        adapter->port[0].dev = netdev;  /* so we don't leak it */

                        adapter->regs = ioremap(mmio_start, mmio_len);
                        if (!adapter->regs) {
                                pr_err("%s: cannot map device registers\n",
                                       pci_name(pdev));
                                err = -ENOMEM;
                                goto out_free_dev;
                        }

                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
                                err = -ENODEV;  /* Can't handle this chip rev */
                                goto out_free_dev;
                        }

                        adapter->name = pci_name(pdev);
                        adapter->msg_enable = dflt_msg_enable;
                        adapter->mmio_len = mmio_len;

                        spin_lock_init(&adapter->tpi_lock);
                        spin_lock_init(&adapter->work_lock);
                        spin_lock_init(&adapter->async_lock);
                        spin_lock_init(&adapter->mac_lock);

                        INIT_WORK(&adapter->ext_intr_handler_task,
                                  ext_intr_task);
                        INIT_DELAYED_WORK(&adapter->stats_update_task,
                                          mac_stats_task);

                        pci_set_drvdata(pdev, netdev);
                }

                pi = &adapter->port[i];
                pi->dev = netdev;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->if_port = i;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->ml_priv = adapter;
                netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_LLTX;

                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;
                if (vlan_tso_capable(adapter)) {
                        netdev->features |=
                                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                        netdev->hw_features |= NETIF_F_HW_VLAN_RX;

                        /* T204: disable TSO */
                        if (!(is_T2(adapter)) || bi->port_number != 4) {
                                netdev->hw_features |= NETIF_F_TSO;
                                netdev->features |= NETIF_F_TSO;
                        }
                }

                netdev->netdev_ops = &cxgb_netdev_ops;
                netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
                        sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

                netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

                SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
        }

        if (t1_init_sw_modules(adapter, bi) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for (i = 0; i < bi->port_number; ++i) {
                err = register_netdev(adapter->port[i].dev);
                if (err)
                        pr_warning("%s: cannot register net device %s, skipping\n",
                                   pci_name(pdev), adapter->port[i].dev->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i].dev->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                pr_err("%s: could not register any net devices\n",
                       pci_name(pdev));
                goto out_release_adapter_res;
        }

        printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
               bi->desc, adapter->params.chip_revision,
               adapter->params.pci.is_pcix ? "PCIX" : "PCI",
               adapter->params.pci.speed, adapter->params.pci.width);

        /*
         * Set the T1B ASIC and memory clocks.
         */
        if (t1powersave)
                adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
        else
                adapter->t1powersave = HCLOCK;
        if (t1_is_T1B(adapter))
                t1_clock(adapter, t1powersave);

        return 0;

out_release_adapter_res:
        t1_free_sw_modules(adapter);
out_free_dev:
        if (adapter) {
                if (adapter->regs)
                        iounmap(adapter->regs);
                for (i = bi->port_number - 1; i >= 0; --i)
                        if (adapter->port[i].dev)
                                free_netdev(adapter->port[i].dev);
        }
        pci_release_regions(pdev);
out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

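/*
 * Serially shift nbits of bitdata, MSB first, into the Elmer0 GPO
 * register: each data bit is set up while SCLOCK is driven low and is
 * latched on the subsequent rising clock edge.
 */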
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
        int data;
        int i;
        u32 val;

        enum {
                S_CLOCK = 1 << 3,
                S_DATA = 1 << 4
        };

        for (i = (nbits - 1); i > -1; i--) {

                udelay(50);

                data = ((bitdata >> i) & 0x1);
                __t1_tpi_read(adapter, A_ELMER0_GPO, &val);

                if (data)
                        val |= S_DATA;
                else
                        val &= ~S_DATA;

                udelay(50);

                /* Set SCLOCK low */
                val &= ~S_CLOCK;
                __t1_tpi_write(adapter, A_ELMER0_GPO, val);

                udelay(50);

                /* Write SCLOCK high */
                val |= S_CLOCK;
                __t1_tpi_write(adapter, A_ELMER0_GPO, val);

        }
}

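/*
 * Switch the T1B between its full-speed (HCLOCK) and powersave (LCLOCK)
 * operating points by reprogramming the core and memory clock
 * synthesizers: the values (presumably the M/N/T synthesizer parameters
 * named in the enum below) are shifted out through bit_bang() and then
 * latched with S_LOAD_CORE / S_LOAD_MEM.
 */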
static int t1_clock(struct adapter *adapter, int mode)
{
        u32 val;
        int M_CORE_VAL;
        int M_MEM_VAL;

        enum {
                M_CORE_BITS = 9,
                T_CORE_VAL  = 0,
                T_CORE_BITS = 2,
                N_CORE_VAL  = 0,
                N_CORE_BITS = 2,
                M_MEM_BITS  = 9,
                T_MEM_VAL   = 0,
                T_MEM_BITS  = 2,
                N_MEM_VAL   = 0,
                N_MEM_BITS  = 2,
                NP_LOAD     = 1 << 17,
                S_LOAD_MEM  = 1 << 5,
                S_LOAD_CORE = 1 << 6,
                S_CLOCK     = 1 << 3
        };

        if (!t1_is_T1B(adapter))
                return -ENODEV;         /* Can't re-clock this chip. */

        if (mode & 2)
                return 0;               /* show current mode. */

        if ((adapter->t1powersave & 1) == (mode & 1))
                return -EALREADY;       /* ASIC already running in mode. */

        if ((mode & 1) == HCLOCK) {
                M_CORE_VAL = 0x14;
                M_MEM_VAL = 0x18;
                adapter->t1powersave = HCLOCK;  /* overclock */
        } else {
                M_CORE_VAL = 0xe;
                M_MEM_VAL = 0x10;
                adapter->t1powersave = LCLOCK;  /* underclock */
        }

        /* Don't interrupt this serial stream! */
        spin_lock(&adapter->tpi_lock);

        /* Initialize for ASIC core */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= NP_LOAD;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_CORE;
        val &= ~S_CLOCK;
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Serial program the ASIC clock synthesizer */
        bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
        bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
        bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
        udelay(50);

        /* Finish ASIC core */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= S_LOAD_CORE;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_CORE;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Initialize for memory */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= NP_LOAD;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_MEM;
        val &= ~S_CLOCK;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Serial program the memory clock synthesizer */
        bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
        bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
        bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
        udelay(50);

        /* Finish memory */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= S_LOAD_MEM;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_MEM;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);

        spin_unlock(&adapter->tpi_lock);

        return 0;
}

static inline void t1_sw_reset(struct pci_dev *pdev)
{
        /* Bounce the chip through power state D3hot and back to D0; this
         * acts as a software reset of the device. */
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

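/*
 * PCI remove: unregister the ports, tear down software state in the
 * reverse order of init_one(), and leave the chip freshly reset.
 */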
static void __devexit remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct adapter *adapter = dev->ml_priv;
        int i;

        for_each_port(adapter, i) {
                if (test_bit(i, &adapter->registered_device_map))
                        unregister_netdev(adapter->port[i].dev);
        }

        t1_free_sw_modules(adapter);
        iounmap(adapter->regs);

        while (--i >= 0) {
                if (adapter->port[i].dev)
                        free_netdev(adapter->port[i].dev);
        }

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        t1_sw_reset(pdev);
}

static struct pci_driver driver = {
        .name     = DRV_NAME,
        .id_table = t1_pci_tbl,
        .probe    = init_one,
        .remove   = __devexit_p(remove_one),
};

static int __init t1_init_module(void)
{
        return pci_register_driver(&driver);
}

static void __exit t1_cleanup_module(void)
{
        pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);