/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "spi.h"
#include "regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "selftest.h"

/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */

static void siena_init_wol(struct efx_nic *efx);
static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);

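/* Push the per-channel interrupt moderation setting to the hardware.
 * channel->irq_moderation is expressed in hardware timer ticks of
 * efx->timer_quantum_ns each (see siena_probe_nvconfig()); a value of
 * zero disables the hold-off timer entirely.
 */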
static void siena_push_irq_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;

	if (channel->irq_moderation)
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_CZ_TC_TIMER_MODE,
				     FFE_CZ_TIMER_MODE_INT_HLDOFF,
				     FRF_CZ_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	else
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_CZ_TC_TIMER_MODE,
				     FFE_CZ_TIMER_MODE_DIS,
				     FRF_CZ_TC_TIMER_VAL, 0);
	efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}

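/* MDIO accesses on Siena are proxied through the management controller:
 * the MCDI MDIO commands perform the bus transaction and return a status
 * word, which is mapped to -EIO here unless it is MC_CMD_MDIO_STATUS_GOOD.
 */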
static int siena_mdio_write(struct net_device *net_dev,
			    int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	uint32_t status;
	int rc;

	rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
				 addr, value, &status);
	if (rc)
		return rc;
	if (status != MC_CMD_MDIO_STATUS_GOOD)
		return -EIO;

	return 0;
}

static int siena_mdio_read(struct net_device *net_dev,
			   int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	uint16_t value;
	uint32_t status;
	int rc;

	rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
				addr, &value, &status);
	if (rc)
		return rc;
	if (status != MC_CMD_MDIO_STATUS_GOOD)
		return -EIO;

	return (int)value;
}

/* This call is responsible for hooking in the MAC and PHY operations */
static int siena_probe_port(struct efx_nic *efx)
{
	int rc;

	/* Hook in PHY operations table */
	efx->phy_op = &efx_mcdi_phy_ops;

	/* Set up MDIO structure for PHY */
	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	efx->mdio.mdio_read = siena_mdio_read;
	efx->mdio.mdio_write = siena_mdio_write;

	/* Fill out MDIO structure, loopback modes, and initial link state */
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Allocate buffer for stats */
	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
				  MC_CMD_MAC_NSTATS * sizeof(u64));
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));

	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);

	return 0;
}

static void siena_remove_port(struct efx_nic *efx)
{
	efx->phy_op->remove(efx);
	efx_nic_free_buffer(efx, &efx->stats_buffer);
}

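/* Flow control is turned off while queues are being flushed, so that the
 * flushes cannot be held up by incoming pause frames.  efx->fc_disable is
 * a reference count; the MAC settings are re-applied via MCDI only on the
 * 0->1 and 1->0 transitions.
 */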
void siena_prepare_flush(struct efx_nic *efx)
{
	if (efx->fc_disable++ == 0)
		efx_mcdi_set_mac(efx);
}

void siena_finish_flush(struct efx_nic *efx)
{
	if (--efx->fc_disable == 0)
		efx_mcdi_set_mac(efx);
}

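/* Register masks for the register self-test.  Each entry names a register
 * and the set of bits that efx_nic_test_registers() exercises as writable;
 * bits outside the mask are preserved.
 */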
static const struct efx_nic_register_test siena_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_CZ_USR_EV_CFG,
	  EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_CFG,
	  EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
	{ FR_AZ_TX_RESERVED,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_RX_RSS_TKEY,
	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
	{ FR_CZ_RX_RSS_IPV6_REG1,
	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
	{ FR_CZ_RX_RSS_IPV6_REG2,
	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
	{ FR_CZ_RX_RSS_IPV6_REG3,
	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
};

static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	enum reset_type reset_method = RESET_TYPE_ALL;
	int rc, rc2;

	efx_reset_down(efx, reset_method);

	/* Reset the chip immediately so that it is completely
	 * quiescent regardless of what any VF driver does.
	 */
	rc = siena_reset_hw(efx, reset_method);
	if (rc)
		goto out;

	tests->registers =
		efx_nic_test_registers(efx, siena_register_tests,
				       ARRAY_SIZE(siena_register_tests))
		? -1 : 1;

	rc = siena_reset_hw(efx, reset_method);
out:
	rc2 = efx_reset_up(efx, reset_method, rc == 0);
	return rc ? rc : rc2;
}

/**************************************************************************
 *
 * Device reset
 *
 **************************************************************************
 */

static enum reset_type siena_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_ALL;
}

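/* Translate the ethtool ETH_RESET_* flag word into a NIC reset method.
 * The flags covered by the chosen method are cleared from *flags: a full
 * MC reset (RESET_TYPE_WORLD) is used when a management reset is also
 * requested, otherwise a port reset (RESET_TYPE_ALL) suffices.  There is
 * no invisible reset, so anything less returns -EINVAL.
 */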
static int siena_map_reset_flags(u32 *flags)
{
	enum {
		SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
				    ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				    ETH_RESET_PHY),
		SIENA_RESET_MC = (SIENA_RESET_PORT |
				  ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
	};

	if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
		*flags &= ~SIENA_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
		*flags &= ~SIENA_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
		return efx_mcdi_reset_port(efx);
}

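/* Read the board configuration over MCDI.  This provides the permanent
 * MAC address and the capability flags, from which the event timer
 * quantum is derived: 3072ns when the MC reports turbo mode, 6144ns
 * otherwise (768 clock cycles in both cases).
 */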
static int siena_probe_nvconfig(struct efx_nic *efx)
{
	u32 caps = 0;
	int rc;

	rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps);

	efx->timer_quantum_ns =
		(caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
		3072 : 6144; /* 768 cycles */
	return rc;
}

static void siena_dimension_resources(struct efx_nic *efx)
{
	/* Each port has a small block of internal SRAM dedicated to
	 * the buffer table and descriptor caches.  In theory we can
	 * map both blocks to one port, but we don't.
	 */
	efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
}

static int siena_probe_nic(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data;
	bool already_attached = false;
	efx_oword_t reg;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	if (efx_nic_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Siena FPGA not supported\n");
		rc = -ENODEV;
		goto fail1;
	}

	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
	efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;

	efx_mcdi_init(efx);

	/* Recover from a failed assertion before probing */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		goto fail1;

	/* Let the BMC know that the driver is now in charge of link and
	 * filter settings.  We must do this before we reset the NIC */
	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Unable to register driver with MCPU\n");
		goto fail2;
	}
	if (already_attached)
		/* Not a fatal error */
		netif_err(efx, probe, efx->net_dev,
			  "Host already registered with MCPU\n");

	/* Now we can reset the NIC */
	rc = siena_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	siena_init_wol(efx);

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (unsigned long long)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (unsigned long long)virt_to_phys(efx->irq_status.addr));

	/* Read in the non-volatile configuration */
	rc = siena_probe_nvconfig(efx);
	if (rc == -EINVAL) {
		netif_err(efx, probe, efx->net_dev,
			  "NVRAM is invalid therefore using defaults\n");
		efx->phy_type = PHY_TYPE_NONE;
		efx->mdio.prtad = MDIO_PRTAD_NONE;
	} else if (rc) {
		goto fail5;
	}

	rc = efx_mcdi_mon_probe(efx);
	if (rc)
		goto fail5;

	efx_sriov_probe(efx);

	return 0;

fail5:
	efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
	efx_mcdi_drv_attach(efx, false, NULL);
fail2:
fail1:
	kfree(efx->nic_data);
	return rc;
}

/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
static int siena_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Recover from a failed assertion post-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	/* Squash TX of packets of 16 bytes or less */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);

	efx_reado(efx, &temp, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
	/* Enable hash insertion.  This is broken for the 'Falcon' hash
	 * if IPv6 hashing is also enabled, so also select Toeplitz
	 * TCP/IPv4 and IPv4 hashes. */
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
	efx_writeo(efx, &temp, FR_AZ_RX_CFG);

	/* Set hash key for IPv4 */
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

	/* Enable IPv6 RSS */
	BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
		     2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
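	/* The Toeplitz key is spread across three registers: 16 bytes each
	 * in IPV6_REG1 and IPV6_REG2, with the remaining bytes in the
	 * TKEY_HI field (at bit 0) of IPV6_REG3 alongside the IPv6 hash
	 * enable bits.
	 */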
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
	memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
	EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
			     FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
	memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		return rc;

	/* Set destination of both TX and RX Flush events */
	EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
	efx_writeo(efx, &temp, FR_BZ_DP_CTRL);

	EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
	efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);

	efx_nic_init_common(efx);
	return 0;
}

static void siena_remove_nic(struct efx_nic *efx)
{
	efx_mcdi_mon_remove(efx);

	efx_nic_free_buffer(efx, &efx->irq_status);

	siena_reset_hw(efx, RESET_TYPE_ALL);

	/* Relinquish the device back to the BMC */
	efx_mcdi_drv_attach(efx, false, NULL);

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}

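/* MAC statistics are DMAed by the MC into stats_buffer, bracketed by a
 * generation count (GENERATION_START / GENERATION_END).  A snapshot is
 * only consumed if the two words still match after the copy; otherwise
 * the read raced with an update and is retried.  GENERATION_END equal to
 * STATS_GENERATION_INVALID means no snapshot has been written yet.
 */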
#define STATS_GENERATION_INVALID ((__force __le64)(-1))

static int siena_try_update_nic_stats(struct efx_nic *efx)
{
	__le64 *dma_stats;
	struct efx_mac_stats *mac_stats;
	__le64 generation_start, generation_end;

	mac_stats = &efx->mac_stats;
	dma_stats = efx->stats_buffer.addr;

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == STATS_GENERATION_INVALID)
		return 0;
	rmb();

#define MAC_STAT(M, D) \
	mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])

	MAC_STAT(tx_bytes, TX_BYTES);
	MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
	mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
				    mac_stats->tx_bad_bytes);
	MAC_STAT(tx_packets, TX_PKTS);
	MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
	MAC_STAT(tx_pause, TX_PAUSE_PKTS);
	MAC_STAT(tx_control, TX_CONTROL_PKTS);
	MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
	MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
	MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
	MAC_STAT(tx_lt64, TX_LT64_PKTS);
	MAC_STAT(tx_64, TX_64_PKTS);
	MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
	MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
	MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
	MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
	MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
	MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
	MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
	mac_stats->tx_collision = 0;
	MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
	MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
	MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
	MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
	MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
	mac_stats->tx_collision = (mac_stats->tx_single_collision +
				   mac_stats->tx_multiple_collision +
				   mac_stats->tx_excessive_collision +
				   mac_stats->tx_late_collision);
	MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
	MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
	MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
	MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
	MAC_STAT(rx_bytes, RX_BYTES);
	MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
	mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
				    mac_stats->rx_bad_bytes);
	MAC_STAT(rx_packets, RX_PKTS);
	MAC_STAT(rx_good, RX_GOOD_PKTS);
	MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
	MAC_STAT(rx_pause, RX_PAUSE_PKTS);
	MAC_STAT(rx_control, RX_CONTROL_PKTS);
	MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
	MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
	MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
	MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
	MAC_STAT(rx_64, RX_64_PKTS);
	MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
	MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
	MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
	MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
	MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
	MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
	MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
	mac_stats->rx_bad_lt64 = 0;
	mac_stats->rx_bad_64_to_15xx = 0;
	mac_stats->rx_bad_15xx_to_jumbo = 0;
	MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
	MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
	mac_stats->rx_missed = 0;
	MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
	MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
	MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
	MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
	MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
	mac_stats->rx_good_lt64 = 0;

	efx->n_rx_nodesc_drop_cnt =
		le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);

#undef MAC_STAT

	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;

	return 0;
}

static void siena_update_nic_stats(struct efx_nic *efx)
{
	int retry;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us) */
	for (retry = 0; retry < 100; ++retry) {
		if (siena_try_update_nic_stats(efx) == 0)
			return;
		udelay(100);
	}

	/* Use the old values instead */
}

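/* Start and stop statistics DMA from the MC into stats_buffer.  The
 * generation word is invalidated before (re)starting so that
 * siena_try_update_nic_stats() does not consume a stale snapshot.
 */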
static void siena_start_nic_stats(struct efx_nic *efx)
{
	__le64 *dma_stats = efx->stats_buffer.addr;

	dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;

	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
			   MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
}

static void siena_stop_nic_stats(struct efx_nic *efx)
{
	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
}

/**************************************************************************
 *
 * Wake on LAN
 *
 **************************************************************************
 */

static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	struct siena_nic_data *nic_data = efx->nic_data;

	wol->supported = WAKE_MAGIC;
	if (nic_data->wol_filter_id != -1)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}


static int siena_set_wol(struct efx_nic *efx, u32 type)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	int rc;

	if (type & ~WAKE_MAGIC)
		return -EINVAL;

	if (type & WAKE_MAGIC) {
		if (nic_data->wol_filter_id != -1)
			efx_mcdi_wol_filter_remove(efx,
						   nic_data->wol_filter_id);
		rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
						   &nic_data->wol_filter_id);
		if (rc)
			goto fail;

		pci_wake_from_d3(efx->pci_dev, true);
	} else {
		rc = efx_mcdi_wol_filter_reset(efx);
		nic_data->wol_filter_id = -1;
		pci_wake_from_d3(efx->pci_dev, false);
		if (rc)
			goto fail;
	}

	return 0;
fail:
	netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
		  __func__, type, rc);
	return rc;
}

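/* Called at probe time and when resuming from Wake-on-LAN.  Query the MC
 * for any existing magic-packet filter so that the driver's WoL state
 * matches the hardware; if the query fails, reset all WoL filters to get
 * back to a known state.
 */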
static void siena_init_wol(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);

	if (rc != 0) {
		/* If it failed, attempt to get into a synchronised
		 * state with MC by resetting any set WoL filters */
		efx_mcdi_wol_filter_reset(efx);
		nic_data->wol_filter_id = -1;
	} else if (nic_data->wol_filter_id != -1) {
		pci_wake_from_d3(efx->pci_dev, true);
	}
}


/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c and nic.c
 *
 **************************************************************************
 */

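/* Method table and revision-dependent constants for Siena A0 silicon,
 * hooked up by the generic code in efx.c and nic.c.  MAC and PHY
 * reconfiguration, LED control and NVRAM testing are delegated to the
 * MCDI implementations.
 */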
const struct efx_nic_type siena_a0_nic_type = {
	.probe = siena_probe_nic,
	.remove = siena_remove_nic,
	.init = siena_init_nic,
	.dimension_resources = siena_dimension_resources,
	.fini = efx_port_dummy_op_void,
	.monitor = NULL,
	.map_reset_reason = siena_map_reset_reason,
	.map_reset_flags = siena_map_reset_flags,
	.reset = siena_reset_hw,
	.probe_port = siena_probe_port,
	.remove_port = siena_remove_port,
	.prepare_flush = siena_prepare_flush,
	.finish_flush = siena_finish_flush,
	.update_stats = siena_update_nic_stats,
	.start_stats = siena_start_nic_stats,
	.stop_stats = siena_stop_nic_stats,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = siena_push_irq_moderation,
	.reconfigure_mac = efx_mcdi_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_phy_reconfigure,
	.get_wol = siena_get_wol,
	.set_wol = siena_set_wol,
	.resume_wol = siena_init_wol,
	.test_chip = siena_test_chip,
	.test_nvram = efx_mcdi_nvram_test_all,

	.revision = EFX_REV_SIENA_A0,
	.mem_map_size = (FR_CZ_MC_TREG_SMEM +
			 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_hash_size = 0x10,
	.rx_buffer_padding = 0,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
	.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
};