1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
3
4 /*
5 * nfp_net_ethtool.c
6 * Netronome network device driver: ethtool support
7 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
8 * Jason McMullan <jason.mcmullan@netronome.com>
9 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
10 * Brad Petrus <brad.petrus@netronome.com>
11 */
12
13 #include <linux/bitfield.h>
14 #include <linux/kernel.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/interrupt.h>
18 #include <linux/pci.h>
19 #include <linux/ethtool.h>
20 #include <linux/firmware.h>
21 #include <linux/sfp.h>
22
23 #include "nfpcore/nfp.h"
24 #include "nfpcore/nfp_dev.h"
25 #include "nfpcore/nfp_nsp.h"
26 #include "nfp_app.h"
27 #include "nfp_main.h"
28 #include "nfp_net_ctrl.h"
29 #include "nfp_net_dp.h"
30 #include "nfp_net.h"
31 #include "nfp_port.h"
32
33 struct nfp_et_stat {
34 char name[ETH_GSTRING_LEN];
35 int off;
36 };
37
38 static const struct nfp_et_stat nfp_net_et_stats[] = {
39 /* Stats from the device */
40 { "dev_rx_discards", NFP_NET_CFG_STATS_RX_DISCARDS },
41 { "dev_rx_errors", NFP_NET_CFG_STATS_RX_ERRORS },
42 { "dev_rx_bytes", NFP_NET_CFG_STATS_RX_OCTETS },
43 { "dev_rx_uc_bytes", NFP_NET_CFG_STATS_RX_UC_OCTETS },
44 { "dev_rx_mc_bytes", NFP_NET_CFG_STATS_RX_MC_OCTETS },
45 { "dev_rx_bc_bytes", NFP_NET_CFG_STATS_RX_BC_OCTETS },
46 { "dev_rx_pkts", NFP_NET_CFG_STATS_RX_FRAMES },
47 { "dev_rx_mc_pkts", NFP_NET_CFG_STATS_RX_MC_FRAMES },
48 { "dev_rx_bc_pkts", NFP_NET_CFG_STATS_RX_BC_FRAMES },
49
50 { "dev_tx_discards", NFP_NET_CFG_STATS_TX_DISCARDS },
51 { "dev_tx_errors", NFP_NET_CFG_STATS_TX_ERRORS },
52 { "dev_tx_bytes", NFP_NET_CFG_STATS_TX_OCTETS },
53 { "dev_tx_uc_bytes", NFP_NET_CFG_STATS_TX_UC_OCTETS },
54 { "dev_tx_mc_bytes", NFP_NET_CFG_STATS_TX_MC_OCTETS },
55 { "dev_tx_bc_bytes", NFP_NET_CFG_STATS_TX_BC_OCTETS },
56 { "dev_tx_pkts", NFP_NET_CFG_STATS_TX_FRAMES },
57 { "dev_tx_mc_pkts", NFP_NET_CFG_STATS_TX_MC_FRAMES },
58 { "dev_tx_bc_pkts", NFP_NET_CFG_STATS_TX_BC_FRAMES },
59
60 { "bpf_pass_pkts", NFP_NET_CFG_STATS_APP0_FRAMES },
61 { "bpf_pass_bytes", NFP_NET_CFG_STATS_APP0_BYTES },
62 /* see comments in outro functions in nfp_bpf_jit.c to find out
63 * how different BPF modes use app-specific counters
64 */
65 { "bpf_app1_pkts", NFP_NET_CFG_STATS_APP1_FRAMES },
66 { "bpf_app1_bytes", NFP_NET_CFG_STATS_APP1_BYTES },
67 { "bpf_app2_pkts", NFP_NET_CFG_STATS_APP2_FRAMES },
68 { "bpf_app2_bytes", NFP_NET_CFG_STATS_APP2_BYTES },
69 { "bpf_app3_pkts", NFP_NET_CFG_STATS_APP3_FRAMES },
70 { "bpf_app3_bytes", NFP_NET_CFG_STATS_APP3_BYTES },
71 };
72
73 static const struct nfp_et_stat nfp_mac_et_stats[] = {
74 { "rx_octets", NFP_MAC_STATS_RX_IN_OCTETS, },
75 { "rx_frame_too_long_errors",
76 NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
77 { "rx_range_length_errors", NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
78 { "rx_vlan_received_ok", NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
79 { "rx_errors", NFP_MAC_STATS_RX_IN_ERRORS, },
80 { "rx_broadcast_pkts", NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
81 { "rx_drop_events", NFP_MAC_STATS_RX_DROP_EVENTS, },
82 { "rx_alignment_errors", NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, },
83 { "rx_pause_mac_ctrl_frames",
84 NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, },
85 { "rx_frames_received_ok", NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, },
86 { "rx_frame_check_sequence_errors",
87 NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, },
88 { "rx_unicast_pkts", NFP_MAC_STATS_RX_UNICAST_PKTS, },
89 { "rx_multicast_pkts", NFP_MAC_STATS_RX_MULTICAST_PKTS, },
90 { "rx_pkts", NFP_MAC_STATS_RX_PKTS, },
91 { "rx_undersize_pkts", NFP_MAC_STATS_RX_UNDERSIZE_PKTS, },
92 { "rx_pkts_64_octets", NFP_MAC_STATS_RX_PKTS_64_OCTETS, },
93 { "rx_pkts_65_to_127_octets",
94 NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, },
95 { "rx_pkts_128_to_255_octets",
96 NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, },
97 { "rx_pkts_256_to_511_octets",
98 NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, },
99 { "rx_pkts_512_to_1023_octets",
100 NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, },
101 { "rx_pkts_1024_to_1518_octets",
102 NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, },
103 { "rx_pkts_1519_to_max_octets",
104 NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, },
105 { "rx_jabbers", NFP_MAC_STATS_RX_JABBERS, },
106 { "rx_fragments", NFP_MAC_STATS_RX_FRAGMENTS, },
107 { "rx_oversize_pkts", NFP_MAC_STATS_RX_OVERSIZE_PKTS, },
108 { "rx_pause_frames_class0", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, },
109 { "rx_pause_frames_class1", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, },
110 { "rx_pause_frames_class2", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, },
111 { "rx_pause_frames_class3", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, },
112 { "rx_pause_frames_class4", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, },
113 { "rx_pause_frames_class5", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, },
114 { "rx_pause_frames_class6", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, },
115 { "rx_pause_frames_class7", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, },
116 { "rx_mac_ctrl_frames_received",
117 NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, },
118 { "rx_mac_head_drop", NFP_MAC_STATS_RX_MAC_HEAD_DROP, },
119 { "tx_queue_drop", NFP_MAC_STATS_TX_QUEUE_DROP, },
120 { "tx_octets", NFP_MAC_STATS_TX_OUT_OCTETS, },
121 { "tx_vlan_transmitted_ok", NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, },
122 { "tx_errors", NFP_MAC_STATS_TX_OUT_ERRORS, },
123 { "tx_broadcast_pkts", NFP_MAC_STATS_TX_BROADCAST_PKTS, },
124 { "tx_pause_mac_ctrl_frames",
125 NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, },
126 { "tx_frames_transmitted_ok",
127 NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, },
128 { "tx_unicast_pkts", NFP_MAC_STATS_TX_UNICAST_PKTS, },
129 { "tx_multicast_pkts", NFP_MAC_STATS_TX_MULTICAST_PKTS, },
130 { "tx_pkts_64_octets", NFP_MAC_STATS_TX_PKTS_64_OCTETS, },
131 { "tx_pkts_65_to_127_octets",
132 NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, },
133 { "tx_pkts_128_to_255_octets",
134 NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, },
135 { "tx_pkts_256_to_511_octets",
136 NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, },
137 { "tx_pkts_512_to_1023_octets",
138 NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, },
139 { "tx_pkts_1024_to_1518_octets",
140 NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, },
141 { "tx_pkts_1519_to_max_octets",
142 NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, },
143 { "tx_pause_frames_class0", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, },
144 { "tx_pause_frames_class1", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, },
145 { "tx_pause_frames_class2", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, },
146 { "tx_pause_frames_class3", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, },
147 { "tx_pause_frames_class4", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, },
148 { "tx_pause_frames_class5", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, },
149 { "tx_pause_frames_class6", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, },
150 { "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
151 };
152
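/* Names indexed by the stat ID reported in the vNIC stats TLV; ID 0 is left
 * unused, and IDs without a name here are reported as "dev_unknown_stat<N>".
 */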
153 static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = {
154 [1] = "dev_rx_discards",
155 [2] = "dev_rx_errors",
156 [3] = "dev_rx_bytes",
157 [4] = "dev_rx_uc_bytes",
158 [5] = "dev_rx_mc_bytes",
159 [6] = "dev_rx_bc_bytes",
160 [7] = "dev_rx_pkts",
161 [8] = "dev_rx_mc_pkts",
162 [9] = "dev_rx_bc_pkts",
163
164 [10] = "dev_tx_discards",
165 [11] = "dev_tx_errors",
166 [12] = "dev_tx_bytes",
167 [13] = "dev_tx_uc_bytes",
168 [14] = "dev_tx_mc_bytes",
169 [15] = "dev_tx_bc_bytes",
170 [16] = "dev_tx_pkts",
171 [17] = "dev_tx_mc_pkts",
172 [18] = "dev_tx_bc_pkts",
173 };
174
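/* The counts below must match the number of strings and values emitted by the
 * corresponding stats helpers further down in this file.
 */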
175 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
176 #define NN_ET_SWITCH_STATS_LEN 9
177 #define NN_RVEC_GATHER_STATS 13
178 #define NN_RVEC_PER_Q_STATS 3
179 #define NN_CTRL_PATH_STATS 4
180
181 #define SFP_SFF_REV_COMPLIANCE 1
182
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
184 {
185 struct nfp_nsp *nsp;
186
187 if (!app)
188 return;
189
190 nsp = nfp_nsp_open(app->cpp);
191 if (IS_ERR(nsp))
192 return;
193
194 snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu",
195 nfp_nsp_get_abi_ver_major(nsp),
196 nfp_nsp_get_abi_ver_minor(nsp));
197
198 nfp_nsp_close(nsp);
199 }
200
201 static void
nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
		const char *vnic_version, struct ethtool_drvinfo *drvinfo)
204 {
205 char nsp_version[ETHTOOL_FWVERS_LEN] = {};
206
207 strlcpy(drvinfo->driver, dev_driver_string(&pdev->dev),
208 sizeof(drvinfo->driver));
209 nfp_net_get_nspinfo(app, nsp_version);
210 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
211 "%s %s %s %s", vnic_version, nsp_version,
212 nfp_app_mip_name(app), nfp_app_name(app));
213 }
214
215 static void
nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
217 {
218 char vnic_version[ETHTOOL_FWVERS_LEN] = {};
219 struct nfp_net *nn = netdev_priv(netdev);
220
221 snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
222 nn->fw_ver.extend, nn->fw_ver.class,
223 nn->fw_ver.major, nn->fw_ver.minor);
224 strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
225 sizeof(drvinfo->bus_info));
226
227 nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
228 }
229
230 static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
232 {
233 struct nfp_app *app = nfp_app_from_netdev(netdev);
234
235 strlcpy(drvinfo->bus_info, pci_name(app->pdev),
236 sizeof(drvinfo->bus_info));
237 nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
238 }
239
240 static void
nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
			  struct ethtool_link_ksettings *c)
243 {
244 unsigned int modes;
245
246 ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE);
247 if (!nfp_eth_can_support_fec(eth_port)) {
248 ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE);
249 return;
250 }
251
252 modes = nfp_eth_supported_fec_modes(eth_port);
253 if (modes & NFP_FEC_BASER) {
254 ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER);
255 ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER);
256 }
257
258 if (modes & NFP_FEC_REED_SOLOMON) {
259 ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS);
260 ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS);
261 }
262 }
263
264 /**
265 * nfp_net_get_link_ksettings - Get Link Speed settings
266 * @netdev: network interface device structure
267 * @cmd: ethtool command
268 *
269 * Reports speed settings based on info in the BAR provided by the fw.
270 */
271 static int
nfp_net_get_link_ksettings(struct net_device *netdev,
			   struct ethtool_link_ksettings *cmd)
274 {
275 static const u32 ls_to_ethtool[] = {
276 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
277 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN,
278 [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000,
279 [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000,
280 [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000,
281 [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000,
282 [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
283 [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
284 };
285 struct nfp_eth_table_port *eth_port;
286 struct nfp_port *port;
287 struct nfp_net *nn;
288 u32 sts, ls;
289
290 /* Init to unknowns */
291 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
292 cmd->base.port = PORT_OTHER;
293 cmd->base.speed = SPEED_UNKNOWN;
294 cmd->base.duplex = DUPLEX_UNKNOWN;
295
296 port = nfp_port_from_netdev(netdev);
297 eth_port = nfp_port_get_eth_port(port);
298 if (eth_port) {
299 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
300 ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
301 cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
302 AUTONEG_ENABLE : AUTONEG_DISABLE;
303 nfp_net_set_fec_link_mode(eth_port, cmd);
304 }
305
306 if (!netif_carrier_ok(netdev))
307 return 0;
308
309 /* Use link speed from ETH table if available, otherwise try the BAR */
310 if (eth_port) {
311 cmd->base.port = eth_port->port_type;
312 cmd->base.speed = eth_port->speed;
313 cmd->base.duplex = DUPLEX_FULL;
314 return 0;
315 }
316
317 if (!nfp_netdev_is_nfp_net(netdev))
318 return -EOPNOTSUPP;
319 nn = netdev_priv(netdev);
320
321 sts = nn_readl(nn, NFP_NET_CFG_STS);
322
323 ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
324 if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
325 return -EOPNOTSUPP;
326
327 if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
328 ls >= ARRAY_SIZE(ls_to_ethtool))
329 return 0;
330
331 cmd->base.speed = ls_to_ethtool[ls];
332 cmd->base.duplex = DUPLEX_FULL;
333
334 return 0;
335 }
336
337 static int
nfp_net_set_link_ksettings(struct net_device *netdev,
			   const struct ethtool_link_ksettings *cmd)
340 {
341 struct nfp_eth_table_port *eth_port;
342 struct nfp_port *port;
343 struct nfp_nsp *nsp;
344 int err;
345
346 port = nfp_port_from_netdev(netdev);
347 eth_port = __nfp_port_get_eth_port(port);
348 if (!eth_port)
349 return -EOPNOTSUPP;
350
351 if (netif_running(netdev)) {
352 netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until driver reload.\n");
353 return -EBUSY;
354 }
355
356 nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
357 if (IS_ERR(nsp))
358 return PTR_ERR(nsp);
359
360 err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
361 NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
362 if (err)
363 goto err_bad_set;
364 if (cmd->base.speed != SPEED_UNKNOWN) {
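		/* The requested ethtool speed is the aggregate port speed,
		 * while the NSP configures a per-lane rate, so scale the
		 * request down by the number of lanes.
		 */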
365 u32 speed = cmd->base.speed / eth_port->lanes;
366
367 err = __nfp_eth_set_speed(nsp, speed);
368 if (err)
369 goto err_bad_set;
370 }
371
372 err = nfp_eth_config_commit_end(nsp);
373 if (err > 0)
374 return 0; /* no change */
375
376 nfp_net_refresh_port_table(port);
377
378 return err;
379
380 err_bad_set:
381 nfp_eth_config_cleanup_end(nsp);
382 return err;
383 }
384
static void nfp_net_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
389 {
390 struct nfp_net *nn = netdev_priv(netdev);
391 u32 qc_max = nn->dev_info->max_qc_size;
392
393 ring->rx_max_pending = qc_max;
394 ring->tx_max_pending = qc_max / nn->dp.ops->tx_min_desc_per_pkt;
395 ring->rx_pending = nn->dp.rxd_cnt;
396 ring->tx_pending = nn->dp.txd_cnt;
397 }
398
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
400 {
401 struct nfp_net_dp *dp;
402
403 dp = nfp_net_clone_dp(nn);
404 if (!dp)
405 return -ENOMEM;
406
407 dp->rxd_cnt = rxd_cnt;
408 dp->txd_cnt = txd_cnt;
409
410 return nfp_net_ring_reconfig(nn, dp, NULL);
411 }
412
static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
417 {
418 u32 tx_dpp, qc_min, qc_max, rxd_cnt, txd_cnt;
419 struct nfp_net *nn = netdev_priv(netdev);
420
421 /* We don't have separate queues/rings for small/large frames. */
422 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
423 return -EINVAL;
424
425 qc_min = nn->dev_info->min_qc_size;
426 qc_max = nn->dev_info->max_qc_size;
427 tx_dpp = nn->dp.ops->tx_min_desc_per_pkt;
428 /* Round up to supported values */
429 rxd_cnt = roundup_pow_of_two(ring->rx_pending);
430 txd_cnt = roundup_pow_of_two(ring->tx_pending);
431
432 if (rxd_cnt < qc_min || rxd_cnt > qc_max ||
433 txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp)
434 return -EINVAL;
435
436 if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
437 return 0;
438
439 nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
440 nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
441
442 return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
443 }
444
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
446 {
447 struct nfp_net *nn = netdev_priv(netdev);
448
449 return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
450 NN_CTRL_PATH_STATS;
451 }
452
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
454 {
455 struct nfp_net *nn = netdev_priv(netdev);
456 int i;
457
458 for (i = 0; i < nn->max_r_vecs; i++) {
459 ethtool_sprintf(&data, "rvec_%u_rx_pkts", i);
460 ethtool_sprintf(&data, "rvec_%u_tx_pkts", i);
461 ethtool_sprintf(&data, "rvec_%u_tx_busy", i);
462 }
463
464 ethtool_sprintf(&data, "hw_rx_csum_ok");
465 ethtool_sprintf(&data, "hw_rx_csum_inner_ok");
466 ethtool_sprintf(&data, "hw_rx_csum_complete");
467 ethtool_sprintf(&data, "hw_rx_csum_err");
468 ethtool_sprintf(&data, "rx_replace_buf_alloc_fail");
469 ethtool_sprintf(&data, "rx_tls_decrypted_packets");
470 ethtool_sprintf(&data, "hw_tx_csum");
471 ethtool_sprintf(&data, "hw_tx_inner_csum");
472 ethtool_sprintf(&data, "tx_gather");
473 ethtool_sprintf(&data, "tx_lso");
474 ethtool_sprintf(&data, "tx_tls_encrypted_packets");
475 ethtool_sprintf(&data, "tx_tls_ooo");
476 ethtool_sprintf(&data, "tx_tls_drop_no_sync_data");
477
478 ethtool_sprintf(&data, "hw_tls_no_space");
479 ethtool_sprintf(&data, "rx_tls_resync_req_ok");
480 ethtool_sprintf(&data, "rx_tls_resync_req_ign");
481 ethtool_sprintf(&data, "rx_tls_resync_sent");
482
483 return data;
484 }
485
static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
487 {
488 u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
489 struct nfp_net *nn = netdev_priv(netdev);
490 u64 tmp[NN_RVEC_GATHER_STATS];
491 unsigned int i, j;
492
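	/* Indices in tmp[] must match the order of the gathered stat names
	 * emitted by nfp_vnic_get_sw_stats_strings().
	 */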
493 for (i = 0; i < nn->max_r_vecs; i++) {
494 unsigned int start;
495
496 do {
497 start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
498 data[0] = nn->r_vecs[i].rx_pkts;
499 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
500 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
501 tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
502 tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
503 tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
504 tmp[5] = nn->r_vecs[i].hw_tls_rx;
505 } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
506
507 do {
508 start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
509 data[1] = nn->r_vecs[i].tx_pkts;
510 data[2] = nn->r_vecs[i].tx_busy;
511 tmp[6] = nn->r_vecs[i].hw_csum_tx;
512 tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
513 tmp[8] = nn->r_vecs[i].tx_gather;
514 tmp[9] = nn->r_vecs[i].tx_lso;
515 tmp[10] = nn->r_vecs[i].hw_tls_tx;
516 tmp[11] = nn->r_vecs[i].tls_tx_fallback;
517 tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
518 } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
519
520 data += NN_RVEC_PER_Q_STATS;
521
522 for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
523 gathered_stats[j] += tmp[j];
524 }
525
526 for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
527 *data++ = gathered_stats[j];
528
529 *data++ = atomic_read(&nn->ktls_no_space);
530 *data++ = atomic_read(&nn->ktls_rx_resync_req);
531 *data++ = atomic_read(&nn->ktls_rx_resync_ign);
532 *data++ = atomic_read(&nn->ktls_rx_resync_sent);
533
534 return data;
535 }
536
static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
538 {
539 return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
540 }
541
542 static u8 *
nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
544 {
545 int swap_off, i;
546
547 BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2);
	/* If repr is true, first add SWITCH_STATS_LEN and then subtract it,
	 * effectively swapping the RX and TX statistics (giving us the RX
	 * and TX from the perspective of the switch).
	 */
552 swap_off = repr * NN_ET_SWITCH_STATS_LEN;
553
554 for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
555 ethtool_sprintf(&data, nfp_net_et_stats[i + swap_off].name);
556
557 for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
558 ethtool_sprintf(&data, nfp_net_et_stats[i - swap_off].name);
559
560 for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
561 ethtool_sprintf(&data, nfp_net_et_stats[i].name);
562
563 for (i = 0; i < num_vecs; i++) {
564 ethtool_sprintf(&data, "rxq_%u_pkts", i);
565 ethtool_sprintf(&data, "rxq_%u_bytes", i);
566 ethtool_sprintf(&data, "txq_%u_pkts", i);
567 ethtool_sprintf(&data, "txq_%u_bytes", i);
568 }
569
570 return data;
571 }
572
573 static u64 *
nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
575 {
576 unsigned int i;
577
578 for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
579 *data++ = readq(mem + nfp_net_et_stats[i].off);
580
581 for (i = 0; i < num_vecs; i++) {
582 *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
583 *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
584 *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
585 *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
586 }
587
588 return data;
589 }
590
static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn)
592 {
593 return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4;
594 }
595
static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
597 {
598 unsigned int i, id;
599 u8 __iomem *mem;
600 u64 id_word = 0;
601
602 mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
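	/* Stat IDs are packed four per 64-bit word as 16-bit values; fetch a
	 * new word every fourth iteration and shift one ID out at a time.
	 */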
603 for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) {
604 if (!(i % 4))
605 id_word = readq(mem + i * 2);
606
607 id = (u16)id_word;
608 id_word >>= 16;
609
610 if (id < ARRAY_SIZE(nfp_tlv_stat_names) &&
611 nfp_tlv_stat_names[id][0]) {
612 memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
613 data += ETH_GSTRING_LEN;
614 } else {
615 ethtool_sprintf(&data, "dev_unknown_stat%u", id);
616 }
617 }
618
619 for (i = 0; i < nn->max_r_vecs; i++) {
620 ethtool_sprintf(&data, "rxq_%u_pkts", i);
621 ethtool_sprintf(&data, "rxq_%u_bytes", i);
622 ethtool_sprintf(&data, "txq_%u_pkts", i);
623 ethtool_sprintf(&data, "txq_%u_bytes", i);
624 }
625
626 return data;
627 }
628
static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data)
630 {
631 u8 __iomem *mem;
632 unsigned int i;
633
634 mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
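	/* The 64-bit counter values follow the table of 16-bit stat IDs,
	 * which is padded out to 8-byte alignment.
	 */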
635 mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8);
636 for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++)
637 *data++ = readq(mem + i * 8);
638
639 mem = nn->dp.ctrl_bar;
640 for (i = 0; i < nn->max_r_vecs; i++) {
641 *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
642 *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
643 *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
644 *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
645 }
646
647 return data;
648 }
649
static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
651 {
652 struct nfp_port *port;
653
654 port = nfp_port_from_netdev(netdev);
655 if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
656 return 0;
657
658 return ARRAY_SIZE(nfp_mac_et_stats);
659 }
660
static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
662 {
663 struct nfp_port *port;
664 unsigned int i;
665
666 port = nfp_port_from_netdev(netdev);
667 if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
668 return data;
669
670 for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
671 ethtool_sprintf(&data, "mac.%s", nfp_mac_et_stats[i].name);
672
673 return data;
674 }
675
static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data)
677 {
678 struct nfp_port *port;
679 unsigned int i;
680
681 port = nfp_port_from_netdev(netdev);
682 if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
683 return data;
684
685 for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
686 *data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off);
687
688 return data;
689 }
690
static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
693 {
694 struct nfp_net *nn = netdev_priv(netdev);
695
696 switch (stringset) {
697 case ETH_SS_STATS:
698 data = nfp_vnic_get_sw_stats_strings(netdev, data);
699 if (!nn->tlv_caps.vnic_stats_off)
700 data = nfp_vnic_get_hw_stats_strings(data,
701 nn->max_r_vecs,
702 false);
703 else
704 data = nfp_vnic_get_tlv_stats_strings(nn, data);
705 data = nfp_mac_get_stats_strings(netdev, data);
706 data = nfp_app_port_get_stats_strings(nn->port, data);
707 break;
708 }
709 }
710
711 static void
nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		  u64 *data)
714 {
715 struct nfp_net *nn = netdev_priv(netdev);
716
717 data = nfp_vnic_get_sw_stats(netdev, data);
718 if (!nn->tlv_caps.vnic_stats_off)
719 data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
720 nn->max_r_vecs);
721 else
722 data = nfp_vnic_get_tlv_stats(nn, data);
723 data = nfp_mac_get_stats(netdev, data);
724 data = nfp_app_port_get_stats(nn->port, data);
725 }
726
static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
728 {
729 struct nfp_net *nn = netdev_priv(netdev);
730 unsigned int cnt;
731
732 switch (sset) {
733 case ETH_SS_STATS:
734 cnt = nfp_vnic_get_sw_stats_count(netdev);
735 if (!nn->tlv_caps.vnic_stats_off)
736 cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs);
737 else
738 cnt += nfp_vnic_get_tlv_stats_count(nn);
739 cnt += nfp_mac_get_stats_count(netdev);
740 cnt += nfp_app_port_get_stats_count(nn->port);
741 return cnt;
742 default:
743 return -EOPNOTSUPP;
744 }
745 }
746
static void nfp_port_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
749 {
750 struct nfp_port *port = nfp_port_from_netdev(netdev);
751
752 switch (stringset) {
753 case ETH_SS_STATS:
754 if (nfp_port_is_vnic(port))
755 data = nfp_vnic_get_hw_stats_strings(data, 0, true);
756 else
757 data = nfp_mac_get_stats_strings(netdev, data);
758 data = nfp_app_port_get_stats_strings(port, data);
759 break;
760 }
761 }
762
763 static void
nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		   u64 *data)
766 {
767 struct nfp_port *port = nfp_port_from_netdev(netdev);
768
769 if (nfp_port_is_vnic(port))
770 data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
771 else
772 data = nfp_mac_get_stats(netdev, data);
773 data = nfp_app_port_get_stats(port, data);
774 }
775
static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
777 {
778 struct nfp_port *port = nfp_port_from_netdev(netdev);
779 unsigned int count;
780
781 switch (sset) {
782 case ETH_SS_STATS:
783 if (nfp_port_is_vnic(port))
784 count = nfp_vnic_get_hw_stats_count(0);
785 else
786 count = nfp_mac_get_stats_count(netdev);
787 count += nfp_app_port_get_stats_count(port);
788 return count;
789 default:
790 return -EOPNOTSUPP;
791 }
792 }
793
static int nfp_port_fec_ethtool_to_nsp(u32 fec)
795 {
796 switch (fec) {
797 case ETHTOOL_FEC_AUTO:
798 return NFP_FEC_AUTO_BIT;
799 case ETHTOOL_FEC_OFF:
800 return NFP_FEC_DISABLED_BIT;
801 case ETHTOOL_FEC_RS:
802 return NFP_FEC_REED_SOLOMON_BIT;
803 case ETHTOOL_FEC_BASER:
804 return NFP_FEC_BASER_BIT;
805 default:
806 /* NSP only supports a single mode at a time */
807 return -EOPNOTSUPP;
808 }
809 }
810
static u32 nfp_port_fec_nsp_to_ethtool(u32 fec)
812 {
813 u32 result = 0;
814
815 if (fec & NFP_FEC_AUTO)
816 result |= ETHTOOL_FEC_AUTO;
817 if (fec & NFP_FEC_BASER)
818 result |= ETHTOOL_FEC_BASER;
819 if (fec & NFP_FEC_REED_SOLOMON)
820 result |= ETHTOOL_FEC_RS;
821 if (fec & NFP_FEC_DISABLED)
822 result |= ETHTOOL_FEC_OFF;
823
824 return result ?: ETHTOOL_FEC_NONE;
825 }
826
827 static int
nfp_port_get_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
830 {
831 struct nfp_eth_table_port *eth_port;
832 struct nfp_port *port;
833
834 param->active_fec = ETHTOOL_FEC_NONE;
835 param->fec = ETHTOOL_FEC_NONE;
836
837 port = nfp_port_from_netdev(netdev);
838 eth_port = nfp_port_get_eth_port(port);
839 if (!eth_port)
840 return -EOPNOTSUPP;
841
842 if (!nfp_eth_can_support_fec(eth_port))
843 return 0;
844
845 param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
846 param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);
847
848 return 0;
849 }
850
851 static int
nfp_port_set_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
854 {
855 struct nfp_eth_table_port *eth_port;
856 struct nfp_port *port;
857 int err, fec;
858
859 port = nfp_port_from_netdev(netdev);
860 eth_port = nfp_port_get_eth_port(port);
861 if (!eth_port)
862 return -EOPNOTSUPP;
863
864 if (!nfp_eth_can_support_fec(eth_port))
865 return -EOPNOTSUPP;
866
867 fec = nfp_port_fec_ethtool_to_nsp(param->fec);
868 if (fec < 0)
869 return fec;
870
871 err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec);
872 if (!err)
873 /* Only refresh if we did something */
874 nfp_net_refresh_port_table(port);
875
876 return err < 0 ? err : 0;
877 }
878
879 /* RX network flow classification (RSS, filters, etc)
880 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
882 {
883 static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
884 [TCP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_TCP,
885 [TCP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_TCP,
886 [UDP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_UDP,
887 [UDP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_UDP,
888 [IPV4_FLOW] = NFP_NET_CFG_RSS_IPV4,
889 [IPV6_FLOW] = NFP_NET_CFG_RSS_IPV6,
890 };
891
892 if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
893 return 0;
894
895 return xlate_ethtool_to_nfp[flow_type];
896 }
897
static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
				     struct ethtool_rxnfc *cmd)
900 {
901 u32 nfp_rss_flag;
902
903 cmd->data = 0;
904
905 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
906 return -EOPNOTSUPP;
907
908 nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
909 if (!nfp_rss_flag)
910 return -EINVAL;
911
912 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
913 if (nn->rss_cfg & nfp_rss_flag)
914 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
915
916 return 0;
917 }
918
static int nfp_net_get_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd, u32 *rule_locs)
921 {
922 struct nfp_net *nn = netdev_priv(netdev);
923
924 switch (cmd->cmd) {
925 case ETHTOOL_GRXRINGS:
926 cmd->data = nn->dp.num_rx_rings;
927 return 0;
928 case ETHTOOL_GRXFH:
929 return nfp_net_get_rss_hash_opts(nn, cmd);
930 default:
931 return -EOPNOTSUPP;
932 }
933 }
934
static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
				    struct ethtool_rxnfc *nfc)
937 {
938 u32 new_rss_cfg = nn->rss_cfg;
939 u32 nfp_rss_flag;
940 int err;
941
942 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
943 return -EOPNOTSUPP;
944
945 /* RSS only supports IP SA/DA and L4 src/dst ports */
946 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
947 RXH_L4_B_0_1 | RXH_L4_B_2_3))
948 return -EINVAL;
949
950 /* We need at least the IP SA/DA fields for hashing */
951 if (!(nfc->data & RXH_IP_SRC) ||
952 !(nfc->data & RXH_IP_DST))
953 return -EINVAL;
954
955 nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
956 if (!nfp_rss_flag)
957 return -EINVAL;
958
959 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
960 case 0:
961 new_rss_cfg &= ~nfp_rss_flag;
962 break;
963 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
964 new_rss_cfg |= nfp_rss_flag;
965 break;
966 default:
967 return -EINVAL;
968 }
969
970 new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
971 new_rss_cfg |= NFP_NET_CFG_RSS_MASK;
972
973 if (new_rss_cfg == nn->rss_cfg)
974 return 0;
975
976 writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
977 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
978 if (err)
979 return err;
980
981 nn->rss_cfg = new_rss_cfg;
982
983 nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
984 return 0;
985 }
986
static int nfp_net_set_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd)
989 {
990 struct nfp_net *nn = netdev_priv(netdev);
991
992 switch (cmd->cmd) {
993 case ETHTOOL_SRXFH:
994 return nfp_net_set_rss_hash_opt(nn, cmd);
995 default:
996 return -EOPNOTSUPP;
997 }
998 }
999
static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
1001 {
1002 struct nfp_net *nn = netdev_priv(netdev);
1003
1004 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
1005 return 0;
1006
1007 return ARRAY_SIZE(nn->rss_itbl);
1008 }
1009
static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
1011 {
1012 struct nfp_net *nn = netdev_priv(netdev);
1013
1014 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
1015 return -EOPNOTSUPP;
1016
1017 return nfp_net_rss_key_sz(nn);
1018 }
1019
static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
1022 {
1023 struct nfp_net *nn = netdev_priv(netdev);
1024 int i;
1025
1026 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
1027 return -EOPNOTSUPP;
1028
1029 if (indir)
1030 for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
1031 indir[i] = nn->rss_itbl[i];
1032 if (key)
1033 memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
1034 if (hfunc) {
1035 *hfunc = nn->rss_hfunc;
1036 if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
1037 *hfunc = ETH_RSS_HASH_UNKNOWN;
1038 }
1039
1040 return 0;
1041 }
1042
static int nfp_net_set_rxfh(struct net_device *netdev,
			    const u32 *indir, const u8 *key,
			    const u8 hfunc)
1046 {
1047 struct nfp_net *nn = netdev_priv(netdev);
1048 int i;
1049
1050 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
1051 !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
1052 return -EOPNOTSUPP;
1053
1054 if (!key && !indir)
1055 return 0;
1056
1057 if (key) {
1058 memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
1059 nfp_net_rss_write_key(nn);
1060 }
1061 if (indir) {
1062 for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
1063 nn->rss_itbl[i] = indir[i];
1064
1065 nfp_net_rss_write_itbl(nn);
1066 }
1067
1068 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
1069 }
1070
1071 /* Dump BAR registers
1072 */
static int nfp_net_get_regs_len(struct net_device *netdev)
1074 {
1075 return NFP_NET_CFG_BAR_SZ;
1076 }
1077
static void nfp_net_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs, void *p)
1080 {
1081 struct nfp_net *nn = netdev_priv(netdev);
1082 u32 *regs_buf = p;
1083 int i;
1084
1085 regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);
1086
1087 for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
1088 regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
1089 }
1090
static int nfp_net_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
1095 {
1096 struct nfp_net *nn = netdev_priv(netdev);
1097
1098 if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
1099 return -EINVAL;
1100
1101 ec->use_adaptive_rx_coalesce = nn->rx_coalesce_adapt_on;
1102 ec->use_adaptive_tx_coalesce = nn->tx_coalesce_adapt_on;
1103
1104 ec->rx_coalesce_usecs = nn->rx_coalesce_usecs;
1105 ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
1106 ec->tx_coalesce_usecs = nn->tx_coalesce_usecs;
1107 ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;
1108
1109 return 0;
1110 }
1111
1112 /* Other debug dumps
1113 */
1114 static int
nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
1116 {
1117 struct nfp_resource *res;
1118 int ret;
1119
1120 if (!app)
1121 return -EOPNOTSUPP;
1122
1123 dump->version = 1;
1124 dump->flag = NFP_DUMP_NSP_DIAG;
1125
1126 res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG);
1127 if (IS_ERR(res))
1128 return PTR_ERR(res);
1129
1130 if (buffer) {
1131 if (dump->len != nfp_resource_size(res)) {
1132 ret = -EINVAL;
1133 goto exit_release;
1134 }
1135
1136 ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res),
1137 nfp_resource_address(res),
1138 buffer, dump->len);
1139 if (ret != dump->len)
1140 ret = ret < 0 ? ret : -EIO;
1141 else
1142 ret = 0;
1143 } else {
1144 dump->len = nfp_resource_size(res);
1145 ret = 0;
1146 }
1147 exit_release:
1148 nfp_resource_release(res);
1149
1150 return ret;
1151 }
1152
1153 /* Set the dump flag/level. Calculate the dump length for flag > 0 only (new TLV
1154 * based dumps), since flag 0 (default) calculates the length in
1155 * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
1156 * without setting the flag first, for backward compatibility.
1157 */
static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1159 {
1160 struct nfp_app *app = nfp_app_from_netdev(netdev);
1161 s64 len;
1162
1163 if (!app)
1164 return -EOPNOTSUPP;
1165
1166 if (val->flag == NFP_DUMP_NSP_DIAG) {
1167 app->pf->dump_flag = val->flag;
1168 return 0;
1169 }
1170
1171 if (!app->pf->dumpspec)
1172 return -EOPNOTSUPP;
1173
1174 len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
1175 val->flag);
1176 if (len < 0)
1177 return len;
1178
1179 app->pf->dump_flag = val->flag;
1180 app->pf->dump_len = len;
1181
1182 return 0;
1183 }
1184
1185 static int
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
1187 {
1188 struct nfp_app *app = nfp_app_from_netdev(netdev);
1189
1190 if (!app)
1191 return -EOPNOTSUPP;
1192
1193 if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
1194 return nfp_dump_nsp_diag(app, dump, NULL);
1195
1196 dump->flag = app->pf->dump_flag;
1197 dump->len = app->pf->dump_len;
1198
1199 return 0;
1200 }
1201
1202 static int
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
		      void *buffer)
1205 {
1206 struct nfp_app *app = nfp_app_from_netdev(netdev);
1207
1208 if (!app)
1209 return -EOPNOTSUPP;
1210
1211 if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
1212 return nfp_dump_nsp_diag(app, dump, buffer);
1213
1214 dump->flag = app->pf->dump_flag;
1215 dump->len = app->pf->dump_len;
1216
1217 return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
1218 buffer);
1219 }
1220
1221 static int
nfp_port_get_module_info(struct net_device *netdev,
			 struct ethtool_modinfo *modinfo)
1224 {
1225 struct nfp_eth_table_port *eth_port;
1226 struct nfp_port *port;
1227 unsigned int read_len;
1228 struct nfp_nsp *nsp;
1229 int err = 0;
1230 u8 data;
1231
1232 port = nfp_port_from_netdev(netdev);
1233 /* update port state to get latest interface */
1234 set_bit(NFP_PORT_CHANGED, &port->flags);
1235 eth_port = nfp_port_get_eth_port(port);
1236 if (!eth_port)
1237 return -EOPNOTSUPP;
1238
1239 nsp = nfp_nsp_open(port->app->cpp);
1240 if (IS_ERR(nsp)) {
1241 err = PTR_ERR(nsp);
1242 netdev_err(netdev, "Failed to access the NSP: %d\n", err);
1243 return err;
1244 }
1245
1246 if (!nfp_nsp_has_read_module_eeprom(nsp)) {
1247 netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
1248 err = -EOPNOTSUPP;
1249 goto exit_close_nsp;
1250 }
1251
1252 switch (eth_port->interface) {
1253 case NFP_INTERFACE_SFP:
1254 case NFP_INTERFACE_SFP28:
1255 err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
1256 SFP_SFF8472_COMPLIANCE, &data,
1257 1, &read_len);
1258 if (err < 0)
1259 goto exit_close_nsp;
1260
1261 if (!data) {
1262 modinfo->type = ETH_MODULE_SFF_8079;
1263 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1264 } else {
1265 modinfo->type = ETH_MODULE_SFF_8472;
1266 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1267 }
1268 break;
1269 case NFP_INTERFACE_QSFP:
1270 err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
1271 SFP_SFF_REV_COMPLIANCE, &data,
1272 1, &read_len);
1273 if (err < 0)
1274 goto exit_close_nsp;
1275
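		/* Modules reporting a revision compliance value below 0x03
		 * are exposed as SFF-8436; later revisions use the larger
		 * SFF-8636 EEPROM format.
		 */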
1276 if (data < 0x3) {
1277 modinfo->type = ETH_MODULE_SFF_8436;
1278 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
1279 } else {
1280 modinfo->type = ETH_MODULE_SFF_8636;
1281 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
1282 }
1283 break;
1284 case NFP_INTERFACE_QSFP28:
1285 modinfo->type = ETH_MODULE_SFF_8636;
1286 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
1287 break;
1288 default:
1289 netdev_err(netdev, "Unsupported module 0x%x detected\n",
1290 eth_port->interface);
1291 err = -EINVAL;
1292 }
1293
1294 exit_close_nsp:
1295 nfp_nsp_close(nsp);
1296 return err;
1297 }
1298
1299 static int
nfp_port_get_module_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *data)
1302 {
1303 struct nfp_eth_table_port *eth_port;
1304 struct nfp_port *port;
1305 struct nfp_nsp *nsp;
1306 int err;
1307
1308 port = nfp_port_from_netdev(netdev);
1309 eth_port = __nfp_port_get_eth_port(port);
1310 if (!eth_port)
1311 return -EOPNOTSUPP;
1312
1313 nsp = nfp_nsp_open(port->app->cpp);
1314 if (IS_ERR(nsp)) {
1315 err = PTR_ERR(nsp);
1316 netdev_err(netdev, "Failed to access the NSP: %d\n", err);
1317 return err;
1318 }
1319
1320 if (!nfp_nsp_has_read_module_eeprom(nsp)) {
1321 netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
1322 err = -EOPNOTSUPP;
1323 goto exit_close_nsp;
1324 }
1325
1326 err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
1327 eeprom->offset, data, eeprom->len,
1328 &eeprom->len);
1329 if (err < 0) {
1330 if (eeprom->len) {
1331 netdev_warn(netdev,
1332 "Incomplete read from module EEPROM: %d\n",
1333 err);
1334 err = 0;
1335 } else {
1336 netdev_err(netdev,
1337 "Reading from module EEPROM failed: %d\n",
1338 err);
1339 }
1340 }
1341
1342 exit_close_nsp:
1343 nfp_nsp_close(nsp);
1344 return err;
1345 }
1346
static int nfp_net_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
1351 {
1352 struct nfp_net *nn = netdev_priv(netdev);
1353 unsigned int factor;
1354
1355 /* Compute factor used to convert coalesce '_usecs' parameters to
1356 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
1357 * count.
1358 */
1359 factor = nn->tlv_caps.me_freq_mhz / 16;
1360
1361 /* Each pair of (usecs, max_frames) fields specifies that interrupts
1362 * should be coalesced until
1363 * (usecs > 0 && time_since_first_completion >= usecs) ||
1364 * (max_frames > 0 && completed_frames >= max_frames)
1365 *
1366 * It is illegal to set both usecs and max_frames to zero as this would
1367 * cause interrupts to never be generated. To disable coalescing, set
1368 * usecs = 0 and max_frames = 1.
1369 *
1370 * Some implementations ignore the value of max_frames and use the
1371 * condition time_since_first_completion >= usecs
1372 */
1373
1374 if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
1375 return -EINVAL;
1376
1377 /* ensure valid configuration */
1378 if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
1379 return -EINVAL;
1380
1381 if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
1382 return -EINVAL;
1383
1384 if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor,
1385 ec->rx_max_coalesced_frames))
1386 return -EINVAL;
1387
1388 if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor,
1389 ec->tx_max_coalesced_frames))
1390 return -EINVAL;
1391
1392 /* configuration is valid */
1393 nn->rx_coalesce_adapt_on = !!ec->use_adaptive_rx_coalesce;
1394 nn->tx_coalesce_adapt_on = !!ec->use_adaptive_tx_coalesce;
1395
1396 nn->rx_coalesce_usecs = ec->rx_coalesce_usecs;
1397 nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
1398 nn->tx_coalesce_usecs = ec->tx_coalesce_usecs;
1399 nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;
1400
1401 /* write configuration to device */
1402 nfp_net_coalesce_write_cfg(nn);
1403 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
1404 }
1405
static void nfp_net_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channel)
1408 {
1409 struct nfp_net *nn = netdev_priv(netdev);
1410 unsigned int num_tx_rings;
1411
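	/* When an XDP program is loaded, each RX ring gets a dedicated XDP TX
	 * ring that is not visible to the stack, so do not count those here.
	 */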
1412 num_tx_rings = nn->dp.num_tx_rings;
1413 if (nn->dp.xdp_prog)
1414 num_tx_rings -= nn->dp.num_rx_rings;
1415
1416 channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
1417 channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
1418 channel->max_combined = min(channel->max_rx, channel->max_tx);
1419 channel->max_other = NFP_NET_NON_Q_VECTORS;
1420 channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
1421 channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
1422 channel->tx_count = num_tx_rings - channel->combined_count;
1423 channel->other_count = NFP_NET_NON_Q_VECTORS;
1424 }
1425
static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
				 unsigned int total_tx)
1428 {
1429 struct nfp_net_dp *dp;
1430
1431 dp = nfp_net_clone_dp(nn);
1432 if (!dp)
1433 return -ENOMEM;
1434
1435 dp->num_rx_rings = total_rx;
1436 dp->num_tx_rings = total_tx;
1437 /* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
1438 if (dp->xdp_prog)
1439 dp->num_tx_rings += total_rx;
1440
1441 return nfp_net_ring_reconfig(nn, dp, NULL);
1442 }
1443
static int nfp_net_set_channels(struct net_device *netdev,
				struct ethtool_channels *channel)
1446 {
1447 struct nfp_net *nn = netdev_priv(netdev);
1448 unsigned int total_rx, total_tx;
1449
1450 /* Reject unsupported */
1451 if (channel->other_count != NFP_NET_NON_Q_VECTORS ||
1452 (channel->rx_count && channel->tx_count))
1453 return -EINVAL;
1454
1455 total_rx = channel->combined_count + channel->rx_count;
1456 total_tx = channel->combined_count + channel->tx_count;
1457
1458 if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
1459 total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
1460 return -EINVAL;
1461
1462 return nfp_net_set_num_rings(nn, total_rx, total_tx);
1463 }
1464
1465 static const struct ethtool_ops nfp_net_ethtool_ops = {
1466 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1467 ETHTOOL_COALESCE_MAX_FRAMES |
1468 ETHTOOL_COALESCE_USE_ADAPTIVE,
1469 .get_drvinfo = nfp_net_get_drvinfo,
1470 .get_link = ethtool_op_get_link,
1471 .get_ringparam = nfp_net_get_ringparam,
1472 .set_ringparam = nfp_net_set_ringparam,
1473 .get_strings = nfp_net_get_strings,
1474 .get_ethtool_stats = nfp_net_get_stats,
1475 .get_sset_count = nfp_net_get_sset_count,
1476 .get_rxnfc = nfp_net_get_rxnfc,
1477 .set_rxnfc = nfp_net_set_rxnfc,
1478 .get_rxfh_indir_size = nfp_net_get_rxfh_indir_size,
1479 .get_rxfh_key_size = nfp_net_get_rxfh_key_size,
1480 .get_rxfh = nfp_net_get_rxfh,
1481 .set_rxfh = nfp_net_set_rxfh,
1482 .get_regs_len = nfp_net_get_regs_len,
1483 .get_regs = nfp_net_get_regs,
1484 .set_dump = nfp_app_set_dump,
1485 .get_dump_flag = nfp_app_get_dump_flag,
1486 .get_dump_data = nfp_app_get_dump_data,
1487 .get_module_info = nfp_port_get_module_info,
1488 .get_module_eeprom = nfp_port_get_module_eeprom,
1489 .get_coalesce = nfp_net_get_coalesce,
1490 .set_coalesce = nfp_net_set_coalesce,
1491 .get_channels = nfp_net_get_channels,
1492 .set_channels = nfp_net_set_channels,
1493 .get_link_ksettings = nfp_net_get_link_ksettings,
1494 .set_link_ksettings = nfp_net_set_link_ksettings,
1495 .get_fecparam = nfp_port_get_fecparam,
1496 .set_fecparam = nfp_port_set_fecparam,
1497 };
1498
1499 const struct ethtool_ops nfp_port_ethtool_ops = {
1500 .get_drvinfo = nfp_app_get_drvinfo,
1501 .get_link = ethtool_op_get_link,
1502 .get_strings = nfp_port_get_strings,
1503 .get_ethtool_stats = nfp_port_get_stats,
1504 .get_sset_count = nfp_port_get_sset_count,
1505 .set_dump = nfp_app_set_dump,
1506 .get_dump_flag = nfp_app_get_dump_flag,
1507 .get_dump_data = nfp_app_get_dump_data,
1508 .get_module_info = nfp_port_get_module_info,
1509 .get_module_eeprom = nfp_port_get_module_eeprom,
1510 .get_link_ksettings = nfp_net_get_link_ksettings,
1511 .set_link_ksettings = nfp_net_set_link_ksettings,
1512 .get_fecparam = nfp_port_get_fecparam,
1513 .set_fecparam = nfp_port_set_fecparam,
1514 };
1515
void nfp_net_set_ethtool_ops(struct net_device *netdev)
1517 {
1518 netdev->ethtool_ops = &nfp_net_ethtool_ops;
1519 }
1520