// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "ef100_rep.h"
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
#include "tc_bindings.h"

#define EFX_EF100_REP_DRIVER	"efx_ef100_rep"

#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE	64

static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);

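/* Initialise the software state of a representor: link it to its parent
 * PF, mark the default MAE rule as not yet installed, and set up the
 * pseudo RX ring list/lock and default message level.
 */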
static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
				     unsigned int i)
{
	efv->parent = efx;
	efv->idx = i;
	INIT_LIST_HEAD(&efv->list);
	efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efv->dflt.acts.list);
	INIT_LIST_HEAD(&efv->rx_list);
	spin_lock_init(&efv->rx_lock);
	efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			  NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			  NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			  NETIF_MSG_TX_ERR | NETIF_MSG_HW;
	return 0;
}

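/* ndo_open: the representor has no hardware queues of its own, so "up"
 * just means enabling the NAPI context that drains the pseudo RX ring
 * filled by efx_ef100_rep_rx_packet().
 */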
static int efx_ef100_rep_open(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll);
	napi_enable(&efv->napi);
	return 0;
}

static int efx_ef100_rep_close(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	napi_disable(&efv->napi);
	netif_napi_del(&efv->napi);
	return 0;
}

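/* ndo_start_xmit: representor TX is redirected onto the parent PF's
 * queues under the parent's TX lock; passing efv lets the parent's TX
 * path steer the frame to the corresponding representee.
 */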
static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	netdev_tx_t rc;

	/* __ef100_hard_start_xmit() will always return success even in the
	 * case of TX drops, where it will increment efx's tx_dropped. The
	 * efv stats really only count attempted TX, not success/failure.
	 */
	atomic64_inc(&efv->stats.tx_packets);
	atomic64_add(skb->len, &efv->stats.tx_bytes);
	netif_tx_lock(efx->net_dev);
	rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
	netif_tx_unlock(efx->net_dev);
	return rc;
}

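/* Report the parent PF's port ID so that all representors (and the PF
 * itself) present a single switch identity to switchdev/devlink users.
 */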
static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
					    struct netdev_phys_item_id *ppid)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;

	nic_data = efx->nic_data;
	/* nic_data->port_id is a u8[] */
	ppid->id_len = sizeof(nic_data->port_id);
	memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
	return 0;
}

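/* Physical port name of the form "p<port>pf<pf>vf<vf>", following the
 * usual phys_port_name convention for VF representors.
 */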
static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
					    char *buf, size_t len)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;
	int ret;

	nic_data = efx->nic_data;
	ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
		       nic_data->pf_index, efv->idx);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

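/* Offload TC rules applied to the representor: flower classifiers and
 * block bindings are handed to the common efx TC code with this efv as
 * the ingress port; anything else is unsupported.
 */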
static int efx_ef100_rep_setup_tc(struct net_device *net_dev,
				  enum tc_setup_type type, void *type_data)
{
	struct efx_rep *efv = netdev_priv(net_dev);
	struct efx_nic *efx = efv->parent;

	if (type == TC_SETUP_CLSFLOWER)
		return efx_tc_flower(efx, net_dev, type_data, efv);
	if (type == TC_SETUP_BLOCK)
		return efx_tc_setup_block(net_dev, efx, type_data, efv);

	return -EOPNOTSUPP;
}

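/* Fill netdev stats from the software counters; the representor data
 * path maintains these as atomics since it has no hardware counters of
 * its own.
 */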
static void efx_ef100_rep_get_stats64(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	struct efx_rep *efv = netdev_priv(dev);

	stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
	stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
	stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
	stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
	stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
	stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}

const struct net_device_ops efx_ef100_rep_netdev_ops = {
	.ndo_open		= efx_ef100_rep_open,
	.ndo_stop		= efx_ef100_rep_close,
	.ndo_start_xmit		= efx_ef100_rep_xmit,
	.ndo_get_port_parent_id	= efx_ef100_rep_get_port_parent_id,
	.ndo_get_phys_port_name	= efx_ef100_rep_get_phys_port_name,
	.ndo_get_stats64	= efx_ef100_rep_get_stats64,
	.ndo_setup_tc		= efx_ef100_rep_setup_tc,
};

static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
}

static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	return efv->msg_enable;
}

static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
					       u32 msg_enable)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	efv->msg_enable = msg_enable;
}

static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
						struct ethtool_ringparam *ring,
						struct kernel_ethtool_ringparam *kring,
						struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	ring->rx_max_pending = U32_MAX;
	ring->rx_pending = efv->rx_pring_size;
}

static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
					       struct ethtool_ringparam *ring,
					       struct kernel_ethtool_ringparam *kring,
					       struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
		return -EINVAL;

	efv->rx_pring_size = ring->rx_pending;
	return 0;
}

static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
	.get_drvinfo		= efx_ef100_rep_get_drvinfo,
	.get_msglevel		= efx_ef100_rep_ethtool_get_msglevel,
	.set_msglevel		= efx_ef100_rep_ethtool_set_msglevel,
	.get_ringparam		= efx_ef100_rep_ethtool_get_ringparam,
	.set_ringparam		= efx_ef100_rep_ethtool_set_ringparam,
};

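/* Allocate and partially set up a representor netdev: initialise the
 * software state, add it to the parent's vf_reps list and make carrier
 * mirror the parent's link state. The netdev is not registered here;
 * that happens later in efx_ef100_vfrep_create().
 */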
static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
						   unsigned int i)
{
	struct net_device *net_dev;
	struct efx_rep *efv;
	int rc;

	net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
	if (!net_dev)
		return ERR_PTR(-ENOMEM);

	efv = netdev_priv(net_dev);
	rc = efx_ef100_rep_init_struct(efx, efv, i);
	if (rc)
		goto fail1;
	efv->net_dev = net_dev;
	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_add_tail(&efv->list, &efx->vf_reps);
	spin_unlock_bh(&efx->vf_reps_lock);
	if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
		netif_device_attach(net_dev);
		netif_carrier_on(net_dev);
	} else {
		netif_carrier_off(net_dev);
		netif_tx_stop_all_queues(net_dev);
	}
	rtnl_unlock();

	net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
	net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->features |= NETIF_F_LLTX;
	net_dev->hw_features |= NETIF_F_LLTX;
	return efv;
fail1:
	free_netdev(net_dev);
	return ERR_PTR(rc);
}

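/* Resolve the representee VF's mport ID via the MAE and install the
 * default rule that delivers its traffic to this representor.
 */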
static int efx_ef100_configure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;
	u32 selector;
	int rc;

	efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
	/* Construct mport selector for corresponding VF */
	efx_mae_mport_vf(efx, efv->idx, &selector);
	/* Look up actual mport ID */
	rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
	/* mport label should fit in 16 bits */
	WARN_ON(efv->mport >> 16);

	return efx_tc_configure_default_rule_rep(efv);
}

static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	efx_tc_deconfigure_default_rule(efx, &efv->dflt);
}

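/* Unlink the representor from the parent's list and free the netdev.
 * synchronize_rcu() ensures any reader that found this efv via
 * efx_ef100_find_rep_by_mport() has finished before the free.
 */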
static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_del(&efv->list);
	spin_unlock_bh(&efx->vf_reps_lock);
	rtnl_unlock();
	synchronize_rcu();
	free_netdev(efv->net_dev);
}

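/* Create, configure and register a representor for VF index i, unwinding
 * each step on failure.
 */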
int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
{
	struct efx_rep *efv;
	int rc;

	efv = efx_ef100_rep_create_netdev(efx, i);
	if (IS_ERR(efv)) {
		rc = PTR_ERR(efv);
		pci_err(efx->pci_dev,
			"Failed to create representor for VF %d, rc %d\n", i,
			rc);
		return rc;
	}
	rc = efx_ef100_configure_rep(efv);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to configure representor for VF %d, rc %d\n",
			i, rc);
		goto fail1;
	}
	rc = register_netdev(efv->net_dev);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to register representor for VF %d, rc %d\n",
			i, rc);
		goto fail2;
	}
	pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
		efv->net_dev->name);
	return 0;
fail2:
	efx_ef100_deconfigure_rep(efv);
fail1:
	efx_ef100_rep_destroy_netdev(efv);
	return rc;
}

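/* Tear down one VF representor: unregister the netdev, remove its
 * default MAE rule and free the structure.
 */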
void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
{
	struct net_device *rep_dev;

	rep_dev = efv->net_dev;
	if (!rep_dev)
		return;
	netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
	unregister_netdev(rep_dev);
	efx_ef100_deconfigure_rep(efv);
	efx_ef100_rep_destroy_netdev(efv);
}

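/* Destroy all remaining VF representors; a no-op if this function lacks
 * MAE privileges (grp_mae) and so never created any.
 */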
void efx_ef100_fini_vfreps(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	struct efx_rep *efv, *next;

	if (!nic_data->grp_mae)
		return;

	list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
		efx_ef100_vfrep_destroy(efx, efv);
}

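/* NAPI poll: splice up to 'weight' SKBs off the pseudo RX ring and pass
 * them up the stack. read_index/write_index emulate interrupt re-arming,
 * so the producer only schedules NAPI when the ring goes non-empty.
 */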
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
	struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
	unsigned int read_index;
	struct list_head head;
	struct sk_buff *skb;
	bool need_resched;
	int spent = 0;

	INIT_LIST_HEAD(&head);
	/* Grab up to 'weight' pending SKBs */
	spin_lock_bh(&efv->rx_lock);
	read_index = efv->write_index;
	while (spent < weight && !list_empty(&efv->rx_list)) {
		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
		list_del(&skb->list);
		list_add_tail(&skb->list, &head);
		spent++;
	}
	spin_unlock_bh(&efv->rx_lock);
	/* Receive them */
	netif_receive_skb_list(&head);
	if (spent < weight)
		if (napi_complete_done(napi, spent)) {
			spin_lock_bh(&efv->rx_lock);
			efv->read_index = read_index;
			/* If write_index advanced while we were doing the
			 * RX, then storing our read_index won't re-prime the
			 * fake-interrupt. In that case, we need to schedule
			 * NAPI again to consume the additional packet(s).
			 */
			need_resched = efv->write_index != read_index;
			spin_unlock_bh(&efv->rx_lock);
			if (need_resched)
				napi_schedule(&efv->napi);
		}
	return spent;
}

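/* Called from the parent PF's RX path for packets destined for this
 * representor: copy the buffer into a freshly allocated SKB, queue it on
 * the pseudo RX ring and kick NAPI if the ring was previously empty.
 */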
void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
{
	u8 *eh = efx_rx_buf_va(rx_buf);
	struct sk_buff *skb;
	bool primed;

	/* Don't allow too many queued SKBs to build up, as they consume
	 * GFP_ATOMIC memory. If we overrun, just start dropping.
	 */
	if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "nodesc-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}

	skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
	if (!skb) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "noskb-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}
	memcpy(skb->data, eh, rx_buf->len);
	__skb_put(skb, rx_buf->len);

	skb_record_rx_queue(skb, 0); /* rep is single-queue */

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efv->net_dev);

	skb_checksum_none_assert(skb);

	atomic64_inc(&efv->stats.rx_packets);
	atomic64_add(rx_buf->len, &efv->stats.rx_bytes);

	/* Add it to the rx list */
	spin_lock_bh(&efv->rx_lock);
	primed = efv->read_index == efv->write_index;
	list_add_tail(&skb->list, &efv->rx_list);
	efv->write_index++;
	spin_unlock_bh(&efv->rx_lock);
	/* Trigger rx work */
	if (primed)
		napi_schedule(&efv->napi);
}

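/* Find the representor whose representee mport matches the given label,
 * or NULL if there is none. The 16-bit label matches the WARN_ON limit
 * checked in efx_ef100_configure_rep().
 */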
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
{
	struct efx_rep *efv, *out = NULL;

	/* spinlock guards against list mutation while we're walking it;
	 * but caller must also hold rcu_read_lock() to ensure the netdev
	 * isn't freed after we drop the spinlock.
	 */
	spin_lock_bh(&efx->vf_reps_lock);
	list_for_each_entry(efv, &efx->vf_reps, list)
		if (efv->mport == mport) {
			out = efv;
			break;
		}
	spin_unlock_bh(&efx->vf_reps_lock);
	return out;
}