/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_net.h
 * Declarations for Netronome network device driver.
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#ifndef _NFP_NET_H_
#define _NFP_NET_H_

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/dim.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <net/xdp.h>

#include "nfp_net_ctrl.h"

#define nn_pr(nn, lvl, fmt, args...)					\
	({								\
		struct nfp_net *__nn = (nn);				\
									\
		if (__nn->dp.netdev)					\
			netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
		else							\
			dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
	})

#define nn_err(nn, fmt, args...)	nn_pr(nn, KERN_ERR, fmt, ## args)
#define nn_warn(nn, fmt, args...)	nn_pr(nn, KERN_WARNING, fmt, ## args)
#define nn_info(nn, fmt, args...)	nn_pr(nn, KERN_INFO, fmt, ## args)
#define nn_dbg(nn, fmt, args...)	nn_pr(nn, KERN_DEBUG, fmt, ## args)

#define nn_dp_warn(dp, fmt, args...)					\
	({								\
		struct nfp_net_dp *__dp = (dp);				\
									\
		if (unlikely(net_ratelimit())) {			\
			if (__dp->netdev)				\
				netdev_warn(__dp->netdev, fmt, ## args); \
			else						\
				dev_warn(__dp->dev, fmt, ## args);	\
		}							\
	})

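/* Usage sketch for the logging helpers above (illustrative only;
 * "my_feature_enable" is a hypothetical caller):
 *
 *	static int my_feature_enable(struct nfp_net *nn)
 *	{
 *		if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) {
 *			nn_err(nn, "RSS not supported by FW\n");
 *			return -EOPNOTSUPP;
 *		}
 *		nn_dbg(nn, "RSS enabled\n");
 *		return 0;
 *	}
 *
 * nn_dp_warn() is the rate-limited variant meant for per-packet datapath
 * errors, e.g. nn_dp_warn(dp, "DMA mapping failed\n").
 */
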
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT	5

/* Interval for reading offloaded filter stats */
#define NFP_NET_STAT_POLL_IVL	msecs_to_jiffies(100)

/* Bar allocation */
#define NFP_NET_CTRL_BAR	0
#define NFP_NET_Q0_BAR		2
#define NFP_NET_Q1_BAR		4	/* OBSOLETE */

/* Default size for MTU and freelist buffer sizes */
#define NFP_NET_DEFAULT_MTU	1500U

/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND	64

/* Interrupt definitions */
#define NFP_NET_NON_Q_VECTORS	2
#define NFP_NET_IRQ_LSC_IDX	0
#define NFP_NET_IRQ_EXN_IDX	1
#define NFP_NET_MIN_VNIC_IRQS	(NFP_NET_NON_Q_VECTORS + 1)

/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS	64	/* Max. # of Tx rings per device */
#define NFP_NET_MAX_RX_RINGS	64	/* Max. # of Rx rings per device */
#define NFP_NET_MAX_R_VECS	(NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
				 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS	(NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)

#define NFP_NET_TX_DESCS_DEFAULT 4096	/* Default # of Tx descs per ring */
#define NFP_NET_RX_DESCS_DEFAULT 4096	/* Default # of Rx descs per ring */

#define NFP_NET_FL_BATCH	16	/* Add freelist in this batch size */
#define NFP_NET_XDP_MAX_COMPLETE 2048	/* XDP bufs to reclaim in NAPI poll */

/* MC definitions */
#define NFP_NET_CFG_MAC_MC_MAX	1024	/* Max. number of MC addresses per port */

/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS	(NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))

#define NFP_NET_RX_BUF_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
#define NFP_NET_RX_BUF_NON_DATA	(NFP_NET_RX_BUF_HEADROOM +		\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Forward declarations */
struct nfp_cpp;
struct nfp_dev_info;
struct nfp_dp_ops;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;
struct xsk_buff_pool;

struct nfp_nfd3_tx_desc;
struct nfp_nfd3_tx_buf;

struct nfp_nfdk_tx_desc;
struct nfp_nfdk_tx_buf;

/* Convenience macro for wrapping descriptor index on ring size */
#define D_IDX(ring, idx)	((idx) & ((ring)->cnt - 1))

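/* Sketch of how the free-running ring pointers interact with D_IDX()
 * (illustrative only; relies on ring->cnt being a power of two, which
 * the masking above assumes):
 *
 *	u32 slot = D_IDX(tx_ring, tx_ring->wr_p);	// next slot to fill
 *	u32 used = tx_ring->wr_p - tx_ring->rd_p;	// valid across wrap
 */
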
/* Convenience macro for writing dma address into RX/TX descriptors */
#define nfp_desc_set_dma_addr_40b(desc, dma_addr)			\
	do {								\
		__typeof__(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
		__d->dma_addr_hi = upper_32_bits(__addr) & 0xff;	\
	} while (0)

#define nfp_desc_set_dma_addr_48b(desc, dma_addr)			\
	do {								\
		__typeof__(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_hi = cpu_to_le16(upper_32_bits(__addr));	\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
	} while (0)

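/* Usage sketch for the helpers above (illustrative only): the 40b variant
 * targets descriptors with a u8 dma_addr_hi field (NFD3 TX), the 48b
 * variant descriptors with a __le16 one (NFDK TX):
 *
 *	nfp_desc_set_dma_addr_40b(&tx_ring->txds[wr_idx], dma_addr);
 *	nfp_desc_set_dma_addr_48b(&tx_ring->ktxds[wr_idx], dma_addr);
 */
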
/**
 * struct nfp_net_tx_ring - TX ring structure
 * @r_vec: Back pointer to ring vector structure
 * @idx: Ring index from Linux's perspective
 * @data_pending: number of bytes added to current block (NFDK only)
 * @qcp_q: Pointer to base of the QCP TX queue
 * @txrwb: TX pointer write back area
 * @cnt: Size of the queue in number of descriptors
 * @wr_p: TX ring write pointer (free running)
 * @rd_p: TX ring read pointer (free running)
 * @qcp_rd_p: Local copy of QCP TX queue read pointer
 * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
 *		(used for .xmit_more delayed kick)
 * @txbufs: Array of in-flight TX buffers, freed on transmit completion (NFD3)
 * @ktxbufs: Array of in-flight TX buffers, freed on transmit completion (NFDK)
 * @txds: Virtual address of TX ring in host memory (NFD3)
 * @ktxds: Virtual address of TX ring in host memory (NFDK)
 *
 * @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue
 * @dma: DMA address of the TX ring
 * @size: Size, in bytes, of the TX ring (needed to free)
 * @is_xdp: Is this an XDP TX ring?
 */
struct nfp_net_tx_ring {
	struct nfp_net_r_vector *r_vec;

	u16 idx;
	u16 data_pending;
	u8 __iomem *qcp_q;
	u64 *txrwb;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;
	u32 qcp_rd_p;

	u32 wr_ptr_add;

	union {
		struct nfp_nfd3_tx_buf *txbufs;
		struct nfp_nfdk_tx_buf *ktxbufs;
	};
	union {
		struct nfp_nfd3_tx_desc *txds;
		struct nfp_nfdk_tx_desc *ktxds;
	};

	/* Cold data follows */
	int qcidx;

	dma_addr_t dma;
	size_t size;
	bool is_xdp;
} ____cacheline_aligned;

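/* Sketch of deriving ring occupancy from the free-running pointers above
 * (illustrative only; the real accounting lives in the NFD3/NFDK datapath
 * sources):
 *
 *	static inline u32 nfp_example_tx_ring_used(struct nfp_net_tx_ring *r)
 *	{
 *		return r->wr_p - r->rd_p;	// never exceeds r->cnt
 *	}
 */
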
/* RX and freelist descriptor format */

#define PCIE_DESC_RX_DD			BIT(7)
#define PCIE_DESC_RX_META_LEN_MASK	GENMASK(6, 0)

/* Flags in the RX descriptor */
#define PCIE_DESC_RX_RSS		cpu_to_le16(BIT(15))
#define PCIE_DESC_RX_I_IP4_CSUM		cpu_to_le16(BIT(14))
#define PCIE_DESC_RX_I_IP4_CSUM_OK	cpu_to_le16(BIT(13))
#define PCIE_DESC_RX_I_TCP_CSUM		cpu_to_le16(BIT(12))
#define PCIE_DESC_RX_I_TCP_CSUM_OK	cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM		cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK	cpu_to_le16(BIT(9))
#define PCIE_DESC_RX_DECRYPTED		cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP		cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM		cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK	cpu_to_le16(BIT(5))
#define PCIE_DESC_RX_TCP_CSUM		cpu_to_le16(BIT(4))
#define PCIE_DESC_RX_TCP_CSUM_OK	cpu_to_le16(BIT(3))
#define PCIE_DESC_RX_UDP_CSUM		cpu_to_le16(BIT(2))
#define PCIE_DESC_RX_UDP_CSUM_OK	cpu_to_le16(BIT(1))
#define PCIE_DESC_RX_VLAN		cpu_to_le16(BIT(0))

#define PCIE_DESC_RX_CSUM_ALL		(PCIE_DESC_RX_IP4_CSUM |	\
					 PCIE_DESC_RX_TCP_CSUM |	\
					 PCIE_DESC_RX_UDP_CSUM |	\
					 PCIE_DESC_RX_I_IP4_CSUM |	\
					 PCIE_DESC_RX_I_TCP_CSUM |	\
					 PCIE_DESC_RX_I_UDP_CSUM)
#define PCIE_DESC_RX_CSUM_OK_SHIFT	1
#define __PCIE_DESC_RX_CSUM_ALL		le16_to_cpu(PCIE_DESC_RX_CSUM_ALL)
#define __PCIE_DESC_RX_CSUM_ALL_OK	(__PCIE_DESC_RX_CSUM_ALL >>	\
					 PCIE_DESC_RX_CSUM_OK_SHIFT)

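/* Each *_CSUM_OK flag sits exactly one bit below its *_CSUM counterpart,
 * so shifting the "checksum present" mask right by
 * PCIE_DESC_RX_CSUM_OK_SHIFT yields the "checksum good" mask.  A
 * hypothetical all-good test (illustrative only):
 *
 *	u16 flags = le16_to_cpu(rxd->rxd.flags);
 *
 *	bool all_ok = (flags & __PCIE_DESC_RX_CSUM_ALL_OK) ==
 *		      __PCIE_DESC_RX_CSUM_ALL_OK;
 */
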
struct nfp_net_rx_desc {
	union {
		struct {
			__le16 dma_addr_hi; /* High bits of the buf address */
			u8 reserved; /* Must be zero */
			u8 meta_len_dd; /* Must be zero */

			__le32 dma_addr_lo; /* Low bits of the buffer address */
		} __packed fld;

		struct {
			__le16 data_len; /* Length of the frame + meta data */
			u8 reserved;
			u8 meta_len_dd; /* Length of meta data prepended +
					 * descriptor done flag.
					 */

			__le16 flags; /* RX flags. See @PCIE_DESC_RX_* */
			__le16 vlan; /* VLAN if stripped */
		} __packed rxd;

		__le32 vals[2];
	};
};

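/* Sketch of consuming an RX descriptor (illustrative; the real RX path
 * adds the necessary read barriers and metadata parsing on top):
 *
 *	u8 meta_len_dd = rxd->rxd.meta_len_dd;
 *
 *	if (!(meta_len_dd & PCIE_DESC_RX_DD))
 *		return;		// descriptor not done yet
 *	meta_len = meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
 *	data_len = le16_to_cpu(rxd->rxd.data_len);
 */
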
#define NFP_NET_META_FIELD_MASK	GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
#define NFP_NET_VLAN_CTAG	0
#define NFP_NET_VLAN_STAG	1

struct nfp_meta_parsed {
	u8 hash_type;
	u8 csum_type;
	u32 hash;
	u32 mark;
	u32 portid;
	__wsum csum;
	struct {
		bool stripped;
		u8 tpid;
		u16 tci;
	} vlan;

#ifdef CONFIG_NFP_NET_IPSEC
	u32 ipsec_saidx;
#endif
};

struct nfp_net_rx_hash {
	__be32 hash_type;
	__be32 hash;
};

/**
 * struct nfp_net_rx_buf - software RX buffer descriptor
 * @frag: page fragment buffer
 * @dma_addr: DMA mapping address of the buffer
 */
struct nfp_net_rx_buf {
	void *frag;
	dma_addr_t dma_addr;
};

/**
 * struct nfp_net_xsk_rx_buf - software RX XSK buffer descriptor
 * @dma_addr: DMA mapping address of the buffer
 * @xdp: XSK buffer pool handle (for AF_XDP)
 */
struct nfp_net_xsk_rx_buf {
	dma_addr_t dma_addr;
	struct xdp_buff *xdp;
};

/**
 * struct nfp_net_rx_ring - RX ring structure
 * @r_vec: Back pointer to ring vector structure
 * @cnt: Size of the queue in number of descriptors
 * @wr_p: FL/RX ring write pointer (free running)
 * @rd_p: FL/RX ring read pointer (free running)
 * @idx: Ring index from Linux's perspective
 * @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist
 * @qcp_fl: Pointer to base of the QCP freelist queue
 * @rxbufs: Array of posted FL/RX buffers
 * @xsk_rxbufs: Array of posted FL/RX buffers (for AF_XDP)
 * @rxds: Virtual address of FL/RX ring in host memory
 * @xdp_rxq: RX-ring info avail for XDP
 * @dma: DMA address of the FL/RX ring
 * @size: Size, in bytes, of the FL/RX ring (needed to free)
 */
struct nfp_net_rx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;

	u32 idx;

	int fl_qcidx;
	u8 __iomem *qcp_fl;

	struct nfp_net_rx_buf *rxbufs;
	struct nfp_net_xsk_rx_buf *xsk_rxbufs;
	struct nfp_net_rx_desc *rxds;

	struct xdp_rxq_info xdp_rxq;

	dma_addr_t dma;
	size_t size;
} ____cacheline_aligned;

/**
 * struct nfp_net_r_vector - Per ring interrupt vector configuration
 * @nfp_net: Backpointer to nfp_net structure
 * @napi: NAPI structure for this ring vec
 * @tasklet: ctrl vNIC, tasklet for servicing the r_vec
 * @queue: ctrl vNIC, send queue
 * @lock: ctrl vNIC, r_vec lock protects @queue
 * @tx_ring: Pointer to TX ring
 * @rx_ring: Pointer to RX ring
 * @xdp_ring: Pointer to an extra TX ring for XDP
 * @xsk_pool: XSK buffer pool active on vector queue pair (for AF_XDP)
 * @irq_entry: MSI-X table entry (use for talking to the device)
 * @event_ctr: Number of interrupt events
 * @rx_dim: Dynamic interrupt moderation structure for RX
 * @tx_dim: Dynamic interrupt moderation structure for TX
 * @rx_sync: Seqlock for atomic updates of RX stats
 * @rx_pkts: Number of received packets
 * @rx_bytes: Number of received bytes
 * @rx_drops: Number of packets dropped on RX due to lack of resources
 * @hw_csum_rx_ok: Counter of packets where the HW checksum was OK
 * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
 * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
 * @hw_csum_rx_error: Counter of packets with bad checksums
 * @hw_tls_rx: Number of packets with TLS decrypted by hardware
 * @tx_sync: Seqlock for atomic updates of TX stats
 * @tx_pkts: Number of Transmitted packets
 * @tx_bytes: Number of Transmitted bytes
 * @hw_csum_tx: Counter of packets with TX checksum offload requested
 * @hw_csum_tx_inner: Counter of inner TX checksum offload requests
 * @tx_gather: Counter of packets with Gather DMA
 * @tx_lso: Counter of LSO packets sent
 * @hw_tls_tx: Counter of TLS packets sent with crypto offloaded to HW
 * @tls_tx_fallback: Counter of TLS packets sent which had to be encrypted
 *		by the fallback path because packets came out of order
 * @tls_tx_no_fallback: Counter of TLS packets not sent because the fallback
 *		path could not encrypt them
 * @tx_errors: How many TX errors were encountered
 * @tx_busy: How often was TX busy (no space)?
 * @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
 * @irq_vector: Interrupt vector number (use for talking to the OS)
 * @handler: Interrupt handler for this ring vector
 * @name: Name of the interrupt vector
 * @affinity_mask: SMP affinity mask for this vector
 *
 * This structure ties RX and TX rings to interrupt vectors and a NAPI
 * context. This currently only supports one RX and TX ring per
 * interrupt vector but might be extended in the future to allow
 * association of multiple rings per vector.
 */
struct nfp_net_r_vector {
	struct nfp_net *nfp_net;
	union {
		struct napi_struct napi;
		struct {
			struct tasklet_struct tasklet;
			struct sk_buff_head queue;
			spinlock_t lock;
		};
	};

	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_rx_ring *rx_ring;

	u16 irq_entry;

	u16 event_ctr;
	struct dim rx_dim;
	struct dim tx_dim;

	struct u64_stats_sync rx_sync;
	u64 rx_pkts;
	u64 rx_bytes;
	u64 rx_drops;
	u64 hw_csum_rx_ok;
	u64 hw_csum_rx_inner_ok;
	u64 hw_csum_rx_complete;
	u64 hw_tls_rx;

	u64 hw_csum_rx_error;
	u64 rx_replace_buf_alloc_fail;

	struct nfp_net_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;

	struct u64_stats_sync tx_sync;
	u64 tx_pkts;
	u64 tx_bytes;

	u64 ____cacheline_aligned_in_smp hw_csum_tx;
	u64 hw_csum_tx_inner;
	u64 tx_gather;
	u64 tx_lso;
	u64 hw_tls_tx;

	u64 tls_tx_fallback;
	u64 tls_tx_no_fallback;
	u64 tx_errors;
	u64 tx_busy;

	/* Cold data follows */

	u32 irq_vector;
	irq_handler_t handler;
	char name[IFNAMSIZ + 8];
	cpumask_t affinity_mask;
} ____cacheline_aligned;

/* Firmware version as it is written in the 32bit value in the BAR */
struct nfp_net_fw_version {
	u8 minor;
	u8 major;
	u8 class;

	/* This byte is available for future use; currently,
	 * BIT0: dp type, BIT[7:1]: reserved
	 */
	u8 extend;
} __packed;

static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
				     u8 extend, u8 class, u8 major, u8 minor)
{
	return fw_ver->extend == extend &&
	       fw_ver->class == class &&
	       fw_ver->major == major &&
	       fw_ver->minor == minor;
}

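/* Usage sketch (the version numbers below are made up for illustration):
 *
 *	if (!nfp_net_fw_ver_eq(&nn->fw_ver, 0, 0, 0, 1))
 *		nn_warn(nn, "unsupported FW ABI %d.%d.%d.%d\n",
 *			nn->fw_ver.extend, nn->fw_ver.class,
 *			nn->fw_ver.major, nn->fw_ver.minor);
 */
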
struct nfp_stat_pair {
	u64 pkts;
	u64 bytes;
};

/**
 * struct nfp_net_dp - NFP network device datapath data structure
 * @dev: Backpointer to struct device
 * @netdev: Backpointer to net_device structure
 * @is_vf: Is the driver attached to a VF?
 * @chained_metadata_format: Firmware will use new metadata format
 * @ktls_tx: Is kTLS TX enabled?
 * @rx_dma_dir: Mapping direction for RX buffers
 * @rx_dma_off: Offset at which packets are DMA mapped (for XDP headroom)
 * @rx_offset: Offset in the RX buffers where packet data starts
 * @ctrl: Local copy of the control register/word.
 * @ctrl_w1: Local copy of the control register/word1.
 * @fl_bufsz: Currently configured size of the freelist buffers
 * @xdp_prog: Installed XDP program
 * @tx_rings: Array of pre-allocated TX ring structures
 * @rx_rings: Array of pre-allocated RX ring structures
 * @ctrl_bar: Pointer to mapped control BAR
 *
 * @ops: Callbacks and parameters for this vNIC's NFD version
 * @txrwb: TX pointer write back area (indexed by queue id)
 * @txrwb_dma: TX pointer write back area DMA address
 * @txd_cnt: Size of the TX ring in number of min size packets
 * @rxd_cnt: Size of the RX ring in number of min size packets
 * @num_r_vecs: Number of used ring vectors
 * @num_tx_rings: Currently configured number of TX rings
 * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
 * @num_rx_rings: Currently configured number of RX rings
 * @mtu: Device MTU
 * @xsk_pools: XSK buffer pools, @max_r_vecs in size (for AF_XDP).
 */
struct nfp_net_dp {
	struct device *dev;
	struct net_device *netdev;

	u8 is_vf:1;
	u8 chained_metadata_format:1;
	u8 ktls_tx:1;

	u8 rx_dma_dir;
	u8 rx_offset;

	u32 rx_dma_off;

	u32 ctrl;
	u32 ctrl_w1;
	u32 fl_bufsz;

	struct bpf_prog *xdp_prog;

	struct nfp_net_tx_ring *tx_rings;
	struct nfp_net_rx_ring *rx_rings;

	u8 __iomem *ctrl_bar;

	/* Cold data follows */

	const struct nfp_dp_ops *ops;

	u64 *txrwb;
	dma_addr_t txrwb_dma;

	unsigned int txd_cnt;
	unsigned int rxd_cnt;

	unsigned int num_r_vecs;

	unsigned int num_tx_rings;
	unsigned int num_stack_tx_rings;
	unsigned int num_rx_rings;

	unsigned int mtu;

	struct xsk_buff_pool **xsk_pools;
};

/**
 * struct nfp_net - NFP network device structure
 * @dp: Datapath structure
 * @dev_info: NFP ASIC params
 * @id: vNIC id within the PF (0 for VFs)
 * @fw_ver: Firmware version
 * @cap: Capabilities advertised by the Firmware
 * @cap_w1: Extended capabilities word advertised by the Firmware
 * @max_mtu: Maximum supported MTU advertised by the Firmware
 * @rss_hfunc: RSS selected hash function
 * @rss_cfg: RSS configuration
 * @rss_key: RSS secret key
 * @rss_itbl: RSS indirection table
 * @xdp: Information about the driver XDP program
 * @xdp_hw: Information about the HW XDP program
 * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
 * @max_tx_rings: Maximum number of TX rings supported by the Firmware
 * @max_rx_rings: Maximum number of RX rings supported by the Firmware
 * @stride_rx: Queue controller RX queue spacing
 * @stride_tx: Queue controller TX queue spacing
 * @r_vecs: Pre-allocated array of ring vectors
 * @irq_entries: Pre-allocated array of MSI-X entries
 * @lsc_handler: Handler for Link State Change interrupt
 * @lsc_name: Name for Link State Change interrupt
 * @exn_handler: Handler for Exception interrupt
 * @exn_name: Name for Exception interrupt
 * @shared_handler: Handler for shared interrupts
 * @shared_name: Name for shared interrupt
 * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
 *		@reconfig_sync_present and HW reconfiguration request
 *		regs/machinery from async requests (sync must take
 *		@bar_lock)
 * @reconfig_posted: Pending reconfig bits coming from async sources
 * @reconfig_timer_active: Timer for reading reconfiguration results is pending
 * @reconfig_sync_present: Some thread is performing synchronous reconfig
 * @reconfig_timer: Timer for async reading of reconfig results
 * @reconfig_in_progress_update: Update FW is processing now (debug only)
 * @bar_lock: vNIC config BAR access lock, protects: update,
 *		mailbox area, crypto TLV
 * @link_up: Is the link up?
 * @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
 * @rx_coalesce_adapt_on: Is RX interrupt moderation adaptive?
 * @tx_coalesce_adapt_on: Is TX interrupt moderation adaptive?
 * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
 * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
 * @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter
 * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
 * @qcp_cfg: Pointer to QCP queue used for configuration notification
 * @tx_bar: Pointer to mapped TX queues
 * @rx_bar: Pointer to mapped FL/RX queues
 * @xa_ipsec: IPsec xarray SA data
 * @tlv_caps: Parsed TLV capabilities
 * @ktls_tx_conn_cnt: Number of offloaded kTLS TX connections
 * @ktls_rx_conn_cnt: Number of offloaded kTLS RX connections
 * @ktls_conn_id_gen: Trivial generator for kTLS connection ids (for TX)
 * @ktls_no_space: Counter of firmware rejecting kTLS connection due to
 *		lack of space
 * @ktls_rx_resync_req: Counter of TLS RX resync requested
 * @ktls_rx_resync_ign: Counter of TLS RX resync requests ignored
 * @ktls_rx_resync_sent: Counter of TLS RX resync completed
 * @mbox_cmsg: Common Control Message via vNIC mailbox state
 * @mbox_cmsg.queue: CCM mbox queue of pending messages
 * @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
 * @mbox_cmsg.workq: CCM mbox work queue for @wait_work and @runq_work
 * @mbox_cmsg.wait_work: CCM mbox posted msg reconfig wait work
 * @mbox_cmsg.runq_work: CCM mbox posted msg queue runner work
 * @mbox_cmsg.tag: CCM mbox message tag allocator
 * @debugfs_dir: Device directory in debugfs
 * @vnic_list: Entry on device vNIC list
 * @pdev: Backpointer to PCI device
 * @app: APP handle if available
 * @vnic_no_name: For non-port PF vNIC make ndo_get_phys_port_name return
 *		-EOPNOTSUPP to keep backwards compatibility (set by app)
 * @port: Pointer to nfp_port structure if vNIC is a port
 * @mbox_amsg: Asynchronously processed message via mailbox
 * @mbox_amsg.lock: Protect message list
 * @mbox_amsg.list: List of messages to process
 * @mbox_amsg.work: Work to process message asynchronously
 * @app_priv: APP private data for this vNIC
 */
struct nfp_net {
	struct nfp_net_dp dp;

	const struct nfp_dev_info *dev_info;
	struct nfp_net_fw_version fw_ver;

	u32 id;

	u32 cap;
	u32 cap_w1;
	u32 max_mtu;

	u8 rss_hfunc;
	u32 rss_cfg;
	u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
	u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];

	struct xdp_attachment_info xdp;
	struct xdp_attachment_info xdp_hw;

	unsigned int max_tx_rings;
	unsigned int max_rx_rings;

	int stride_tx;
	int stride_rx;

	unsigned int max_r_vecs;
	struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
	struct msix_entry irq_entries[NFP_NET_MAX_IRQS];

	irq_handler_t lsc_handler;
	char lsc_name[IFNAMSIZ + 8];

	irq_handler_t exn_handler;
	char exn_name[IFNAMSIZ + 8];

	irq_handler_t shared_handler;
	char shared_name[IFNAMSIZ + 8];

	bool link_up;
	spinlock_t link_status_lock;

	spinlock_t reconfig_lock;
	u32 reconfig_posted;
	bool reconfig_timer_active;
	bool reconfig_sync_present;
	struct timer_list reconfig_timer;
	u32 reconfig_in_progress_update;

	struct semaphore bar_lock;

	bool rx_coalesce_adapt_on;
	bool tx_coalesce_adapt_on;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_max_frames;
	u32 tx_coalesce_usecs;
	u32 tx_coalesce_max_frames;

	u8 __iomem *qcp_cfg;

	u8 __iomem *tx_bar;
	u8 __iomem *rx_bar;

#ifdef CONFIG_NFP_NET_IPSEC
	struct xarray xa_ipsec;
#endif

	struct nfp_net_tlv_caps tlv_caps;

	unsigned int ktls_tx_conn_cnt;
	unsigned int ktls_rx_conn_cnt;

	atomic64_t ktls_conn_id_gen;

	atomic_t ktls_no_space;
	atomic_t ktls_rx_resync_req;
	atomic_t ktls_rx_resync_ign;
	atomic_t ktls_rx_resync_sent;

	struct {
		struct sk_buff_head queue;
		wait_queue_head_t wq;
		struct workqueue_struct *workq;
		struct work_struct wait_work;
		struct work_struct runq_work;
		u16 tag;
	} mbox_cmsg;

	struct dentry *debugfs_dir;

	struct list_head vnic_list;

	struct pci_dev *pdev;
	struct nfp_app *app;

	bool vnic_no_name;

	struct nfp_port *port;

	struct {
		spinlock_t lock;
		struct list_head list;
		struct work_struct work;
	} mbox_amsg;

	void *app_priv;
};

struct nfp_mbox_amsg_entry {
	struct list_head list;
	int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
	u32 cmd;
	char msg[];
};

int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
				 int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *));

/* Functions to read/write from/to a BAR
 * Performs any endian conversion necessary.
 */
static inline u16 nn_readb(struct nfp_net *nn, int off)
{
	return readb(nn->dp.ctrl_bar + off);
}

static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
	writeb(val, nn->dp.ctrl_bar + off);
}

static inline u16 nn_readw(struct nfp_net *nn, int off)
{
	return readw(nn->dp.ctrl_bar + off);
}

static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
	writew(val, nn->dp.ctrl_bar + off);
}

static inline u32 nn_readl(struct nfp_net *nn, int off)
{
	return readl(nn->dp.ctrl_bar + off);
}

static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
{
	writel(val, nn->dp.ctrl_bar + off);
}

static inline u64 nn_readq(struct nfp_net *nn, int off)
{
	return readq(nn->dp.ctrl_bar + off);
}

static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
{
	writeq(val, nn->dp.ctrl_bar + off);
}

/* Flush posted PCI writes by reading something without side effects */
static inline void nn_pci_flush(struct nfp_net *nn)
{
	nn_readl(nn, NFP_NET_CFG_VERSION);
}

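/* Sketch of a typical control BAR update using the accessors above
 * (illustrative only; the real path, nfp_net_reconfig(), also rings the
 * config queue and waits for the firmware to acknowledge):
 *
 *	nn_writel(nn, NFP_NET_CFG_MTU, new_mtu);
 *	nn_writel(nn, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_GEN);
 *	nn_pci_flush(nn);	// make sure the posted writes went out
 */
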
/* Queue Controller Peripheral access functions and definitions.
 *
 * Some of the BARs of the NFP are mapped to portions of the Queue
 * Controller Peripheral (QCP) address space on the NFP.  A QCP queue
 * has a read and a write pointer (as well as a size and flags,
 * indicating overflow etc).  The QCP offers a number of different
 * operations on queue pointers, but here we only provide functions to
 * either add to a pointer or to read the pointer value.
 */
#define NFP_QCP_QUEUE_ADDR_SZ			0x800
#define NFP_QCP_QUEUE_OFF(_x)			((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR			0x0000
#define NFP_QCP_QUEUE_ADD_WPTR			0x0004
#define NFP_QCP_QUEUE_STS_LO			0x0008
#define NFP_QCP_QUEUE_STS_LO_READPTR_mask	0x3ffff
#define NFP_QCP_QUEUE_STS_HI			0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask	0x3ffff

/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};

/**
 * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
 *
 * @q: Base address for queue structure
 * @val: Value to add to the queue pointer
 */
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
	writel(val, q + NFP_QCP_QUEUE_ADD_RPTR);
}

/**
 * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue
 *
 * @q: Base address for queue structure
 * @val: Value to add to the queue pointer
 */
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
	writel(val, q + NFP_QCP_QUEUE_ADD_WPTR);
}

static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
{
	u32 off;
	u32 val;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_STS_LO;
	else
		off = NFP_QCP_QUEUE_STS_HI;

	val = readl(q + off);

	if (ptr == NFP_QCP_READ_PTR)
		return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
	else
		return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
}

/**
 * nfp_qcp_rd_ptr_read() - Read the current read pointer value for a queue
 * @q: Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
}

/**
 * nfp_qcp_wr_ptr_read() - Read the current write pointer value for a queue
 * @q: Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}

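/* Sketch of the .xmit_more style delayed doorbell the TX path builds on
 * top of nfp_qcp_wr_ptr_add() (illustrative; nd_q stands for the struct
 * netdev_queue backing this ring):
 *
 *	tx_ring->wr_ptr_add += nr_descs;
 *	if (!netdev_xmit_more() || netif_xmit_stopped(nd_q)) {
 *		nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
 *		tx_ring->wr_ptr_add = 0;
 *	}
 */
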
u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue);

static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
{
	WARN_ON_ONCE(!nn->dp.netdev && nn->port);
	return !!nn->dp.netdev;
}

static inline bool nfp_net_running(struct nfp_net *nn)
{
	return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
}

static inline const char *nfp_net_name(struct nfp_net *nn)
{
	return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}

static inline void nfp_ctrl_lock(struct nfp_net *nn)
	__acquires(&nn->r_vecs[0].lock)
{
	spin_lock_bh(&nn->r_vecs[0].lock);
}

static inline void nfp_ctrl_unlock(struct nfp_net *nn)
	__releases(&nn->r_vecs[0].lock)
{
	spin_unlock_bh(&nn->r_vecs[0].lock);
}

static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
{
	down(&nn->bar_lock);
}

static inline bool nn_ctrl_bar_trylock(struct nfp_net *nn)
{
	return !down_trylock(&nn->bar_lock);
}

static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
{
	up(&nn->bar_lock);
}

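/* Sketch of the expected bar_lock usage around a synchronous reconfig
 * (illustrative only):
 *
 *	nn_ctrl_bar_lock(nn);
 *	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
 *	err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 *	nn_ctrl_bar_unlock(nn);
 */
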
/* Globals */
extern const char nfp_driver_version[];

extern const struct net_device_ops nfp_nfd3_netdev_ops;
extern const struct net_device_ops nfp_nfdk_netdev_ops;

static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
{
	return netdev->netdev_ops == &nfp_nfd3_netdev_ops ||
	       netdev->netdev_ops == &nfp_nfdk_netdev_ops;
}

static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
{
	if ((usecs >= ((1 << 16) - 1)) || (pkts >= ((1 << 16) - 1)))
		return -EINVAL;

	return 0;
}

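/* Usage sketch, e.g. from an ethtool set_coalesce handler (hypothetical
 * variable names):
 *
 *	if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs,
 *					ec->rx_max_coalesced_frames))
 *		return -EINVAL;
 */
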
/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar);

struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
	      void __iomem *ctrl_bar, bool needs_netdev,
	      unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);

int nfp_net_init(struct nfp_net *nn);
void nfp_net_clean(struct nfp_net *nn);

int nfp_ctrl_open(struct nfp_net *nn);
void nfp_ctrl_close(struct nfp_net *nn);

void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int __nfp_net_reconfig(struct nfp_net *nn, u32 update);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);

unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int want_irqs);
void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n);
struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
	       struct sk_buff *skb, u64 *tls_handle, int *nr_frags);
void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle);

struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
			  struct netlink_ext_ack *extack);

#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
{
}

static inline void nfp_net_debugfs_destroy(void)
{
}

static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
	return NULL;
}

static inline void
nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
}

static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
}
#endif /* CONFIG_NFP_DEBUG */

#endif /* _NFP_NET_H_ */