/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2019 Netronome Systems, Inc. */

#ifndef _NFP_NET_DP_
#define _NFP_NET_DP_

#include "nfp_net.h"

static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
	return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
				    dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				    dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
	dma_sync_single_for_device(dp->dev, dma_addr,
				   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				   dp->rx_dma_dir);
}

static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
					dma_addr_t dma_addr)
{
	dma_unmap_single_attrs(dp->dev, dma_addr,
			       dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
			       dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
					   dma_addr_t dma_addr,
					   unsigned int len)
{
	dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
				len, dp->rx_dma_dir);
}
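
/* Illustrative usage sketch, not part of the driver ("frag" and
 * "pkt_len" are hypothetical locals): because RX fragments are mapped
 * with DMA_ATTR_SKIP_CPU_SYNC, each ownership handover between CPU and
 * device must be synced explicitly:
 *
 *	dma_addr_t dma_addr = nfp_net_dma_map_rx(dp, frag);
 *
 *	if (dma_mapping_error(dp->dev, dma_addr))
 *		return -ENOMEM;
 *	nfp_net_dma_sync_dev_rx(dp, dma_addr);		// to device
 *	// ... device DMAs a packet into the buffer ...
 *	nfp_net_dma_sync_cpu_rx(dp, dma_addr + NFP_NET_RX_BUF_HEADROOM,
 *				pkt_len);		// back to CPU
 *	nfp_net_dma_unmap_rx(dp, dma_addr);
 *
 * Note that nfp_net_dma_sync_cpu_rx() subtracts the headroom from the
 * address it is given, while the map/unmap helpers already operate on
 * the headroom-adjusted address.
 */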

/**
 * nfp_net_tx_full() - check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of the read/write
 * pointers, whether a given TX ring is full.  The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}

static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
	wmb(); /* drain writebuffer */
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
	tx_ring->wr_ptr_add = 0;
}
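
/* Illustrative xmit-path sketch (hypothetical caller; "nd_q" and
 * "ndescs" are assumed locals): the full check and the deferred
 * write-pointer update combine into the usual stop-queue / xmit_more
 * pattern:
 *
 *	if (nfp_net_tx_full(tx_ring, ndescs)) {
 *		netif_tx_stop_queue(nd_q);
 *		nfp_net_tx_xmit_more_flush(tx_ring);
 *		return NETDEV_TX_BUSY;
 *	}
 *	// ... write descriptors, advance wr_p and wr_ptr_add ...
 *	if (!netdev_xmit_more() || netif_xmit_stopped(nd_q))
 *		nfp_net_tx_xmit_more_flush(tx_ring);
 */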

static inline u32
nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
{
	/* Prefer the DMA write-back area when the ring has one; otherwise
	 * fall back to reading the queue controller pointer over the bus.
	 */
	if (tx_ring->txrwb)
		return *tx_ring->txrwb;
	return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
}

static inline void nfp_net_free_frag(void *frag, bool xdp)
{
	if (!xdp)
		skb_free_frag(frag);
	else
		__free_page(virt_to_page(frag));
}

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
 */
static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
	nn_pci_flush(nn);
}
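
/* Illustrative sketch (hypothetical poll tail): with automasked
 * vectors the interrupt stays masked until software re-arms it, so a
 * NAPI handler would typically finish with:
 *
 *	if (napi_complete_done(napi, pkts_polled))
 *		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
 */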

struct seq_file;

/* Common */
void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx);
void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx);
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);

void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);

enum nfp_nfd_version {
	NFP_NFD_VER_NFD3,
	NFP_NFD_VER_NFDK,
};

/**
 * struct nfp_dp_ops - Hooks wrapping the different datapath implementations
 * @version:			Datapath (NFD) version
 * @tx_min_desc_per_pkt:	Minimum number of TX descriptors needed per packet
 * @cap_mask:			Mask of supported features
 * @poll:			NAPI poll for normal RX/TX
 * @xsk_poll:			NAPI poll when AF_XDP zero-copy is enabled
 * @ctrl_poll:			Tasklet poll for control RX/TX
 * @xmit:			Xmit for the normal path
 * @ctrl_tx_one:		Xmit for the control path
 * @rx_ring_fill_freelist:	Give buffers from the ring to the FW
 * @tx_ring_alloc:		Allocate resources for a TX ring
 * @tx_ring_reset:		Free any untransmitted buffers and reset pointers
 * @tx_ring_free:		Free resources allocated to a TX ring
 * @tx_ring_bufs_alloc:		Allocate resources for each TX buffer
 * @tx_ring_bufs_free:		Free resources allocated to each TX buffer
 * @print_tx_descs:		Show a TX ring's state for debug purposes
 */
struct nfp_dp_ops {
	enum nfp_nfd_version version;
	unsigned int tx_min_desc_per_pkt;
	u32 cap_mask;

	int (*poll)(struct napi_struct *napi, int budget);
	int (*xsk_poll)(struct napi_struct *napi, int budget);
	void (*ctrl_poll)(struct tasklet_struct *t);
	netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
	bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
			    struct sk_buff *skb, bool old);
	void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
				      struct nfp_net_rx_ring *rx_ring);
	int (*tx_ring_alloc)(struct nfp_net_dp *dp,
			     struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_reset)(struct nfp_net_dp *dp,
			      struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
	int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
				  struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
				  struct nfp_net_tx_ring *tx_ring);

	void (*print_tx_descs)(struct seq_file *file,
			       struct nfp_net_r_vector *r_vec,
			       struct nfp_net_tx_ring *tx_ring,
			       u32 d_rd_p, u32 d_wr_p);
};
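
/* Each datapath generation provides one such table; a minimal sketch,
 * modeled loosely on the NFD3 datapath (the exact member values here
 * are assumptions for illustration):
 *
 *	const struct nfp_dp_ops nfp_nfd3_ops = {
 *		.version		= NFP_NFD_VER_NFD3,
 *		.tx_min_desc_per_pkt	= 1,
 *		.poll			= nfp_nfd3_poll,
 *		.xmit			= nfp_nfd3_tx,
 *		...
 *	};
 *
 * The inline wrappers below dispatch through dp->ops so that common
 * code never branches on the NFD version directly.
 */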

static inline void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_reset(dp, tx_ring);
}

static inline void
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
			      struct nfp_net_rx_ring *rx_ring)
{
	dp->ops->rx_ring_fill_freelist(dp, rx_ring);
}

static inline int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	return dp->ops->tx_ring_alloc(dp, tx_ring);
}

static inline void
nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_free(tx_ring);
}

static inline int
nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_tx_ring *tx_ring)
{
	return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
}

static inline void
nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_bufs_free(dp, tx_ring);
}

static inline void
nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
			       struct nfp_net_r_vector *r_vec,
			       struct nfp_net_tx_ring *tx_ring,
			       u32 d_rd_p, u32 d_wr_p)
{
	dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
}

extern const struct nfp_dp_ops nfp_nfd3_ops;
extern const struct nfp_dp_ops nfp_nfdk_ops;

netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);

#endif /* _NFP_NET_DP_ */