// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/filter.h>

#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
#include "ice_lib.h"

/**
 * ice_release_rx_desc - Store the new ring state and bump the HW tail register
 * @rx_ring: ring to bump
 * @val: new value for next_to_use/next_to_alloc
 */
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
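	/* remember the last tail value we wrote to HW, i.e. the previous
	 * next_to_use rounded down to an 8-descriptor boundary
	 */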
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns the appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be
 * used by skb_set_hash, based on the PTYPE parsed by the HW Rx pipeline and
 * reported in the Rx descriptor.
 */
static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
		return PKT_HASH_TYPE_L2;

	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u16 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

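	/* only the flexible NIC descriptor profile reports the RSS hash at
	 * this offset in the writeback; skip other descriptor formats
	 */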
	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

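	/* IPE/EIPE set means the inner or outer IPv4 header checksum failed,
	 * so treat the packet as a checksum failure
	 */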
	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

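	/* IPV6EXADD indicates IPv6 extension headers; HW can't validate the
	 * checksum across them, so treat this as a checksum failure too
	 */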
	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum,
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);

	if (rx_ring->ptp_rx)
		ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with/without VLAN tag).
 */
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	netdev_features_t features = rx_ring->netdev->features;
	bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);

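	/* only report a tag with a non-zero VID; VID 0 denotes a
	 * priority-only tag, which isn't forwarded with the skb
	 */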
	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
 * @xdp_ring: XDP ring to clean
 */
static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
	u16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *next_dd_desc;
	u16 next_dd = xdp_ring->next_dd;
	struct ice_tx_buf *tx_buf;
	int i;

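	/* if HW hasn't set the DD bit on the descriptor at next_dd, the next
	 * quarter of the ring hasn't completed transmission yet, so there is
	 * nothing to reclaim
	 */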
	next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
	if (!(next_dd_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
		return;

	for (i = 0; i < tx_thresh; i++) {
		tx_buf = &xdp_ring->tx_buf[ntc];

		total_bytes += tx_buf->bytecount;
		/* normally we'd add tx_buf->gso_segs here, but it's always
		 * 1 for XDP buffers
		 */
		total_pkts++;

		page_frag_free(tx_buf->raw_buf);
		dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buf, len, 0);
		tx_buf->raw_buf = NULL;

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

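	/* clear DD so this descriptor isn't treated as done again, then move
	 * next_dd one quarter ahead, wrapping back to the first threshold
	 * boundary once it runs past the end of the ring
	 */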
	next_dd_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
	if (xdp_ring->next_dd > xdp_ring->count)
		xdp_ring->next_dd = tx_thresh - 1;
	xdp_ring->next_to_clean = ntc;
	ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
}

/**
 * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 *
 * Returns ICE_XDP_TX on success, ICE_XDP_CONSUMED if the ring had no free
 * descriptors or the DMA mapping failed.
 */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

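	/* opportunistically reclaim completed descriptors once less than a
	 * quarter of the ring is free
	 */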
	if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
		ice_clean_xdp_irq(xdp_ring);

	if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
						      size, 0);

	xdp_ring->xdp_tx_active++;
	i++;
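	/* the ring wrapped: arm the RS bit on the pending boundary descriptor
	 * (the last one in the ring) so HW reports it done, and reset
	 * next_rs to the first quarter boundary
	 */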
	if (i == xdp_ring->count) {
		i = 0;
		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
		tx_desc->cmd_type_offset_bsz |=
			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
		xdp_ring->next_rs = tx_thresh - 1;
	}
	xdp_ring->next_to_use = i;

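	/* if next_to_use moved past the pending RS boundary, arm the RS bit
	 * there too and advance the boundary by another ring quarter
	 */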
	if (i > xdp_ring->next_rs) {
		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
		tx_desc->cmd_type_offset_bsz |=
			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
		xdp_ring->next_rs += tx_thresh;
	}

	return ICE_XDP_TX;
}

/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_TX if the frame was queued, ICE_XDP_CONSUMED if the
 * conversion or the transmit failed.
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @xdp_ring: XDP ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

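	/* the locking static key is enabled when XDP Tx rings are shared
	 * between CPUs, in which case the tail bump must be serialized
	 */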
	if (xdp_res & ICE_XDP_TX) {
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		ice_xdp_ring_update_tail(xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
	}
}