// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.
#include <linux/skbuff.h>

#include "hnae3.h"
#include "hclge_comm_cmd.h"
#include "hclge_comm_rss.h"

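/* Default RSS hash key, used when the user has not supplied one */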
static const u8 hclge_comm_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

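/* Set the default input tuple for each flow type. On hardware earlier
 * than V3, the L4 ports of IPv6 SCTP packets cannot be used as hash input.
 */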
static void
hclge_comm_init_rss_tuple(struct hnae3_ae_dev *ae_dev,
			  struct hclge_comm_rss_tuple_cfg *rss_tuple_cfg)
{
	rss_tuple_cfg->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
	rss_tuple_cfg->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
	rss_tuple_cfg->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
	rss_tuple_cfg->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
	rss_tuple_cfg->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
	rss_tuple_cfg->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
	rss_tuple_cfg->ipv6_sctp_en =
		ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
		HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT :
		HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
	rss_tuple_cfg->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
}

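/* Initialize the software RSS configuration: default input tuples, hash
 * algorithm, hash key and indirection table.
 */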
int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
			    struct hnae3_ae_dev *ae_dev,
			    struct hclge_comm_rss_cfg *rss_cfg)
{
	u16 rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size;
	int rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
	u16 *rss_ind_tbl;

	if (nic->flags & HNAE3_SUPPORT_VF)
		rss_cfg->rss_size = nic->kinfo.rss_size;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;

	hclge_comm_init_rss_tuple(ae_dev, &rss_cfg->rss_tuple_sets);

	rss_cfg->rss_algo = rss_algo;

	rss_ind_tbl = devm_kcalloc(&ae_dev->pdev->dev, rss_ind_tbl_size,
				   sizeof(*rss_ind_tbl), GFP_KERNEL);
	if (!rss_ind_tbl)
		return -ENOMEM;

	rss_cfg->rss_indirection_tbl = rss_ind_tbl;
	memcpy(rss_cfg->rss_hash_key, hclge_comm_hash_key,
	       HCLGE_COMM_RSS_KEY_SIZE);

	hclge_comm_rss_indir_init_cfg(ae_dev, rss_cfg);

	return 0;
}

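/* Compute the per-TC queue offset and size (log2 of the rounded-up
 * rss_size) used by the RSS TC mode command.
 */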
void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
				u16 *tc_valid, u16 *tc_size)
{
	u16 roundup_size;
	u32 i;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
	}
}

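/* Program the valid bit, size and queue offset of each TC into the RSS TC
 * mode command and send it to the firmware.
 */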
int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
			       u16 *tc_valid, u16 *tc_size)
{
	struct hclge_comm_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	unsigned int i;
	int ret;

	req = (struct hclge_comm_rss_tc_mode_cmd *)desc.data;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_COMM_RSS_TC_SIZE_M,
				HCLGE_COMM_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_SIZE_MSB_B,
			      tc_size[i] >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET &
			      0x1);
		hnae3_set_field(mode, HCLGE_COMM_RSS_TC_OFFSET_M,
				HCLGE_COMM_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		dev_err(&hw->cmq.csq.pdev->dev,
			"failed to set rss tc mode, ret = %d.\n", ret);

	return ret;
}

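/* Apply the hash algorithm and (optionally) a new hash key requested via
 * ethtool, then update the shadow copy held in rss_cfg.
 */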
int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
				struct hclge_comm_hw *hw, const u8 *key,
				const u8 hfunc)
{
	u8 hash_algo;
	int ret;

	ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo);
	if (ret)
		return ret;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		ret = hclge_comm_set_rss_algo_key(hw, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(rss_cfg->rss_hash_key, key, HCLGE_COMM_RSS_KEY_SIZE);
	} else {
		ret = hclge_comm_set_rss_algo_key(hw, hash_algo,
						  rss_cfg->rss_hash_key);
		if (ret)
			return ret;
	}
	rss_cfg->rss_algo = hash_algo;

	return 0;
}

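/* Update the input tuple of one flow type from an ethtool rxnfc request
 * and sync the result back into the shadow tuple configuration.
 */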
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
			     struct hclge_comm_hw *hw,
			     struct hclge_comm_rss_cfg *rss_cfg,
			     struct ethtool_rxnfc *nfc)
{
	struct hclge_comm_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
					false);

	ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, ae_dev, req);
	if (ret) {
		dev_err(&hw->cmq.csq.pdev->dev,
			"failed to init rss tuple cmd, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret) {
		dev_err(&hw->cmq.csq.pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_COMM_RSS_KEY_SIZE;
}

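/* Derive the packet hash type (L4/L3/none) reported to the stack from the
 * currently enabled input tuples.
 */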
void hclge_comm_get_rss_type(struct hnae3_handle *nic,
			     struct hclge_comm_rss_tuple_cfg *rss_tuple_sets)
{
	if (rss_tuple_sets->ipv4_tcp_en ||
	    rss_tuple_sets->ipv4_udp_en ||
	    rss_tuple_sets->ipv4_sctp_en ||
	    rss_tuple_sets->ipv6_tcp_en ||
	    rss_tuple_sets->ipv6_udp_en ||
	    rss_tuple_sets->ipv6_sctp_en)
		nic->kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (rss_tuple_sets->ipv4_fragment_en ||
		 rss_tuple_sets->ipv6_fragment_en)
		nic->kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		nic->kinfo.rss_type = PKT_HASH_TYPE_NONE;
}

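/* Map an ethtool hash function onto the hardware hash algorithm;
 * ETH_RSS_HASH_NO_CHANGE keeps the currently configured algorithm.
 */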
int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
			       const u8 hfunc, u8 *hash_algo)
{
	switch (hfunc) {
	case ETH_RSS_HASH_TOP:
		*hash_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
		return 0;
	case ETH_RSS_HASH_XOR:
		*hash_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
		return 0;
	case ETH_RSS_HASH_NO_CHANGE:
		*hash_algo = rss_cfg->rss_algo;
		return 0;
	default:
		return -EINVAL;
	}
}

void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
				   struct hclge_comm_rss_cfg *rss_cfg)
{
	u16 i;

	/* Initialize RSS indirect table */
	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}

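/* Report the enabled input tuple bits of one flow type back to ethtool */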
int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
			     u8 *tuple_sets)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		*tuple_sets = HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

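/* Each indirection table entry is split across the command: the low byte
 * of the queue id goes to rss_qid_l[], the remaining high bit is packed
 * bit-by-bit into rss_qid_h[] here.
 */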
static void
hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
			       u16 qid, u32 j)
{
	u8 rss_msb_oft;
	u8 rss_msb_val;

	rss_msb_oft =
		j * HCLGE_COMM_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
	rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) <<
		(j * HCLGE_COMM_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
	req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
}

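/* Write the full indirection table to hardware, one command descriptor per
 * HCLGE_COMM_RSS_CFG_TBL_SIZE entries.
 */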
int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
				   struct hclge_comm_hw *hw, const u16 *indir)
{
	struct hclge_comm_rss_ind_tbl_cmd *req;
	struct hclge_desc desc;
	u16 rss_cfg_tbl_num;
	int ret;
	u16 qid;
	u16 i;
	u32 j;

	req = (struct hclge_comm_rss_ind_tbl_cmd *)desc.data;
	rss_cfg_tbl_num = ae_dev->dev_specs.rss_ind_tbl_size /
			  HCLGE_COMM_RSS_CFG_TBL_SIZE;

	for (i = 0; i < rss_cfg_tbl_num; i++) {
		hclge_comm_cmd_setup_basic_desc(&desc,
						HCLGE_OPC_RSS_INDIR_TABLE,
						false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_COMM_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap =
			cpu_to_le16(HCLGE_COMM_RSS_SET_BITMAP_MSK);
		for (j = 0; j < HCLGE_COMM_RSS_CFG_TBL_SIZE; j++) {
			qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j];
			req->rss_qid_l[j] = qid & 0xff;
			hclge_comm_append_rss_msb_info(req, qid, j);
		}
		ret = hclge_comm_cmd_send(hw, &desc, 1);
		if (ret) {
			dev_err(&hw->cmq.csq.pdev->dev,
				"failed to configure rss table, ret = %d.\n",
				ret);
			return ret;
		}
	}
	return 0;
}

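/* Push the shadow input tuple configuration to hardware; on the PF also
 * refresh the packet hash type reported to the stack.
 */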
int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
				   struct hclge_comm_hw *hw, bool is_pf,
				   struct hclge_comm_rss_cfg *rss_cfg)
{
	struct hclge_comm_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
					false);

	req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	if (is_pf)
		hclge_comm_get_rss_type(nic, &rss_cfg->rss_tuple_sets);

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		dev_err(&hw->cmq.csq.pdev->dev,
			"failed to configure rss input, ret = %d.\n", ret);
	return ret;
}

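/* Report the current hash algorithm and hash key to ethtool */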
void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
				  u8 *hfunc)
{
	/* Get hash algorithm */
	if (hfunc) {
		switch (rss_cfg->rss_algo) {
		case HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_COMM_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
}

void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
				  u32 *indir, u16 rss_ind_tbl_size)
{
	u16 i;

	if (!indir)
		return;

	for (i = 0; i < rss_ind_tbl_size; i++)
		indir[i] = rss_cfg->rss_indirection_tbl[i];
}

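/* Write the hash algorithm and hash key to hardware. The key is longer
 * than one command descriptor can carry, so it is sent in chunks of
 * HCLGE_COMM_RSS_HASH_KEY_NUM bytes, each tagged with its key offset.
 */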
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
				const u8 *key)
{
	struct hclge_comm_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGE_COMM_RSS_KEY_SIZE;
	req = (struct hclge_comm_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclge_comm_cmd_setup_basic_desc(&desc,
						HCLGE_OPC_RSS_GENERIC_CONFIG,
						false);

		req->hash_config |= (hfunc & HCLGE_COMM_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGE_COMM_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGE_COMM_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGE_COMM_RSS_HASH_KEY_NUM,
		       key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclge_comm_cmd_send(hw, &desc, 1);
		if (ret) {
			dev_err(&hw->cmq.csq.pdev->dev,
				"failed to configure RSS key, ret = %d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

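/* Convert ethtool RXH_* flags into the hardware tuple bits; SCTP flows
 * additionally set HCLGE_COMM_V_TAG_BIT.
 */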
static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_COMM_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_COMM_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_COMM_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_COMM_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_COMM_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_COMM_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_COMM_V_TAG_BIT;

	return hash_sets;
}

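/* Build the input tuple command from the current shadow configuration and
 * overwrite the entry of the flow type being changed. Hashing on the L4
 * ports of IPv6 SCTP packets is rejected on hardware earlier than V3.
 */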
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
				  struct ethtool_rxnfc *nfc,
				  struct hnae3_ae_dev *ae_dev,
				  struct hclge_comm_rss_input_tuple_cmd *req)
{
	u8 tuple_sets;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_comm_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if (ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

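/* Convert hardware tuple bits back into ethtool RXH_* flags */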
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
{
	u64 tuple_data = 0;

	if (tuple_sets & HCLGE_COMM_D_PORT_BIT)
		tuple_data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_COMM_S_PORT_BIT)
		tuple_data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_COMM_D_IP_BIT)
		tuple_data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_COMM_S_IP_BIT)
		tuple_data |= RXH_IP_SRC;

	return tuple_data;
}