// SPDX-License-Identifier: GPL-2.0
/* Marvell MACSEC hardware offload driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <crypto/skcipher.h>
#include <linux/rtnetlink.h>
#include <linux/bitfield.h>
#include "otx2_common.h"

#define MCS_TCAM0_MAC_DA_MASK		GENMASK_ULL(47, 0)
#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)

#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)

#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_GCM_AES_128			0
#define MCS_GCM_AES_256			1
#define MCS_GCM_AES_XPN_128		2
#define MCS_GCM_AES_XPN_256		3

#define MCS_TCI_ES			0x40 /* end station */
#define MCS_TCI_SC			0x20 /* SCI present */
#define MCS_TCI_SCB			0x10 /* EPON */
#define MCS_TCI_E			0x08 /* encryption */
#define MCS_TCI_C			0x04 /* changed text */

#define CN10K_MAX_HASH_LEN		16
#define CN10K_MAX_SAK_LEN		32

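/* Derive the 16-byte hash key that is programmed alongside the SAK:
 * encrypting an all-zero block with the SAK yields the AES-GCM GHASH
 * subkey (H = E_K(0^128)), which cn10k_mcs_write_keys() packs into the
 * SA policy for the hardware.
 */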
static int cn10k_ecb_aes_encrypt(struct otx2_nic *pfvf, u8 *sak,
				 u16 sak_len, u8 *hash)
{
	u8 data[CN10K_MAX_HASH_LEN] = { 0 };
	struct skcipher_request *req = NULL;
	struct scatterlist sg_src, sg_dst;
	struct crypto_skcipher *tfm;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		dev_err(pfvf->dev, "failed to allocate transform for ecb-aes\n");
		return PTR_ERR(tfm);
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		dev_err(pfvf->dev, "failed to allocate request for skcipher\n");
		err = -ENOMEM;
		goto free_tfm;
	}

	err = crypto_skcipher_setkey(tfm, sak, sak_len);
	if (err) {
		dev_err(pfvf->dev, "failed to set key for skcipher\n");
		goto free_req;
	}

	/* build sg list */
	sg_init_one(&sg_src, data, CN10K_MAX_HASH_LEN);
	sg_init_one(&sg_dst, hash, CN10K_MAX_HASH_LEN);

	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg_src, &sg_dst,
				   CN10K_MAX_HASH_LEN, NULL);

	err = crypto_skcipher_encrypt(req);
	err = crypto_wait_req(err, &wait);

free_req:
	skcipher_request_free(req);
free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy)
{
	struct cn10k_mcs_txsc *txsc;

	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		if (txsc->sw_secy == secy)
			return txsc;
	}

	return NULL;
}

static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy,
						 struct macsec_rx_sc *rx_sc)
{
	struct cn10k_mcs_rxsc *rxsc;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
			return rxsc;
	}

	return NULL;
}

static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
{
	switch (rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		return "FLOW";
	case MCS_RSRC_TYPE_SC:
		return "SC";
	case MCS_RSRC_TYPE_SECY:
		return "SECY";
	case MCS_RSRC_TYPE_SA:
		return "SA";
	default:
		return "Unknown";
	}
}

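/* Request a single hardware resource of @type for direction @dir from the
 * AF over the mailbox and return its id in @rsrc_id.
 */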
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 *rsrc_id)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_alloc_rsrc_req *req;
	struct mcs_alloc_rsrc_rsp *rsp;
	int ret = -ENOMEM;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_type = type;
	req->rsrc_cnt = 1;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							     0, &req->hdr);
	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
		ret = -EINVAL;
		goto fail;
	}

	switch (rsp->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		*rsrc_id = rsp->flow_ids[0];
		break;
	case MCS_RSRC_TYPE_SC:
		*rsrc_id = rsp->sc_ids[0];
		break;
	case MCS_RSRC_TYPE_SECY:
		*rsrc_id = rsp->secy_ids[0];
		break;
	case MCS_RSRC_TYPE_SA:
		*rsrc_id = rsp->sa_ids[0];
		break;
	default:
		ret = -EINVAL;
		goto fail;
	}

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
	return ret;
}

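/* Clear the resource's stats and hand it back to the AF. Both requests are
 * queued on the mailbox and sent as one batch; failures are only logged
 * since the callers have no way to recover.
 */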
static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 hw_rsrc_id,
				bool all)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_free_rsrc_req *req;

	mutex_lock(&mbox->lock);

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req)
		goto fail;

	clear_req->id = hw_rsrc_id;
	clear_req->type = type;
	clear_req->dir = dir;

	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_id = hw_rsrc_id;
	req->rsrc_type = type;
	req->dir = dir;
	if (all)
		req->all = 1;

	if (otx2_sync_mbox_msg(&pfvf->mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return;
fail:
	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
}

cn10k_mcs_alloc_txsa(struct otx2_nic * pfvf,u16 * hw_sa_id)239 static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
240 {
241 return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
242 }
243
cn10k_mcs_alloc_rxsa(struct otx2_nic * pfvf,u16 * hw_sa_id)244 static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
245 {
246 return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
247 }
248
cn10k_mcs_free_txsa(struct otx2_nic * pfvf,u16 hw_sa_id)249 static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
250 {
251 cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
252 }
253
cn10k_mcs_free_rxsa(struct otx2_nic * pfvf,u16 hw_sa_id)254 static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
255 {
256 cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
257 }
258
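/* Build the 64-bit RX SecY policy word (replay window and protection,
 * cipher suite, validation mode) from the software SecY state and program
 * it via the mailbox.
 */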
static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy, u8 hw_secy_id)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 policy;
	u8 cipher;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
	if (secy->replay_protect)
		policy |= MCS_RX_SECY_PLCY_RP;

	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, cipher);
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);

	policy |= MCS_RX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = hw_secy_id;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct macsec_secy *secy = rxsc->sw_secy;
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_da;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_da = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;

	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = rxsc->hw_flow_id;
	req->secy_id = hw_secy_id;
	req->sc_id = rxsc->hw_sc_id;
	req->dir = MCS_RX;

	if (sw_rx_sc->active)
		req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_rx_sc_cam_write_req *sc_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
	if (!sc_req) {
		ret = -ENOMEM;
		goto fail;
	}

	sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
	sc_req->sc_id = rxsc->hw_sc_id;
	sc_req->secy_id = hw_secy_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

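/* The hardware expects the SAK, the derived hash key and the XPN salt in
 * byte-reversed order, with the 32-bit SSCI in the upper half of the last
 * salt word; everything is packed into the plcy[0][] 64-bit words below.
 */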
static int cn10k_mcs_write_keys(struct otx2_nic *pfvf,
				struct macsec_secy *secy,
				struct mcs_sa_plcy_write_req *req,
				u8 *sak, u8 *salt, ssci_t ssci)
{
	u8 hash_rev[CN10K_MAX_HASH_LEN];
	u8 sak_rev[CN10K_MAX_SAK_LEN];
	u8 salt_rev[MACSEC_SALT_LEN];
	u8 hash[CN10K_MAX_HASH_LEN];
	u32 ssci_63_32;
	int err, i;

	err = cn10k_ecb_aes_encrypt(pfvf, sak, secy->key_len, hash);
	if (err) {
		dev_err(pfvf->dev, "Generating hash using ECB(AES) failed\n");
		return err;
	}

	for (i = 0; i < secy->key_len; i++)
		sak_rev[i] = sak[secy->key_len - 1 - i];

	for (i = 0; i < CN10K_MAX_HASH_LEN; i++)
		hash_rev[i] = hash[CN10K_MAX_HASH_LEN - 1 - i];

	for (i = 0; i < MACSEC_SALT_LEN; i++)
		salt_rev[i] = salt[MACSEC_SALT_LEN - 1 - i];

	ssci_63_32 = (__force u32)cpu_to_be32((__force u32)ssci);

	memcpy(&req->plcy[0][0], sak_rev, secy->key_len);
	memcpy(&req->plcy[0][4], hash_rev, CN10K_MAX_HASH_LEN);
	memcpy(&req->plcy[0][6], salt_rev, MACSEC_SALT_LEN);
	req->plcy[0][7] |= (u64)ssci_63_32 << 32;

	return 0;
}

static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_rxsc *rxsc,
				      u8 assoc_num, bool sa_in_use)
{
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *sak = rxsc->sa_key[assoc_num];
	u8 *salt = rxsc->salt[assoc_num];
	struct mcs_rx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
				   salt, rxsc->ssci[assoc_num]);
	if (ret)
		goto fail;

	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_RX;

	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
	map_req->sa_in_use = sa_in_use;
	map_req->sc_id = rxsc->hw_sc_id;
	map_req->an = assoc_num;

	/* Send two messages together */
	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
				    struct cn10k_mcs_rxsc *rxsc,
				    u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = rxsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

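/* Build the TX SecY policy: the SecTag TCI bits and insertion offset, the
 * MTU, the cipher suite and the protect/enable flags, then program it via
 * the mailbox.
 */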
static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	struct macsec_tx_sc *sw_tx_sc;
	u8 sectag_tci = 0;
	u8 tag_offset;
	u64 policy;
	u8 cipher;
	int ret;

	/* Insert SecTag after 12 bytes (DA+SA), or after 16 bytes if a
	 * VLAN tag needs to be sent in clear text.
	 */
	tag_offset = txsc->vlan_dev ? 16 : 12;
	sw_tx_sc = &secy->tx_sc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	if (sw_tx_sc->send_sci) {
		sectag_tci |= MCS_TCI_SC;
	} else {
		if (sw_tx_sc->end_station)
			sectag_tci |= MCS_TCI_ES;
		if (sw_tx_sc->scb)
			sectag_tci |= MCS_TCI_SCB;
	}

	if (sw_tx_sc->encrypt)
		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
	/* Write SecTag excluding AN bits (1..0) */
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
	policy |= MCS_TX_SECY_PLCY_INS_MODE;
	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, cipher);

	if (secy->protect_frames)
		policy |= MCS_TX_SECY_PLCY_PROTECT;

	/* If the encoding SA does not exist or is not active, and
	 * protect_frames is not set, frames can be sent out as they are.
	 * Hence enable the policy irrespective of the SecY's operational
	 * state when !protect.
	 */
	if (!secy->protect_frames || secy->operational)
		policy |= MCS_TX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = txsc->hw_secy_id_tx;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
				     struct macsec_secy *secy,
				     struct cn10k_mcs_txsc *txsc)
{
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_sa;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);

	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;

	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = txsc->hw_flow_id;
	req->secy_id = txsc->hw_secy_id_tx;
	req->sc_id = txsc->hw_sc_id;
	req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	req->dir = MCS_TX;
	/* This can be enabled since stack xmits packets only when interface is up */
	req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

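/* Map the encoding SA to the TX SC; judging by the sa_index0/sa_index0_vld
 * fields, the hardware uses this mapping to pick the SA (and hence the key
 * and PN) for frames transmitted on the SC.
 */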
static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc,
				   u8 sa_num, bool sa_active)
{
	struct mcs_tx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	/* Of all the SAs, link only the encoding SA to the SC */
	if (txsc->encoding_sa != sa_num)
		return 0;

	mutex_lock(&mbox->lock);

	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
	map_req->sa_index0_vld = sa_active;
	map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	map_req->sc_id = txsc->hw_sc_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_txsc *txsc,
				      u8 assoc_num)
{
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *sak = txsc->sa_key[assoc_num];
	u8 *salt = txsc->salt[assoc_num];
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
				   salt, txsc->ssci[assoc_num]);
	if (ret)
		goto fail;

	plcy_req->plcy[0][8] = assoc_num;
	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
				struct cn10k_mcs_txsc *txsc,
				u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = txsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
				    bool enable, enum mcs_direction dir)
{
	struct mcs_flowid_ena_dis_entry *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->flow_id = hw_flow_id;
	req->ena = enable;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

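/* Read SA stats from the AF; when @clear is set, a clear-stats request is
 * queued in the same mailbox batch so the snapshot and the reset happen
 * together. The SC and SecY variants below follow the same pattern.
 */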
static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
			      struct mcs_sa_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sa_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sa_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sa_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SA;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
			      struct mcs_sc_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sc_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sc_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sc_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SC;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
				struct mcs_secy_stats *rsp_p,
				enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_secy_stats *rsp;
	struct mcs_stats_req *req;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_secy_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_secy_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SECY;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							 0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

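/* Allocate the hardware resources (flow id, TX and RX SecY, SC) that back
 * one software SecY on the transmit side, unwinding on any failure.
 */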
static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_txsc *txsc;
	int ret;

	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
	if (!txsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
				   &txsc->hw_flow_id);
	if (ret)
		goto fail;

	/* For a SecY, one TX secy and one RX secy HW resources are needed */
	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_tx);
	if (ret)
		goto free_flowid;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_rx);
	if (ret)
		goto free_tx_secy;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
				   &txsc->hw_sc_id);
	if (ret)
		goto free_rx_secy;

	return txsc;
free_rx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
free_tx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
fail:
	kfree(txsc);
	return ERR_PTR(ret);
}

/* Free TX SC and its SAs (if any) resources to AF */
static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_txsc *txsc)
{
	u8 sa_bmap = txsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
						   txsc, sa_num);
			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
			    txsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
}

static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_rxsc *rxsc;
	int ret;

	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
	if (!rxsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
				   &rxsc->hw_flow_id);
	if (ret)
		goto fail;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
				   &rxsc->hw_sc_id);
	if (ret)
		goto free_flowid;

	return rxsc;
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
fail:
	kfree(rxsc);
	return ERR_PTR(ret);
}

/* Free RX SC and its SAs (if any) resources to AF */
static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc)
{
	u8 sa_bmap = rxsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
						   sa_num, false);
			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
			    rxsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
}

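/* Push the complete TX-side state for a SecY to the hardware: optionally
 * one TX SA with its PN and SC mapping, plus the TX SecY policy, the TX
 * flow entry and the paired RX SecY policy.
 */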
static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc,
				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
{
	if (sw_tx_sa) {
		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, sw_tx_sa->next_pn);
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
					sw_tx_sa->active);
	}

	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
	/* When updating secy, change RX secy also */
	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);

	return 0;
}

static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
				 struct macsec_secy *secy, u8 hw_secy_id)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	struct macsec_rx_sa *sw_rx_sa;
	u8 sa_num;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
			if (!sw_rx_sa)
				continue;

			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
						   sa_num, sw_rx_sa->active);
			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
						 sw_rx_sa->next_pn);
		}

		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
	}

	return 0;
}

static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   bool delete)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	int ret;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
					       false, MCS_RX);
		if (ret)
			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
				mcs_rx_sc->hw_sc_id);
		if (delete) {
			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
			list_del(&mcs_rx_sc->entry);
			kfree(mcs_rx_sc);
		}
	}

	return 0;
}

static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_secy_stats rx_rsp = { 0 };
	struct mcs_sc_stats sc_rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	/* Because some hardware counters are shared between stats, take a
	 * snapshot of the current stats and reset them when the secy policy
	 * is updated. The stats below are the ones affected by the shared
	 * counters.
	 */

	/* Check if sync is really needed */
	if (secy->validate_frames == txsc->last_validate_frames &&
	    secy->replay_protect == txsc->last_replay_protect)
		return;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);

	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);

		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;

		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;

		if (txsc->last_replay_protect)
			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
		else
			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

		if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
		else
			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
	}

	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
}

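/* The mdo_* handlers below implement struct macsec_ops; the MACsec core
 * calls them to mirror software SecY/SC/SA state into the MCS hardware.
 */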
static int cn10k_mdo_open(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
	if (err)
		return err;

	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
}

static int cn10k_mdo_stop(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	if (err)
		return err;

	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
}

static int cn10k_mdo_add_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		return -EOPNOTSUPP;

	txsc = cn10k_mcs_create_txsc(pfvf);
	if (IS_ERR(txsc))
		return -ENOSPC;

	txsc->sw_secy = secy;
	txsc->encoding_sa = secy->tx_sc.encoding_sa;
	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
	txsc->vlan_dev = is_vlan_dev(ctx->netdev);

	list_add(&txsc->entry, &cfg->txsc_list);

	if (netif_running(secy->netdev))
		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);

	return 0;
}

static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	bool active;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	/* Encoding SA got changed */
	if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
		txsc->encoding_sa = secy->tx_sc.encoding_sa;
		sa_num = txsc->encoding_sa;
		sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
		active = sw_tx_sa ? sw_tx_sa->active : false;
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
	}

	if (netif_running(secy->netdev)) {
		cn10k_mcs_sync_stats(pfvf, secy, txsc);

		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
	cn10k_mcs_delete_txsc(pfvf, txsc);
	list_del(&txsc->entry);
	kfree(txsc);

	return 0;
}

static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
	memcpy(&txsc->salt[sa_num], sw_tx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	txsc->ssci[sa_num] = sw_tx_sa->ssci;

	txsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		if (err)
			return err;

		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		/* Keys cannot be changed after creation */
		if (ctx->sa.update_pn) {
			err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
						   sw_tx_sa->next_pn);
			if (err)
				return err;
		}

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
	txsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_rxsc *rxsc;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	rxsc = cn10k_mcs_create_rxsc(pfvf);
	if (IS_ERR(rxsc))
		return -ENOSPC;

	rxsc->sw_secy = ctx->secy;
	rxsc->sw_rxsc = ctx->rx_sc;
	list_add(&rxsc->entry, &cfg->rxsc_list);

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;

		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	bool enable = ctx->rx_sc->active;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (netif_running(secy->netdev))
		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
						enable, MCS_RX);

	return 0;
}

static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
	cn10k_mcs_delete_rxsc(pfvf, rxsc);
	list_del(&rxsc->entry);
	kfree(rxsc);

	return 0;
}

static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
	memcpy(&rxsc->salt[sa_num], rx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	rxsc->ssci[sa_num] = rx_sa->ssci;

	rxsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
						 sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
		if (err)
			return err;

		if (!ctx->sa.update_pn)
			return 0;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);

	rxsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
	txsc->stats.InPktsOverrun = 0;

	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;

	return 0;
}

static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);

	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);

	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);

	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;

	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;

	if (secy->replay_protect)
		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;

	if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;

	return 0;
}

static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);

	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;

	return 0;
}

static const struct macsec_ops cn10k_mcs_ops = {
	.mdo_dev_open = cn10k_mdo_open,
	.mdo_dev_stop = cn10k_mdo_stop,
	.mdo_add_secy = cn10k_mdo_add_secy,
	.mdo_upd_secy = cn10k_mdo_upd_secy,
	.mdo_del_secy = cn10k_mdo_del_secy,
	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
	.mdo_add_txsa = cn10k_mdo_add_txsa,
	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
	.mdo_del_txsa = cn10k_mdo_del_txsa,
	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
};

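/* MCS interrupt handler: when a TX SA's extended packet number reaches
 * zero (MCS_CPM_TX_PACKET_XPN_EQ0_INT), find the owning SecY/SA and let
 * the MACsec core handle the PN wrap via macsec_pn_wrapped().
 */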
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_tx_sa *sw_tx_sa = NULL;
	struct macsec_secy *secy = NULL;
	struct cn10k_mcs_txsc *txsc;
	u8 an;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
		return;

	/* Find the SecY to which the expired hardware SA is mapped */
	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
			if (txsc->hw_sa_id[an] == event->sa_id) {
				secy = txsc->sw_secy;
				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			}
	}

	if (secy && sw_tx_sa)
		macsec_pn_wrapped(secy, sw_tx_sa);
}

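/* Advertise MACsec offload on the netdev and ask the AF to deliver the
 * PN-wrap interrupt. Failure to set up the interrupt is logged but is not
 * treated as fatal.
 */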
int cn10k_mcs_init(struct otx2_nic *pfvf)
{
	struct mbox *mbox = &pfvf->mbox;
	struct cn10k_mcs_cfg *cfg;
	struct mcs_intr_cfg *req;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&cfg->txsc_list);
	INIT_LIST_HEAD(&cfg->rxsc_list);
	pfvf->macsec_cfg = cfg;

	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
	if (!req)
		goto fail;

	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	if (otx2_sync_mbox_msg(mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
	mutex_unlock(&mbox->lock);
	return 0;
}

void cn10k_mcs_free(struct otx2_nic *pfvf)
{
	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
	kfree(pfvf->macsec_cfg);
	pfvf->macsec_cfg = NULL;
}