// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/bitfield.h>
#include "rvu.h"

/* Install (or refresh) the AF-owned default unicast RX rule for @pcifunc.
 *
 * @chan_mask selects how strictly the RX channel is matched: 0x0 ignores
 * the channel (accept the DMAC from LBK or wire), 0xFFF restores an exact
 * channel match.
 *
 * Returns 0 on success (or when the NIXLF is not yet initialized), or the
 * mbox handler's error code.
 */
static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
				      u16 chan_mask)
{
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Nothing to do until this pcifunc's NIXLF is initialized;
	 * rvu_switch_update_rules() will call here again once it is.
	 */
	if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
		return 0;

	/* Match only on the function's unicast DMAC */
	req.features = BIT_ULL(NPC_DMAC);
	ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
	eth_broadcast_addr((u8 *)&req.mask.dmac);

	req.hdr.pcifunc = 0;	/* AF is the requester ... */
	req.vf = pcifunc;	/* ... acting on behalf of this PF/VF */
	req.channel = pfvf->rx_chan_base;
	req.chan_mask = chan_mask;
	req.intf = pfvf->nix_rx_intf;
	req.op = NIX_RX_ACTION_DEFAULT;
	req.default_rule = 1;

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
/* Install the AF-owned TX steering rule for @pcifunc at MCAM index @entry.
 *
 * Packets transmitted by the function that match its own DMAC are steered
 * to the LBK channel so other PFs/VFs in the system can receive them.
 *
 * Returns 0 on success (or when the NIXLF is not yet initialized), or the
 * mbox handler's error code.
 */
static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
{
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct rvu_pfvf *pfvf;
	u8 lbkid = 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* Nothing to do until this pcifunc's NIXLF is initialized;
	 * rvu_switch_update_rules() will call here again once it is.
	 */
	if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
		return 0;

	/* NIX0 functions loop through LBK0, everything else via LBK1 */
	if (pfvf->nix_blkaddr != BLKADDR_NIX0)
		lbkid = 1;

	/* Match only on the function's unicast DMAC */
	req.features = BIT_ULL(NPC_DMAC);
	ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
	eth_broadcast_addr((u8 *)&req.mask.dmac);

	req.hdr.pcifunc = 0;	/* AF is the requester ... */
	req.vf = pcifunc;	/* ... acting on behalf of this PF/VF */
	req.entry = entry;
	req.intf = pfvf->nix_tx_intf;
	req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
	req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
	req.set_cntr = 1;

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
rvu_switch_install_rules(struct rvu * rvu)70 static int rvu_switch_install_rules(struct rvu *rvu)
71 {
72 	struct rvu_switch *rswitch = &rvu->rswitch;
73 	u16 start = rswitch->start_entry;
74 	struct rvu_hwinfo *hw = rvu->hw;
75 	u16 pcifunc, entry = 0;
76 	int pf, vf, numvfs;
77 	int err;
78 
79 	for (pf = 1; pf < hw->total_pfs; pf++) {
80 		if (!is_pf_cgxmapped(rvu, pf))
81 			continue;
82 
83 		pcifunc = pf << 10;
84 		/* rvu_get_nix_blkaddr sets up the corresponding NIX block
85 		 * address and NIX RX and TX interfaces for a pcifunc.
86 		 * Generally it is called during attach call of a pcifunc but it
87 		 * is called here since we are pre-installing rules before
88 		 * nixlfs are attached
89 		 */
90 		rvu_get_nix_blkaddr(rvu, pcifunc);
91 
92 		/* MCAM RX rule for a PF/VF already exists as default unicast
93 		 * rules installed by AF. Hence change the channel in those
94 		 * rules to ignore channel so that packets with the required
95 		 * DMAC received from LBK(by other PF/VFs in system) or from
96 		 * external world (from wire) are accepted.
97 		 */
98 		err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
99 		if (err) {
100 			dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
101 				pf, err);
102 			return err;
103 		}
104 
105 		err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
106 		if (err) {
107 			dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
108 				pf, err);
109 			return err;
110 		}
111 
112 		rswitch->entry2pcifunc[entry++] = pcifunc;
113 
114 		rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
115 		for (vf = 0; vf < numvfs; vf++) {
116 			pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
117 			rvu_get_nix_blkaddr(rvu, pcifunc);
118 
119 			err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
120 			if (err) {
121 				dev_err(rvu->dev,
122 					"RX rule for PF%dVF%d failed(%d)\n",
123 					pf, vf, err);
124 				return err;
125 			}
126 
127 			err = rvu_switch_install_tx_rule(rvu, pcifunc,
128 							 start + entry);
129 			if (err) {
130 				dev_err(rvu->dev,
131 					"TX rule for PF%dVF%d failed(%d)\n",
132 					pf, vf, err);
133 				return err;
134 			}
135 
136 			rswitch->entry2pcifunc[entry++] = pcifunc;
137 		}
138 	}
139 
140 	return 0;
141 }
rvu_switch_enable(struct rvu * rvu)143 void rvu_switch_enable(struct rvu *rvu)
144 {
145 	struct npc_mcam_alloc_entry_req alloc_req = { 0 };
146 	struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
147 	struct npc_delete_flow_req uninstall_req = { 0 };
148 	struct npc_mcam_free_entry_req free_req = { 0 };
149 	struct rvu_switch *rswitch = &rvu->rswitch;
150 	struct msg_rsp rsp;
151 	int ret;
152 
153 	alloc_req.contig = true;
154 	alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
155 	ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
156 						    &alloc_rsp);
157 	if (ret) {
158 		dev_err(rvu->dev,
159 			"Unable to allocate MCAM entries\n");
160 		goto exit;
161 	}
162 
163 	if (alloc_rsp.count != alloc_req.count) {
164 		dev_err(rvu->dev,
165 			"Unable to allocate %d MCAM entries, got %d\n",
166 			alloc_req.count, alloc_rsp.count);
167 		goto free_entries;
168 	}
169 
170 	rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
171 					 GFP_KERNEL);
172 	if (!rswitch->entry2pcifunc)
173 		goto free_entries;
174 
175 	rswitch->used_entries = alloc_rsp.count;
176 	rswitch->start_entry = alloc_rsp.entry;
177 
178 	ret = rvu_switch_install_rules(rvu);
179 	if (ret)
180 		goto uninstall_rules;
181 
182 	return;
183 
184 uninstall_rules:
185 	uninstall_req.start = rswitch->start_entry;
186 	uninstall_req.end =  rswitch->start_entry + rswitch->used_entries - 1;
187 	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
188 	kfree(rswitch->entry2pcifunc);
189 free_entries:
190 	free_req.all = 1;
191 	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
192 exit:
193 	return;
194 }
rvu_switch_disable(struct rvu * rvu)196 void rvu_switch_disable(struct rvu *rvu)
197 {
198 	struct npc_delete_flow_req uninstall_req = { 0 };
199 	struct npc_mcam_free_entry_req free_req = { 0 };
200 	struct rvu_switch *rswitch = &rvu->rswitch;
201 	struct rvu_hwinfo *hw = rvu->hw;
202 	int pf, vf, numvfs;
203 	struct msg_rsp rsp;
204 	u16 pcifunc;
205 	int err;
206 
207 	if (!rswitch->used_entries)
208 		return;
209 
210 	for (pf = 1; pf < hw->total_pfs; pf++) {
211 		if (!is_pf_cgxmapped(rvu, pf))
212 			continue;
213 
214 		pcifunc = pf << 10;
215 		err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
216 		if (err)
217 			dev_err(rvu->dev,
218 				"Reverting RX rule for PF%d failed(%d)\n",
219 				pf, err);
220 
221 		rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
222 		for (vf = 0; vf < numvfs; vf++) {
223 			pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
224 			err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
225 			if (err)
226 				dev_err(rvu->dev,
227 					"Reverting RX rule for PF%dVF%d failed(%d)\n",
228 					pf, vf, err);
229 		}
230 	}
231 
232 	uninstall_req.start = rswitch->start_entry;
233 	uninstall_req.end =  rswitch->start_entry + rswitch->used_entries - 1;
234 	free_req.all = 1;
235 	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
236 	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
237 	rswitch->used_entries = 0;
238 	kfree(rswitch->entry2pcifunc);
239 }
/* Re-install the switch RX/TX rules for @pcifunc once its NIXLF has been
 * initialized (the pre-install in rvu_switch_install_rules() is a no-op
 * for uninitialized functions).  Does nothing if switch mode is off or
 * @pcifunc owns no switch MCAM entry.
 */
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	u32 max = rswitch->used_entries;
	u16 entry = 0;

	if (!max)
		return;

	/* Linear scan for the MCAM entry reserved for this pcifunc */
	while (entry < max && rswitch->entry2pcifunc[entry] != pcifunc)
		entry++;

	if (entry >= max)
		return;

	rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
	rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
}