1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Ethernet driver
3 *
4 * Copyright (C) 2021 Marvell.
5 *
6 */
7
8 #include "otx2_common.h"
9
/* Install a DMAC filter for a non-PF MAC address via the CGX/RPM
 * mailbox and report back the hardware filter index.
 *
 * @pf:         driver private data
 * @mac:        MAC address to install
 * @dmac_index: out param; written with the CGX/RPM filter index on success
 *
 * Returns 0 on success, -ENOMEM if the mailbox message cannot be
 * allocated, -EINVAL if the response is missing/invalid, or the
 * mailbox error code.
 */
static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
			       u32 *dmac_index)
{
	struct cgx_mac_addr_add_req *req;
	struct cgx_mac_addr_add_rsp *rsp;
	int err;

	mutex_lock(&pf->mbox.lock);

	req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);
	err = otx2_sync_mbox_msg(&pf->mbox);

	if (!err) {
		rsp = (struct cgx_mac_addr_add_rsp *)
			otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
		/* Validate the response before dereferencing it:
		 * otx2_mbox_get_rsp() may return an ERR_PTR or NULL.
		 */
		if (IS_ERR_OR_NULL(rsp)) {
			err = -EINVAL;
			goto out;
		}
		*dmac_index = rsp->index;
	}

out:
	mutex_unlock(&pf->mbox.lock);
	return err;
}
37
/* Program the PF's own netdev MAC address into the CGX/RPM DMAC
 * filter table and refresh the cached filter index from the response.
 *
 * @pf:         driver private data
 * @dmac_index: in/out; requested index on entry, hardware-assigned
 *              index on successful return
 *
 * Returns 0 on success or a negative error code.
 */
static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index)
{
	struct cgx_mac_addr_set_or_get *set_req;
	struct cgx_mac_addr_set_or_get *set_rsp;
	int rc = -ENOMEM;

	mutex_lock(&pf->mbox.lock);

	set_req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
	if (!set_req)
		goto unlock;

	set_req->index = *dmac_index;
	ether_addr_copy(set_req->mac_addr, pf->netdev->dev_addr);

	rc = otx2_sync_mbox_msg(&pf->mbox);
	if (rc)
		goto unlock;

	set_rsp = (struct cgx_mac_addr_set_or_get *)
		otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &set_req->hdr);
	/* Guard against an ERR_PTR/NULL response before reading it */
	if (IS_ERR_OR_NULL(set_rsp)) {
		rc = -EINVAL;
		goto unlock;
	}

	*dmac_index = set_rsp->index;

unlock:
	mutex_unlock(&pf->mbox.lock);
	return rc;
}
73
/* Add a DMAC filter for @mac, dispatching to the PF-MAC path when the
 * address matches the netdev's own address.  The hardware index that
 * CGX/RPM assigns is cached in bmap_to_dmacindex[@bit_pos] so later
 * update/remove calls can reference the same filter.
 */
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos)
{
	u32 *index_slot = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];

	if (ether_addr_equal(mac, pf->netdev->dev_addr))
		return otx2_dmacflt_add_pfmac(pf, index_slot);

	return otx2_dmacflt_do_add(pf, mac, index_slot);
}
88
/* Delete the DMAC filter at @dmac_index via the CGX/RPM mailbox.
 *
 * @pfvf:       driver private data
 * @mac:        MAC address being removed (unused here; kept for parity
 *              with the add path's signature)
 * @dmac_index: hardware filter index to delete
 *
 * Returns 0 on success or a negative error code.
 */
static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
				  u32 dmac_index)
{
	struct cgx_mac_addr_del_req *del_req;
	int rc = -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	del_req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
	if (del_req) {
		del_req->index = dmac_index;
		rc = otx2_sync_mbox_msg(&pfvf->mbox);
	}

	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}
109
/* Reset the PF-MAC DMAC filter entry at @dmac_index via the CGX/RPM
 * mailbox.  Returns 0 on success or a negative error code.
 */
static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index)
{
	struct cgx_mac_addr_reset_req *reset_req;
	int rc = -ENOMEM;

	mutex_lock(&pf->mbox.lock);

	reset_req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
	if (reset_req) {
		reset_req->index = dmac_index;
		rc = otx2_sync_mbox_msg(&pf->mbox);
	}

	mutex_unlock(&pf->mbox.lock);
	return rc;
}
128
/* Remove the DMAC filter cached for @bit_pos, using the PF-MAC reset
 * path when @mac matches the netdev's own address.
 */
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
			u32 bit_pos)
{
	u32 hw_index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];

	if (ether_addr_equal(mac, pf->netdev->dev_addr))
		return otx2_dmacflt_remove_pfmac(pf, hw_index);

	return otx2_dmacflt_do_remove(pf, mac, hw_index);
}
139
140 /* CGX/RPM blocks support a maximum of 32 unicast DMAC entries.
141  * In a typical configuration a MAC block is associated
142  * with 4 LMACs, so each LMAC gets 8 DMAC entries.
143  */
otx2_dmacflt_get_max_cnt(struct otx2_nic * pf)144 int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
145 {
146 struct cgx_max_dmac_entries_get_rsp *rsp;
147 struct msg_req *msg;
148 int err;
149
150 mutex_lock(&pf->mbox.lock);
151 msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
152
153 if (!msg) {
154 mutex_unlock(&pf->mbox.lock);
155 return -ENOMEM;
156 }
157
158 err = otx2_sync_mbox_msg(&pf->mbox);
159 if (err)
160 goto out;
161
162 rsp = (struct cgx_max_dmac_entries_get_rsp *)
163 otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
164
165 if (IS_ERR_OR_NULL(rsp)) {
166 err = -EINVAL;
167 goto out;
168 }
169
170 pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
171
172 out:
173 mutex_unlock(&pf->mbox.lock);
174 return err;
175 }
176
/* Update the MAC address of an existing DMAC filter and refresh the
 * cached hardware index, since CGX/RPM may move the entry.
 *
 * @pf:      driver private data
 * @mac:     new MAC address for the filter
 * @bit_pos: position in the driver's filter bitmap whose cached index
 *           identifies the entry to update
 *
 * Returns 0 on success or a negative error code.
 */
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
{
	struct cgx_mac_addr_update_req *req;
	struct cgx_mac_addr_update_rsp *rsp;
	int rc;

	mutex_lock(&pf->mbox.lock);

	req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);

	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);
	req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];

	/* check the response and change index */

	rc = otx2_sync_mbox_msg(&pf->mbox);
	if (rc)
		goto out;

	rsp = (struct cgx_mac_addr_update_rsp *)
		otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	/* Validate the response before dereferencing it:
	 * otx2_mbox_get_rsp() may return an ERR_PTR or NULL.
	 */
	if (IS_ERR_OR_NULL(rsp)) {
		rc = -EINVAL;
		goto out;
	}

	pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;

out:
	mutex_unlock(&pf->mbox.lock);
	return rc;
}
210