1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
3
4 #include <linux/kernel.h>
5 #include <linux/io.h>
6 #include <linux/iopoll.h>
7 #include <linux/etherdevice.h>
8 #include <linux/platform_device.h>
9 #include <linux/if_ether.h>
10 #include <linux/if_vlan.h>
11 #include <net/dsa.h>
12 #include "mtk_eth_soc.h"
13 #include "mtk_ppe.h"
14 #include "mtk_ppe_regs.h"
15
16 static DEFINE_SPINLOCK(ppe_lock);
17
/* rhashtable layout for L2 (bridge) flows: entries hang off l2_node and are
 * keyed by the mtk_foe_bridge tuple embedded in the entry, hashing everything
 * up to (but not including) the key_end marker field.
 */
static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};
24
/* Write a 32-bit value to the PPE register at offset @reg */
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}
29
/* Read the 32-bit PPE register at offset @reg */
static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}
34
/* Read-modify-write helper: clear the bits in @mask, then OR in @set.
 * Returns the value that was written back to the register.
 */
static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 new_val = (ppe_r32(ppe, reg) & ~mask) | set;

	ppe_w32(ppe, reg, new_val);

	return new_val;
}
46
/* Set the bits in @val without clearing anything else */
static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}
51
/* Clear the bits in @val without setting anything else */
static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}
56
/* Current hardware timestamp, truncated to the width of the IB1 timestamp
 * field.  NOTE(review): register 0x0010 is read raw here — presumably the
 * frame engine timer; confirm against the SoC datasheet.
 */
static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}
61
mtk_ppe_wait_busy(struct mtk_ppe * ppe)62 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
63 {
64 int ret;
65 u32 val;
66
67 ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
68 !(val & MTK_PPE_GLO_CFG_BUSY),
69 20, MTK_PPE_WAIT_TIMEOUT_US);
70
71 if (ret)
72 dev_err(ppe->dev, "PPE table busy");
73
74 return ret;
75 }
76
/* Pulse the CLEAR bit to invalidate the PPE lookup cache */
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}
82
/* Flush the PPE lookup cache, then switch it on or off */
static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	u32 en_bit = enable ? MTK_PPE_CACHE_CTL_EN : 0;

	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN, en_bit);
}
90
mtk_ppe_hash_entry(struct mtk_eth * eth,struct mtk_foe_entry * e)91 static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
92 {
93 u32 hv1, hv2, hv3;
94 u32 hash;
95
96 switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
97 case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
98 case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
99 hv1 = e->ipv4.orig.ports;
100 hv2 = e->ipv4.orig.dest_ip;
101 hv3 = e->ipv4.orig.src_ip;
102 break;
103 case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
104 case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
105 hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
106 hv1 ^= e->ipv6.ports;
107
108 hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
109 hv2 ^= e->ipv6.dest_ip[0];
110
111 hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
112 hv3 ^= e->ipv6.src_ip[0];
113 break;
114 case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
115 case MTK_PPE_PKT_TYPE_IPV6_6RD:
116 default:
117 WARN_ON_ONCE(1);
118 return MTK_PPE_HASH_MASK;
119 }
120
121 hash = (hv1 & hv2) | ((~hv1) & hv3);
122 hash = (hash >> 24) | ((hash & 0xffffff) << 8);
123 hash ^= hv1 ^ hv2 ^ hv3;
124 hash ^= hash >> 16;
125 hash <<= (ffs(eth->soc->hash_offset) - 1);
126 hash &= MTK_PPE_ENTRIES - 1;
127
128 return hash;
129 }
130
131 static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth * eth,struct mtk_foe_entry * entry)132 mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
133 {
134 int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
135
136 if (type == MTK_PPE_PKT_TYPE_BRIDGE)
137 return &entry->bridge.l2;
138
139 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
140 return &entry->ipv6.l2;
141
142 return &entry->ipv4.l2;
143 }
144
145 static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth * eth,struct mtk_foe_entry * entry)146 mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
147 {
148 int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
149
150 if (type == MTK_PPE_PKT_TYPE_BRIDGE)
151 return &entry->bridge.ib2;
152
153 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
154 return &entry->ipv6.ib2;
155
156 return &entry->ipv4.ib2;
157 }
158
/* mtk_foe_entry_prepare() - initialize a FOE hardware entry from scratch.
 * @type:	MTK_PPE_PKT_TYPE_* of the flow
 * @l4proto:	IP protocol number; only used to derive the UDP flag and the
 *		port padding pattern
 * @pse_port:	destination PSE port programmed into IB2
 * @src_mac:	source MAC for the L2 info block
 * @dest_mac:	destination MAC for the L2 info block
 *
 * Fills IB1/IB2 (field layout differs between NETSYS v1 and v2), the L2
 * MAC info and the default ethertype.  Always returns 0.
 */
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		/* val now carries the IB2 word; stored into the proper
		 * union member further below
		 */
		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

	/* pad unused port fields with a recognizable pattern plus the
	 * L4 protocol number
	 */
	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}
223
/* Replace the destination PSE port in the entry's IB2 word.  The field
 * position differs between NETSYS v1 and v2.  Always returns 0.
 */
int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		*ib2 = (*ib2 & ~MTK_FOE_IB2_DEST_PORT_V2) |
		       FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	else
		*ib2 = (*ib2 & ~MTK_FOE_IB2_DEST_PORT) |
		       FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);

	return 0;
}
241
/* mtk_foe_entry_set_ipv4_tuple() - program an IPv4 address/port tuple.
 * @egress:	for HNAPT entries, select the post-NAT ("new") tuple
 *		instead of the original one
 *
 * For 6RD entries the addresses land in the tunnel endpoint fields and
 * ports are ignored.  Plain route entries skip the port fields as well.
 * Returns 0 on success, -EINVAL for packet types without an IPv4 tuple.
 */
int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	/* route-only entries carry no L4 ports */
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}
281
/* Program an IPv6 address (and, for 5-tuple types, port) tuple into the
 * entry.  DS-Lite entries use the tunnel endpoint fields instead.
 * Returns 0 on success, -EINVAL for packet types without an IPv6 tuple.
 */
int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int pkt_type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *sip, *dip;
	int i;

	switch (pkt_type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		sip = entry->dslite.tunnel_src_ip;
		dip = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		sip = entry->ipv6.src_ip;
		dip = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* convert all four words of each address to CPU byte order */
	for (i = 0; i < 4; i++) {
		sip[i] = be32_to_cpu(src_addr[i]);
		dip[i] = be32_to_cpu(dest_addr[i]);
	}

	return 0;
}
317
/* mtk_foe_entry_set_dsa() - encode a DSA (MTK tag) egress port.
 *
 * The switch tag is carried in the etype field (port bitmap); if a VLAN
 * layer is already present, BIT(8) marks the tagged variant of the DSA
 * tag.  The VLAN tag bit itself is cleared since the switch re-tags.
 * Always returns 0.
 */
int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}
334
/* mtk_foe_entry_set_vlan() - add a VLAN tag to the entry.
 *
 * Supports up to two VLAN layers; with one layer already present, a DSA
 * entry (layer counted but tag bit clear) takes the VID in vlan1 and marks
 * the tagged DSA format, otherwise the VID goes to vlan2 and the layer
 * count is bumped.  Returns 0 on success, -ENOSPC when both slots are used.
 */
int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			/* layer came from a DSA tag: use vlan1 + tagged DSA */
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}
359
/* mtk_foe_entry_set_pppoe() - add a PPPoE session header.
 * @sid: PPPoE session id
 *
 * The PPPoE ethertype only replaces the current one when no VLAN layer is
 * present or when the outermost layer is a real VLAN tag (not a DSA tag).
 * Always returns 0.
 */
int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}
374
/* mtk_foe_entry_set_wdma() - route the flow to a WDMA (wireless) device.
 * @wdma_idx:	which WDMA instance (v1 only; v2 encodes it elsewhere)
 * @txq:	target ring/queue
 * @bss:	wifi BSS index
 * @wcid:	wifi station id
 *
 * On NETSYS v2 the wifi info lives in a dedicated winfo field; on v1 it is
 * packed into the vlan2 field of the L2 info block.  Always returns 0.
 */
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
	} else {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
	}

	return 0;
}
399
/* Compare a software flow entry against a hardware FOE entry: the UDP flag
 * must match and the tuple data must be identical up to (but not including)
 * the IB2 word of the relevant union member.
 */
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	/* len - 4 excludes the leading ib1 word covered above */
	return !memcmp(&entry->data.data, &data->data, len - 4);
}
417
/* Unlink @entry and invalidate its hardware FOE slot.  For an L2 flow,
 * remove it from the l2_flows hashtable and recursively tear down every
 * attached subflow.  L2 subflow entries are heap-allocated per hardware
 * hash and are freed here.  Caller must hold ppe_lock.
 */
static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		/* mark the hardware slot invalid so it stops matching */
		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}
450
/* Time units elapsed since the timestamp stored in @ib1, accounting for
 * wrap-around of the hardware counter at the IB1 timestamp field width.
 */
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 ts = ib1 & ts_mask;

	return ts > now ? ts_mask + 1 - ts + now : now - ts;
}
462
/* Refresh the idle timestamp of an L2 flow from its subflows: adopt the
 * most recent hardware timestamp among still-bound subflows and prune any
 * subflow whose hardware slot is no longer in BIND state.  Caller must
 * hold ppe_lock.
 */
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			/* slot reclaimed by hardware: drop the subflow */
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		/* this subflow is fresher: copy its timestamp into ib1 */
		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
	}
}
495
/* Synchronize a software flow entry with the hardware table: copy the
 * current hardware IB1 (timestamp/state) back, or invalidate the cached
 * hash when the hardware slot no longer matches this flow.
 */
static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	/* snapshot the hardware entry before comparing */
	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}
524
/* Write @entry into hardware slot @hash, stamping it with the current
 * timestamp first.  The entry payload is written before ib1; the wmb()
 * ensures the hardware never observes a valid ib1 with stale data, and
 * dma_wmb() orders the whole update against subsequent DMA-visible work.
 * Caller must hold ppe_lock.
 */
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	/* drop any cached copy of the old slot contents */
	mtk_ppe_cache_clear(ppe);
}
552
/* Public wrapper around __mtk_foe_entry_clear() taking ppe_lock */
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}
559
/* Register an L2 (bridge) flow in the l2_flows hashtable; its hardware
 * subflows are created lazily from __mtk_ppe_check_skb().
 */
static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	entry->type = MTK_FLOW_TYPE_L2;

	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
				      mtk_flow_l2_ht_params);
}
568
/* mtk_foe_entry_commit() - register a flow entry for offloading.
 *
 * Bridge flows are delegated to the L2 hashtable.  Other flows are only
 * queued on the bucket list matching their hardware hash; the actual
 * hardware write happens later from __mtk_ppe_check_skb() when a packet
 * for this flow hits the PPE (hash stays 0xffff until then).
 */
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}
586
/* Materialize a hardware subflow for L2 flow @entry at slot @hash: merge
 * the L2 template from the software entry into the tuple the hardware
 * already captured in that slot, track it with a heap-allocated subflow
 * entry, and commit the result.  Caller must hold ppe_lock.
 */
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	/* only the head of the struct (up to l2_data.end) is needed */
	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
			    GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	/* keep packet type + UDP flag from hardware, rest from template */
	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}
628
/* __mtk_ppe_check_skb() - react to an unbound PPE hit for @skb at @hash.
 *
 * Called from the RX path when the hardware reports a FOE hash for a
 * packet.  If a pending software flow matches the hardware tuple, commit
 * it to the slot; otherwise try to derive the packet's L2 key (MAC pair +
 * VLAN, including MTK DSA-tagged frames) and spawn a subflow from a
 * registered bridge flow.
 */
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	/* already bound by a previous packet: nothing to do */
	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			/* stale subflow for this slot: tear it down */
			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	/* no exact flow matched: build an L2 key and look for a bridge flow */
	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	/* tag points 2 bytes before the payload: the ethertype/VLAN TCI area */
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		/* skip the 4-byte MTK switch tag */
		tag += 4;
		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}
705
/* Refresh the entry from hardware, then return its idle time in hardware
 * timer units.
 */
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}
712
/* mtk_ppe_init() - allocate and set up one PPE instance.
 * @base:	mapped register base of this PPE
 * @version:	PPE hardware version
 * @index:	instance index (used for debugfs naming)
 *
 * Allocates the device-managed FOE table (coherent DMA) and the per-bucket
 * software flow lists.  Return: the new instance, or NULL on failure.
 */
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version, int index)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct device *dev = eth->dev;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	/* rhashtable_init() can fail with -ENOMEM; don't ignore it */
	if (rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params))
		return NULL;

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		goto err_free_l2_flows;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		goto err_free_l2_flows;

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;

err_free_l2_flows:
	rhashtable_destroy(&ppe->l2_flows);
	return NULL;
}
758
mtk_ppe_deinit(struct mtk_eth * eth)759 void mtk_ppe_deinit(struct mtk_eth *eth)
760 {
761 int i;
762
763 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
764 if (!eth->ppe[i])
765 return;
766 rhashtable_destroy(ð->ppe[i]->l2_flows);
767 }
768 }
769
/* Zero the FOE table.  On MT7621 additionally mark as STATIC every entry
 * whose 80-byte slot would straddle a 1024-byte boundary (a hardware
 * erratum workaround — the skip[] offsets repeat every 128 entries).
 */
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}
791
/* mtk_ppe_start() - program the hardware and enable the offload engine.
 *
 * Initializes the FOE table, then configures table geometry, aging,
 * supported flow types, bind/unbind rate limits and finally flips the
 * global enable bit.  Safe to call with @ppe == NULL (no-op).
 */
void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	/* table geometry, aging sources, hash and scan mode */
	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	/* flow types the engine is allowed to offload */
	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	/* aging deltas and bind rate limits */
	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
		/* NOTE(review): 0xcb777 / 0x7f are vendor-provided magic
		 * values for the v2 CPU port map and SBW control — confirm
		 * against the NETSYS v2 datasheet.
		 */
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}
}
880
/* mtk_ppe_stop() - disable the offload engine.
 *
 * Invalidates every FOE entry, turns the cache off, clears the global
 * enable and flow-type configuration, disables aging, then waits for the
 * hardware to go idle.  Return: 0 on success (including @ppe == NULL),
 * or the error from mtk_ppe_wait_busy().
 */
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	/* invalidate all entries so nothing matches after restart */
	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}
912