1 // SPDX-License-Identifier: GPL-2.0+
2
3 #include <net/switchdev.h>
4 #include "lan966x_main.h"
5
6 #define LAN966X_MAC_COLUMNS 4
7 #define MACACCESS_CMD_IDLE 0
8 #define MACACCESS_CMD_LEARN 1
9 #define MACACCESS_CMD_FORGET 2
10 #define MACACCESS_CMD_AGE 3
11 #define MACACCESS_CMD_GET_NEXT 4
12 #define MACACCESS_CMD_INIT 5
13 #define MACACCESS_CMD_READ 6
14 #define MACACCESS_CMD_WRITE 7
15 #define MACACCESS_CMD_SYNC_GET_NEXT 8
16
17 #define LAN966X_MAC_INVALID_ROW -1
18
19 struct lan966x_mac_entry {
20 struct list_head list;
21 unsigned char mac[ETH_ALEN] __aligned(2);
22 u16 vid;
23 u16 port_index;
24 int row;
25 };
26
27 struct lan966x_mac_raw_entry {
28 u32 mach;
29 u32 macl;
30 u32 maca;
31 bool processed;
32 };
33
lan966x_mac_get_status(struct lan966x * lan966x)34 static int lan966x_mac_get_status(struct lan966x *lan966x)
35 {
36 return lan_rd(lan966x, ANA_MACACCESS);
37 }
38
lan966x_mac_wait_for_completion(struct lan966x * lan966x)39 static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
40 {
41 u32 val;
42
43 return readx_poll_timeout_atomic(lan966x_mac_get_status,
44 lan966x, val,
45 (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
46 MACACCESS_CMD_IDLE,
47 TABLE_UPDATE_SLEEP_US,
48 TABLE_UPDATE_TIMEOUT_US);
49 }
50
lan966x_mac_select(struct lan966x * lan966x,const unsigned char mac[ETH_ALEN],unsigned int vid)51 static void lan966x_mac_select(struct lan966x *lan966x,
52 const unsigned char mac[ETH_ALEN],
53 unsigned int vid)
54 {
55 u32 macl = 0, mach = 0;
56
57 /* Set the MAC address to handle and the vlan associated in a format
58 * understood by the hardware.
59 */
60 mach |= vid << 16;
61 mach |= mac[0] << 8;
62 mach |= mac[1] << 0;
63 macl |= mac[2] << 24;
64 macl |= mac[3] << 16;
65 macl |= mac[4] << 8;
66 macl |= mac[5] << 0;
67
68 lan_wr(macl, lan966x, ANA_MACLDATA);
69 lan_wr(mach, lan966x, ANA_MACHDATA);
70 }
71
/* Write one entry into the HW MAC table under mac_lock.
 * @pgid:     destination index (port or port group id)
 * @cpu_copy: also copy matching frames to the CPU
 * @type:     HW entry type (normal/locked/IPv4/IPv6)
 * Returns 0 on success or -ETIMEDOUT if the command does not complete.
 */
static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
			       bool cpu_copy,
			       const unsigned char mac[ETH_ALEN],
			       unsigned int vid,
			       enum macaccess_entry_type type)
{
	int ret;

	spin_lock(&lan966x->mac_lock);
	lan966x_mac_select(lan966x, mac, vid);

	/* Issue a write command */
	lan_wr(ANA_MACACCESS_VALID_SET(1) |
	       ANA_MACACCESS_CHANGE2SW_SET(0) |
	       ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) |
	       ANA_MACACCESS_DEST_IDX_SET(pgid) |
	       ANA_MACACCESS_ENTRYTYPE_SET(type) |
	       ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
	       lan966x, ANA_MACACCESS);

	ret = lan966x_mac_wait_for_completion(lan966x);
	spin_unlock(&lan966x->mac_lock);

	return ret;
}
97
98 /* The mask of the front ports is encoded inside the mac parameter via a call
99 * to lan966x_mdb_encode_mac().
100 */
lan966x_mac_ip_learn(struct lan966x * lan966x,bool cpu_copy,const unsigned char mac[ETH_ALEN],unsigned int vid,enum macaccess_entry_type type)101 int lan966x_mac_ip_learn(struct lan966x *lan966x,
102 bool cpu_copy,
103 const unsigned char mac[ETH_ALEN],
104 unsigned int vid,
105 enum macaccess_entry_type type)
106 {
107 WARN_ON(type != ENTRYTYPE_MACV4 && type != ENTRYTYPE_MACV6);
108
109 return __lan966x_mac_learn(lan966x, 0, cpu_copy, mac, vid, type);
110 }
111
lan966x_mac_learn(struct lan966x * lan966x,int port,const unsigned char mac[ETH_ALEN],unsigned int vid,enum macaccess_entry_type type)112 int lan966x_mac_learn(struct lan966x *lan966x, int port,
113 const unsigned char mac[ETH_ALEN],
114 unsigned int vid,
115 enum macaccess_entry_type type)
116 {
117 WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
118
119 return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
120 }
121
lan966x_mac_forget_locked(struct lan966x * lan966x,const unsigned char mac[ETH_ALEN],unsigned int vid,enum macaccess_entry_type type)122 static int lan966x_mac_forget_locked(struct lan966x *lan966x,
123 const unsigned char mac[ETH_ALEN],
124 unsigned int vid,
125 enum macaccess_entry_type type)
126 {
127 lockdep_assert_held(&lan966x->mac_lock);
128
129 lan966x_mac_select(lan966x, mac, vid);
130
131 /* Issue a forget command */
132 lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
133 ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET),
134 lan966x, ANA_MACACCESS);
135
136 return lan966x_mac_wait_for_completion(lan966x);
137 }
138
lan966x_mac_forget(struct lan966x * lan966x,const unsigned char mac[ETH_ALEN],unsigned int vid,enum macaccess_entry_type type)139 int lan966x_mac_forget(struct lan966x *lan966x,
140 const unsigned char mac[ETH_ALEN],
141 unsigned int vid,
142 enum macaccess_entry_type type)
143 {
144 int ret;
145
146 spin_lock(&lan966x->mac_lock);
147 ret = lan966x_mac_forget_locked(lan966x, mac, vid, type);
148 spin_unlock(&lan966x->mac_lock);
149
150 return ret;
151 }
152
/* Learn a locked entry that redirects @addr/@vid to the CPU port group. */
int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
{
	return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
}
157
/* Remove a locked CPU entry previously added by lan966x_mac_cpu_learn(). */
int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid)
{
	return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED);
}
162
/* Program the HW auto-age period. The register holds half the ageing time
 * because an entry must be hit by two age scans before it is removed.
 */
void lan966x_mac_set_ageing(struct lan966x *lan966x,
			    u32 ageing)
{
	lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2),
		ANA_AUTOAGE_AGE_PERIOD,
		lan966x, ANA_AUTOAGE);
}
170
lan966x_mac_init(struct lan966x * lan966x)171 void lan966x_mac_init(struct lan966x *lan966x)
172 {
173 /* Clear the MAC table */
174 lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS);
175 lan966x_mac_wait_for_completion(lan966x);
176
177 spin_lock_init(&lan966x->mac_lock);
178 INIT_LIST_HEAD(&lan966x->mac_entries);
179 }
180
/* Allocate and fill a SW MAC entry. GFP_ATOMIC because callers may hold
 * mac_lock or run in atomic context. Returns NULL on allocation failure.
 * The row is unknown at this point, so it is marked invalid.
 */
static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *mac,
							 u16 vid, u16 port_index)
{
	struct lan966x_mac_entry *mac_entry;

	mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC);
	if (!mac_entry)
		return NULL;

	memcpy(mac_entry->mac, mac, ETH_ALEN);
	mac_entry->vid = vid;
	mac_entry->port_index = port_index;
	mac_entry->row = LAN966X_MAC_INVALID_ROW;
	return mac_entry;
}
196
/* Search the SW shadow list for a (mac, vid, port) match. Caller must hold
 * mac_lock. Returns the entry or NULL if not found.
 */
static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
							const unsigned char *mac,
							u16 vid, u16 port_index)
{
	struct lan966x_mac_entry *res = NULL;
	struct lan966x_mac_entry *mac_entry;

	list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
		if (mac_entry->vid == vid &&
		    ether_addr_equal(mac, mac_entry->mac) &&
		    mac_entry->port_index == port_index) {
			res = mac_entry;
			break;
		}
	}

	return res;
}
215
lan966x_mac_lookup(struct lan966x * lan966x,const unsigned char mac[ETH_ALEN],unsigned int vid,enum macaccess_entry_type type)216 static int lan966x_mac_lookup(struct lan966x *lan966x,
217 const unsigned char mac[ETH_ALEN],
218 unsigned int vid, enum macaccess_entry_type type)
219 {
220 int ret;
221
222 lan966x_mac_select(lan966x, mac, vid);
223
224 /* Issue a read command */
225 lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
226 ANA_MACACCESS_VALID_SET(1) |
227 ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ),
228 lan966x, ANA_MACACCESS);
229
230 ret = lan966x_mac_wait_for_completion(lan966x);
231 if (ret)
232 return ret;
233
234 return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS));
235 }
236
/* Notify the switchdev FDB listeners (e.g. the bridge) about an offloaded
 * entry change on @dev.
 */
static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type,
				       const char *mac, u16 vid,
				       struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info = { 0 };

	info.addr = mac;
	info.vid = vid;
	info.offloaded = true;
	call_switchdev_notifiers(type, dev, &info.info, NULL);
}
248
/* Add a static (locked) FDB entry on behalf of the bridge: update the SW
 * shadow list if needed, program HW, and notify switchdev that the entry is
 * offloaded. Returns 0 on success or -ENOMEM.
 */
int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
			  const unsigned char *addr, u16 vid)
{
	struct lan966x_mac_entry *mac_entry;

	spin_lock(&lan966x->mac_lock);
	if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) {
		spin_unlock(&lan966x->mac_lock);
		return 0;
	}

	/* In case the entry already exists, don't add it again to SW,
	 * just update HW, but we need to look in the actual HW because
	 * it is possible for an entry to be learn by HW and before we
	 * get the interrupt the frame will reach CPU and the CPU will
	 * add the entry but without the extern_learn flag.
	 */
	mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
	if (mac_entry) {
		spin_unlock(&lan966x->mac_lock);
		goto mac_learn;
	}

	mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
	if (!mac_entry) {
		spin_unlock(&lan966x->mac_lock);
		return -ENOMEM;
	}

	list_add_tail(&mac_entry->list, &lan966x->mac_entries);
	spin_unlock(&lan966x->mac_lock);

	lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);

mac_learn:
	/* HW learn is done outside the lock; __lan966x_mac_learn takes it */
	lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);

	return 0;
}
288
/* Delete all SW entries matching (addr, vid) on any port and forget them in
 * HW. Always returns 0.
 */
int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
			  u16 vid)
{
	struct lan966x_mac_entry *mac_entry, *tmp;

	spin_lock(&lan966x->mac_lock);
	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
				 list) {
		if (mac_entry->vid == vid &&
		    ether_addr_equal(addr, mac_entry->mac)) {
			lan966x_mac_forget_locked(lan966x, mac_entry->mac,
						  mac_entry->vid,
						  ENTRYTYPE_LOCKED);

			list_del(&mac_entry->list);
			kfree(mac_entry);
		}
	}
	spin_unlock(&lan966x->mac_lock);

	return 0;
}
311
lan966x_mac_purge_entries(struct lan966x * lan966x)312 void lan966x_mac_purge_entries(struct lan966x *lan966x)
313 {
314 struct lan966x_mac_entry *mac_entry, *tmp;
315
316 spin_lock(&lan966x->mac_lock);
317 list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
318 list) {
319 lan966x_mac_forget_locked(lan966x, mac_entry->mac,
320 mac_entry->vid, ENTRYTYPE_LOCKED);
321
322 list_del(&mac_entry->list);
323 kfree(mac_entry);
324 }
325 spin_unlock(&lan966x->mac_lock);
326 }
327
/* Call the FDB notifiers under RTNL, as required for notifications that
 * originate from the driver (not from the bridge itself). Must therefore be
 * called from sleepable context without mac_lock held.
 */
static void lan966x_mac_notifiers(enum switchdev_notifier_type type,
				  unsigned char *mac, u32 vid,
				  struct net_device *dev)
{
	rtnl_lock();
	lan966x_fdb_call_notifiers(type, mac, vid, dev);
	rtnl_unlock();
}
336
/* Decode a raw HW table snapshot into MAC address, VLAN id and destination
 * index. Layout is the inverse of lan966x_mac_select(): mach holds
 * vid[27:16] and mac bytes 0-1, macl holds mac bytes 2-5.
 */
static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry,
					  u8 *mac, u16 *vid, u32 *dest_idx)
{
	mac[0] = (raw_entry->mach >> 8) & 0xff;
	mac[1] = (raw_entry->mach >> 0) & 0xff;
	mac[2] = (raw_entry->macl >> 24) & 0xff;
	mac[3] = (raw_entry->macl >> 16) & 0xff;
	mac[4] = (raw_entry->macl >> 8) & 0xff;
	mac[5] = (raw_entry->macl >> 0) & 0xff;

	*vid = (raw_entry->mach >> 16) & 0xfff;
	*dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca);
}
350
/* Reconcile one changed HW row against the SW shadow list:
 * - SW entries on this row that no longer exist in HW are removed and the
 *   bridge is told via SWITCHDEV_FDB_DEL_TO_BRIDGE;
 * - HW entries with no SW counterpart are added to the SW list and announced
 *   via SWITCHDEV_FDB_ADD_TO_BRIDGE.
 * Notifiers take RTNL, so they must run outside mac_lock; deletions are
 * staged on a temporary list for that reason.
 */
static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
				    struct lan966x_mac_raw_entry *raw_entries)
{
	struct lan966x_mac_entry *mac_entry, *tmp;
	unsigned char mac[ETH_ALEN] __aligned(2);
	struct list_head mac_deleted_entries;
	u32 dest_idx;
	u32 column;
	u16 vid;

	INIT_LIST_HEAD(&mac_deleted_entries);

	spin_lock(&lan966x->mac_lock);
	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
		bool found = false;

		if (mac_entry->row != row)
			continue;

		for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
			/* All the valid entries are at the start of the row,
			 * so when get one invalid entry it can just skip the
			 * rest of the columns
			 */
			if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
				break;

			lan966x_mac_process_raw_entry(&raw_entries[column],
						      mac, &vid, &dest_idx);
			if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
				continue;

			/* If the entry in SW is found, then there is nothing
			 * to do
			 */
			if (mac_entry->vid == vid &&
			    ether_addr_equal(mac_entry->mac, mac) &&
			    mac_entry->port_index == dest_idx) {
				raw_entries[column].processed = true;
				found = true;
				break;
			}
		}

		if (!found) {
			list_del(&mac_entry->list);
			/* Move the entry from SW list to a tmp list such that
			 * it would be deleted later
			 */
			list_add_tail(&mac_entry->list, &mac_deleted_entries);
		}
	}
	spin_unlock(&lan966x->mac_lock);

	list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) {
		/* Notify the bridge that the entry doesn't exist
		 * anymore in the HW
		 */
		lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
				      mac_entry->mac, mac_entry->vid,
				      lan966x->ports[mac_entry->port_index]->dev);
		list_del(&mac_entry->list);
		kfree(mac_entry);
	}

	/* Now go to the list of columns and see if any entry was not in the SW
	 * list, then that means that the entry is new so it needs to notify the
	 * bridge.
	 */
	for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
		/* All the valid entries are at the start of the row, so when
		 * get one invalid entry it can just skip the rest of the columns
		 */
		if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
			break;

		/* If the entry already exists then don't do anything */
		if (raw_entries[column].processed)
			continue;

		lan966x_mac_process_raw_entry(&raw_entries[column],
					      mac, &vid, &dest_idx);
		if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
			continue;

		/* Re-check under the lock: the entry may have been added
		 * concurrently while the lock was dropped above.
		 */
		spin_lock(&lan966x->mac_lock);
		mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx);
		if (mac_entry) {
			spin_unlock(&lan966x->mac_lock);
			continue;
		}

		mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
		if (!mac_entry) {
			spin_unlock(&lan966x->mac_lock);
			return;
		}

		mac_entry->row = row;
		list_add_tail(&mac_entry->list, &lan966x->mac_entries);
		spin_unlock(&lan966x->mac_lock);

		lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
				      mac, vid, lan966x->ports[dest_idx]->dev);
	}
}
457
lan966x_mac_irq_handler(struct lan966x * lan966x)458 irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
459 {
460 struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 };
461 u32 index, column;
462 bool stop = true;
463 u32 val;
464
465 /* Start the scan from 0, 0 */
466 lan_wr(ANA_MACTINDX_M_INDEX_SET(0) |
467 ANA_MACTINDX_BUCKET_SET(0),
468 lan966x, ANA_MACTINDX);
469
470 while (1) {
471 spin_lock(&lan966x->mac_lock);
472 lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
473 ANA_MACACCESS_MAC_TABLE_CMD,
474 lan966x, ANA_MACACCESS);
475 lan966x_mac_wait_for_completion(lan966x);
476
477 val = lan_rd(lan966x, ANA_MACTINDX);
478 index = ANA_MACTINDX_M_INDEX_GET(val);
479 column = ANA_MACTINDX_BUCKET_GET(val);
480
481 /* The SYNC-GET-NEXT returns all the entries(4) in a row in
482 * which is suffered a change. By change it means that new entry
483 * was added or an entry was removed because of ageing.
484 * It would return all the columns for that row. And after that
485 * it would return the next row The stop conditions of the
486 * SYNC-GET-NEXT is when it reaches 'directly' to row 0
487 * column 3. So if SYNC-GET-NEXT returns row 0 and column 0
488 * then it is required to continue to read more even if it
489 * reaches row 0 and column 3.
490 */
491 if (index == 0 && column == 0)
492 stop = false;
493
494 if (column == LAN966X_MAC_COLUMNS - 1 &&
495 index == 0 && stop) {
496 spin_unlock(&lan966x->mac_lock);
497 break;
498 }
499
500 entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
501 entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
502 entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
503 spin_unlock(&lan966x->mac_lock);
504
505 /* Once all the columns are read process them */
506 if (column == LAN966X_MAC_COLUMNS - 1) {
507 lan966x_mac_irq_process(lan966x, index, entry);
508 /* A row was processed so it is safe to assume that the
509 * next row/column can be the stop condition
510 */
511 stop = true;
512 }
513 }
514
515 lan_rmw(ANA_ANAINTR_INTR_SET(0),
516 ANA_ANAINTR_INTR,
517 lan966x, ANA_ANAINTR);
518
519 return IRQ_HANDLED;
520 }
521