// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip KSZ9477 switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz9477_reg.h"
#include "ksz_common.h"
#include "ksz9477.h"

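/* Helpers for setting or clearing bit fields in global and per-port switch
 * registers through the 8-bit and 32-bit regmaps.
 */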
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
        regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
}

static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
                         bool set)
{
        regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
                           bits, set ? bits : 0);
}

static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
        regmap_update_bits(ksz_regmap_32(dev), addr, bits, set ? bits : 0);
}

static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
                               u32 bits, bool set)
{
        regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset),
                           bits, set ? bits : 0);
}

int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
        u16 frame_size;

        if (!dsa_is_cpu_port(dev->ds, port))
                return 0;

        frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

        return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2,
                                  REG_SW_MTU_MASK, frame_size);
}

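/* Poll the VLAN access control register until the VLAN_START bit self-clears,
 * i.e. the previously triggered VLAN table operation has completed. Returns 0
 * on success or -ETIMEDOUT after 1 ms.
 */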
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
        unsigned int val;

        return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL,
                                        val, !(val & VLAN_START), 10, 1000);
}

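/* Read one VLAN table entry: select the entry by VID, trigger a read, wait
 * for completion, then copy the three result words (entry, untag membership,
 * port membership) into vlan_table[].
 */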
static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
                                  u32 *vlan_table)
{
        int ret;

        mutex_lock(&dev->vlan_mutex);

        ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
        ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);

        /* wait to be cleared */
        ret = ksz9477_wait_vlan_ctrl_ready(dev);
        if (ret) {
                dev_dbg(dev->dev, "Failed to read vlan table\n");
                goto exit;
        }

        ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
        ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
        ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);

        ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

exit:
        mutex_unlock(&dev->vlan_mutex);

        return ret;
}

static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
                                  u32 *vlan_table)
{
        int ret;

        mutex_lock(&dev->vlan_mutex);

        ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
        ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
        ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);

        ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
        ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);

        /* wait to be cleared */
        ret = ksz9477_wait_vlan_ctrl_ready(dev);
        if (ret) {
                dev_dbg(dev->dev, "Failed to write vlan table\n");
                goto exit;
        }

        ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

        /* update vlan cache table */
        dev->vlan_cache[vid].table[0] = vlan_table[0];
        dev->vlan_cache[vid].table[1] = vlan_table[1];
        dev->vlan_cache[vid].table[2] = vlan_table[2];

exit:
        mutex_unlock(&dev->vlan_mutex);

        return ret;
}

static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
        ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
        ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
        ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
        ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
}

static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
        ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
        ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
        ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
        ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}

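/* The two helpers below poll the ALU and ALU static table control registers
 * until the respective START bit self-clears, indicating that the requested
 * table access has finished.
 */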
static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
        unsigned int val;

        return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4,
                                        val, !(val & ALU_START), 10, 1000);
}

static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
        unsigned int val;

        return regmap_read_poll_timeout(ksz_regmap_32(dev),
                                        REG_SW_ALU_STAT_CTRL__4,
                                        val, !(val & ALU_STAT_START),
                                        10, 1000);
}

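/* Perform a software reset and bring the switch to a known state: turn off
 * the SPI data-out edge selection, restore the default lookup engine
 * configuration, mask the switch and port interrupts and, on chips that
 * support it, configure the reference clock output.
 */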
int ksz9477_reset_switch(struct ksz_device *dev)
{
        u8 data8;
        u32 data32;

        /* reset switch */
        ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);

        /* turn off SPI DO Edge select */
        regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0,
                           SPI_AUTO_EDGE_DETECTION, 0);

        /* default configuration */
        ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
        data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
                SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
        ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);

        /* disable interrupts */
        ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
        ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
        ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);

        /* KSZ9893 compatible chips do not support refclk configuration */
        if (dev->chip_id == KSZ9893_CHIP_ID ||
            dev->chip_id == KSZ8563_CHIP_ID ||
            dev->chip_id == KSZ9563_CHIP_ID)
                return 0;

        data8 = SW_ENABLE_REFCLKO;
        if (dev->synclko_disable)
                data8 = 0;
        else if (dev->synclko_125)
                data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
        ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);

        return 0;
}

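/* Read a single MIB counter of a port. The counter index is written to the
 * MIB control register together with the read request; the result is
 * accumulated into *cnt because the hardware counter resets on read.
 */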
void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
        struct ksz_port *p = &dev->ports[port];
        unsigned int val;
        u32 data;
        int ret;

        /* retain the flush/freeze bit */
        data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
        data |= MIB_COUNTER_READ;
        data |= (addr << MIB_COUNTER_INDEX_S);
        ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);

        ret = regmap_read_poll_timeout(ksz_regmap_32(dev),
                        PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
                        val, !(val & MIB_COUNTER_READ), 10, 1000);
        /* failed to read MIB. get out of loop */
        if (ret) {
                dev_dbg(dev->dev, "Failed to get MIB\n");
                return;
        }

        /* count resets upon read */
        ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
        *cnt += data;
}

void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
                       u64 *dropped, u64 *cnt)
{
        addr = dev->info->mib_names[addr].index;
        ksz9477_r_mib_cnt(dev, port, addr, cnt);
}

void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
        u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
        struct ksz_port *p = &dev->ports[port];

        /* enable/disable the port for flush/freeze function */
        mutex_lock(&p->mib.cnt_mutex);
        ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);

        /* used by MIB counter reading code to know freeze is enabled */
        p->freeze = freeze;
        mutex_unlock(&p->mib.cnt_mutex);
}

void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
        struct ksz_port_mib *mib = &dev->ports[port].mib;

        /* flush all enabled port MIB counters */
        mutex_lock(&mib->cnt_mutex);
        ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
                     MIB_COUNTER_FLUSH_FREEZE);
        ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
        ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
        mutex_unlock(&mib->cnt_mutex);
}

static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
                                 u16 *data)
{
        /* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
         * and BMSR_ERCAP bits are set.
         */
        if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
                *data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}

int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
        u16 val = 0xffff;
        int ret;

        /* No real PHY after this. Simulate the PHY.
         * A fixed PHY can be set up in the device tree, but this function is
         * still called for that port during initialization.
         * For an RGMII PHY there is no way to access it, so the fixed PHY
         * should be used. For an SGMII PHY the supporting code will be added
         * later.
         */
        if (!dev->info->internal_phy[addr]) {
                struct ksz_port *p = &dev->ports[addr];

                switch (reg) {
                case MII_BMCR:
                        val = 0x1140;
                        break;
                case MII_BMSR:
                        val = 0x796d;
                        break;
                case MII_PHYSID1:
                        val = 0x0022;
                        break;
                case MII_PHYSID2:
                        val = 0x1631;
                        break;
                case MII_ADVERTISE:
                        val = 0x05e1;
                        break;
                case MII_LPA:
                        val = 0xc5e1;
                        break;
                case MII_CTRL1000:
                        val = 0x0700;
                        break;
                case MII_STAT1000:
                        if (p->phydev.speed == SPEED_1000)
                                val = 0x3800;
                        else
                                val = 0;
                        break;
                }
        } else {
                ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
                if (ret)
                        return ret;

                ksz9477_r_phy_quirks(dev, addr, reg, &val);
        }

        *data = val;

        return 0;
}

int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
        u32 mask, val32;

        /* No real PHY after this. */
        if (!dev->info->internal_phy[addr])
                return 0;

        if (reg < 0x10)
                return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);

        /* Errata: When using SPI, I2C, or in-band register access,
         * writes to certain PHY registers should be performed as
         * 32-bit writes instead of 16-bit writes.
         */
        val32 = val;
        mask = 0xffff;
        if ((reg & 1) == 0) {
                val32 <<= 16;
                mask <<= 16;
        }
        reg &= ~1;
        return ksz_prmw32(dev, addr, 0x100 + (reg << 1), mask, val32);
}

void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
        ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}

void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
        const u16 *regs = dev->info->regs;
        u8 data;

        regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2,
                           SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
                           SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);

        if (port < dev->info->port_cnt) {
                /* flush individual port */
                ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
                if (!(data & PORT_LEARN_DISABLE))
                        ksz_pwrite8(dev, port, regs[P_STP_CTRL],
                                    data | PORT_LEARN_DISABLE);
                ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
                ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
        } else {
                /* flush all */
                ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
        }
}

int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
                                bool flag, struct netlink_ext_ack *extack)
{
        if (flag) {
                ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
                             PORT_VLAN_LOOKUP_VID_0, true);
                ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
        } else {
                ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
                ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
                             PORT_VLAN_LOOKUP_VID_0, false);
        }

        return 0;
}

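/* Add a port to a VLAN entry. The entry is read, the port is added to the
 * membership map (and to the untagged map if requested), the CPU port is
 * always kept tagged, and the entry is written back. The port PVID is
 * updated when the PVID flag is set.
 */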
int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
                          const struct switchdev_obj_port_vlan *vlan,
                          struct netlink_ext_ack *extack)
{
        u32 vlan_table[3];
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        int err;

        err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
                return err;
        }

        vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
        if (untagged)
                vlan_table[1] |= BIT(port);
        else
                vlan_table[1] &= ~BIT(port);
        vlan_table[1] &= ~(BIT(dev->cpu_port));

        vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);

        err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
                return err;
        }

        /* change PVID */
        if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
                ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);

        return 0;
}

int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
                          const struct switchdev_obj_port_vlan *vlan)
{
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        u32 vlan_table[3];
        u16 pvid;

        ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
        pvid = pvid & 0xFFF;

        if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
                dev_dbg(dev->dev, "Failed to get vlan table\n");
                return -ETIMEDOUT;
        }

        vlan_table[2] &= ~BIT(port);

        if (pvid == vlan->vid)
                pvid = 1;

        if (untagged)
                vlan_table[1] &= ~BIT(port);

        if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
                dev_dbg(dev->dev, "Failed to set vlan table\n");
                return -ETIMEDOUT;
        }

        ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);

        return 0;
}

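/* Install a static FDB entry. The ALU is first read at the given MAC/VID
 * index so that an existing entry's forwarding port map is preserved, the
 * port is added to that map, and the entry is written back as a valid static
 * entry.
 */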
int ksz9477_fdb_add(struct ksz_device *dev, int port,
                    const unsigned char *addr, u16 vid, struct dsa_db db)
{
        u32 alu_table[4];
        u32 data;
        int ret = 0;

        mutex_lock(&dev->alu_mutex);

        /* find any entry with mac & vid */
        data = vid << ALU_FID_INDEX_S;
        data |= ((addr[0] << 8) | addr[1]);
        ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

        data = ((addr[2] << 24) | (addr[3] << 16));
        data |= ((addr[4] << 8) | addr[5]);
        ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

        /* start read operation */
        ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

        /* wait to be finished */
        ret = ksz9477_wait_alu_ready(dev);
        if (ret) {
                dev_dbg(dev->dev, "Failed to read ALU\n");
                goto exit;
        }

        /* read ALU entry */
        ksz9477_read_table(dev, alu_table);

        /* update ALU entry */
        alu_table[0] = ALU_V_STATIC_VALID;
        alu_table[1] |= BIT(port);
        if (vid)
                alu_table[1] |= ALU_V_USE_FID;
        alu_table[2] = (vid << ALU_V_FID_S);
        alu_table[2] |= ((addr[0] << 8) | addr[1]);
        alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
        alu_table[3] |= ((addr[4] << 8) | addr[5]);

        ksz9477_write_table(dev, alu_table);

        ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

        /* wait to be finished */
        ret = ksz9477_wait_alu_ready(dev);
        if (ret)
                dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
        mutex_unlock(&dev->alu_mutex);

        return ret;
}

int ksz9477_fdb_del(struct ksz_device *dev, int port,
                    const unsigned char *addr, u16 vid, struct dsa_db db)
{
        u32 alu_table[4];
        u32 data;
        int ret = 0;

        mutex_lock(&dev->alu_mutex);

        /* read any entry with mac & vid */
        data = vid << ALU_FID_INDEX_S;
        data |= ((addr[0] << 8) | addr[1]);
        ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

        data = ((addr[2] << 24) | (addr[3] << 16));
        data |= ((addr[4] << 8) | addr[5]);
        ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

        /* start read operation */
        ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

        /* wait to be finished */
        ret = ksz9477_wait_alu_ready(dev);
        if (ret) {
                dev_dbg(dev->dev, "Failed to read ALU\n");
                goto exit;
        }

        ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
        if (alu_table[0] & ALU_V_STATIC_VALID) {
                ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
                ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
                ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);

                /* clear forwarding port */
                alu_table[1] &= ~BIT(port);

                /* if there is no port to forward, clear table */
                if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
                        alu_table[0] = 0;
                        alu_table[1] = 0;
                        alu_table[2] = 0;
                        alu_table[3] = 0;
                }
        } else {
                alu_table[0] = 0;
                alu_table[1] = 0;
                alu_table[2] = 0;
                alu_table[3] = 0;
        }

        ksz9477_write_table(dev, alu_table);

        ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

        /* wait to be finished */
        ret = ksz9477_wait_alu_ready(dev);
        if (ret)
                dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
        mutex_unlock(&dev->alu_mutex);

        return ret;
}

static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
        alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
        alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
        alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
        alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
                        ALU_V_PRIO_AGE_CNT_M;
        alu->mstp = alu_table[0] & ALU_V_MSTP_M;

        alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
        alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
        alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;

        alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;

        alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
        alu->mac[1] = alu_table[2] & 0xFF;
        alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
        alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
        alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
        alu->mac[5] = alu_table[3] & 0xFF;
}

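/* Walk the ALU in search mode and report every entry that forwards to the
 * given port through the DSA dump callback. The search is stopped and the
 * ALU released again before returning.
 */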
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
                     dsa_fdb_dump_cb_t *cb, void *data)
{
        int ret = 0;
        u32 ksz_data;
        u32 alu_table[4];
        struct alu_struct alu;
        int timeout;

        mutex_lock(&dev->alu_mutex);

        /* start ALU search */
        ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);

        do {
                timeout = 1000;
                do {
                        ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
                        if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
                                break;
                        usleep_range(1, 10);
                } while (timeout-- > 0);

                if (!timeout) {
                        dev_dbg(dev->dev, "Failed to search ALU\n");
                        ret = -ETIMEDOUT;
                        goto exit;
                }

                if (!(ksz_data & ALU_VALID))
                        continue;

                /* read ALU table */
                ksz9477_read_table(dev, alu_table);

                ksz9477_convert_alu(&alu, alu_table);

                if (alu.port_forward & BIT(port)) {
                        ret = cb(alu.mac, alu.fid, alu.is_static, data);
                        if (ret)
                                goto exit;
                }
        } while (ksz_data & ALU_START);

exit:
        /* stop ALU search */
        ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);

        mutex_unlock(&dev->alu_mutex);

        return ret;
}

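/* Add a port to a static multicast entry. The static ALU table is scanned
 * for an entry with a matching MAC/VID, or for the first free slot, and the
 * selected slot is rewritten with the port added to its forwarding map.
 * -ENOSPC is returned when the table is full.
 */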
int ksz9477_mdb_add(struct ksz_device *dev, int port,
                    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
        u32 static_table[4];
        const u8 *shifts;
        const u32 *masks;
        u32 data;
        int index;
        u32 mac_hi, mac_lo;
        int err = 0;

        shifts = dev->info->shifts;
        masks = dev->info->masks;

        mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
        mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
        mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

        mutex_lock(&dev->alu_mutex);

        for (index = 0; index < dev->info->num_statics; index++) {
                /* find empty slot first */
                data = (index << shifts[ALU_STAT_INDEX]) |
                        masks[ALU_STAT_READ] | ALU_STAT_START;
                ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

                /* wait to be finished */
                err = ksz9477_wait_alu_sta_ready(dev);
                if (err) {
                        dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
                        goto exit;
                }

                /* read ALU static table */
                ksz9477_read_table(dev, static_table);

                if (static_table[0] & ALU_V_STATIC_VALID) {
                        /* check this has same vid & mac address */
                        if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
                            ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
                            static_table[3] == mac_lo) {
                                /* found matching one */
                                break;
                        }
                } else {
                        /* found empty one */
                        break;
                }
        }

        /* no available entry */
        if (index == dev->info->num_statics) {
                err = -ENOSPC;
                goto exit;
        }

        /* add entry */
        static_table[0] = ALU_V_STATIC_VALID;
        static_table[1] |= BIT(port);
        if (mdb->vid)
                static_table[1] |= ALU_V_USE_FID;
        static_table[2] = (mdb->vid << ALU_V_FID_S);
        static_table[2] |= mac_hi;
        static_table[3] = mac_lo;

        ksz9477_write_table(dev, static_table);

        data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
        ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

        /* wait to be finished */
        if (ksz9477_wait_alu_sta_ready(dev))
                dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
        mutex_unlock(&dev->alu_mutex);
        return err;
}

int ksz9477_mdb_del(struct ksz_device *dev, int port,
                    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
        u32 static_table[4];
        const u8 *shifts;
        const u32 *masks;
        u32 data;
        int index;
        int ret = 0;
        u32 mac_hi, mac_lo;

        shifts = dev->info->shifts;
        masks = dev->info->masks;

        mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
        mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
        mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

        mutex_lock(&dev->alu_mutex);

        for (index = 0; index < dev->info->num_statics; index++) {
                /* find the entry to be deleted */
                data = (index << shifts[ALU_STAT_INDEX]) |
                        masks[ALU_STAT_READ] | ALU_STAT_START;
                ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

                /* wait to be finished */
                ret = ksz9477_wait_alu_sta_ready(dev);
                if (ret) {
                        dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
                        goto exit;
                }

                /* read ALU static table */
                ksz9477_read_table(dev, static_table);

                if (static_table[0] & ALU_V_STATIC_VALID) {
                        /* check this has same vid & mac address */
                        if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
                            ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
                            static_table[3] == mac_lo) {
                                /* found matching one */
                                break;
                        }
                }
        }

        /* no matching entry */
        if (index == dev->info->num_statics)
                goto exit;

        /* clear port */
        static_table[1] &= ~BIT(port);

        if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
                /* delete entry */
                static_table[0] = 0;
                static_table[1] = 0;
                static_table[2] = 0;
                static_table[3] = 0;
        }

        ksz9477_write_table(dev, static_table);

        data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
        ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

        /* wait to be finished */
        ret = ksz9477_wait_alu_sta_ready(dev);
        if (ret)
                dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
        mutex_unlock(&dev->alu_mutex);

        return ret;
}

int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
                            struct dsa_mall_mirror_tc_entry *mirror,
                            bool ingress, struct netlink_ext_ack *extack)
{
        u8 data;
        int p;

        /* Limit to one sniffer port.
         * Check if any port is already set up for sniffing.
         * If so, instruct the user to remove the previous entry and exit.
         */
        for (p = 0; p < dev->info->port_cnt; p++) {
                /* Skip the current sniffing port */
                if (p == mirror->to_local_port)
                        continue;

                ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

                if (data & PORT_MIRROR_SNIFFER) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Sniffer port is already configured, delete existing rules & retry");
                        return -EBUSY;
                }
        }

        if (ingress)
                ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
        else
                ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);

        /* configure mirror port */
        ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
                     PORT_MIRROR_SNIFFER, true);

        ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

        return 0;
}

void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
                             struct dsa_mall_mirror_tc_entry *mirror)
{
        bool in_use = false;
        u8 data;
        int p;

        if (mirror->ingress)
                ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
        else
                ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);

        /* Check if any port still refers to the sniffer port */
        for (p = 0; p < dev->info->port_cnt; p++) {
                ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

                if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
                        in_use = true;
                        break;
                }
        }

        /* delete sniffing if there are no other mirroring rules */
        if (!in_use)
                ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
                             PORT_MIRROR_SNIFFER, false);
}

static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
        phy_interface_t interface;
        bool gbit;

        if (dev->info->internal_phy[port])
                return PHY_INTERFACE_MODE_NA;

        gbit = ksz_get_gbit(dev, port);

        interface = ksz_get_xmii(dev, port, gbit);

        return interface;
}

void ksz9477_get_caps(struct ksz_device *dev, int port,
                      struct phylink_config *config)
{
        config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
                                   MAC_SYM_PAUSE;

        if (dev->info->gbit_capable[port])
                config->mac_capabilities |= MAC_1000FD;
}

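/* Program the address aging period. The period is specified in seconds and
 * is split across two registers: bits 7:0 go into REG_SW_LUE_CTRL_3 and bits
 * 10:8 into the age count field of REG_SW_LUE_CTRL_0.
 */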
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
        u32 secs = msecs / 1000;
        u8 value;
        u8 data;
        int ret;

        value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);

        ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
        if (ret < 0)
                return ret;

        data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);

        ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
        if (ret < 0)
                return ret;

        value &= ~SW_AGE_CNT_M;
        value |= FIELD_PREP(SW_AGE_CNT_M, data);

        return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}

void ksz9477_port_queue_split(struct ksz_device *dev, int port)
{
        u8 data;

        if (dev->info->num_tx_queues == 8)
                data = PORT_EIGHT_QUEUE;
        else if (dev->info->num_tx_queues == 4)
                data = PORT_FOUR_QUEUE;
        else if (dev->info->num_tx_queues == 2)
                data = PORT_TWO_QUEUE;
        else
                data = PORT_SINGLE_QUEUE;

        ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
}

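/* Apply the per-port defaults: tail tagging on the host port, queue split,
 * broadcast storm protection, 802.1p priority, forced flow control for the
 * MAC-only ports, and the initial port membership (all user ports for the
 * CPU port, the upstream port otherwise).
 */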
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
        struct dsa_switch *ds = dev->ds;
        u16 data16;
        u8 member;

        /* enable tag tail for host port */
        if (cpu_port)
                ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
                             true);

        ksz9477_port_queue_split(dev, port);

        ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);

        /* set back pressure */
        ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);

        /* enable broadcast storm limit */
        ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);

        /* disable DiffServ priority */
        ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);

        /* replace priority */
        ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
                     false);
        ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
                           MTI_PVID_REPLACE, false);

        /* enable 802.1p priority */
        ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);

        /* force flow control for non-PHY ports only */
        ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
                     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
                     !dev->info->internal_phy[port]);

        if (cpu_port)
                member = dsa_user_ports(ds);
        else
                member = BIT(dsa_upstream_port(ds, port));

        ksz9477_cfg_port_member(dev, port, member);

        /* clear pending interrupts */
        if (dev->info->internal_phy[port])
                ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}

void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
        struct ksz_device *dev = ds->priv;
        struct ksz_port *p;
        int i;

        for (i = 0; i < dev->info->port_cnt; i++) {
                if (dsa_is_cpu_port(ds, i) &&
                    (dev->info->cpu_ports & (1 << i))) {
                        phy_interface_t interface;
                        const char *prev_msg;
                        const char *prev_mode;

                        dev->cpu_port = i;
                        p = &dev->ports[i];

                        /* Read from the XMII register to determine the host
                         * port interface. If it is set specifically in the
                         * device tree, note the difference to help debugging.
                         */
                        interface = ksz9477_get_interface(dev, i);
                        if (!p->interface) {
                                if (dev->compat_interface) {
                                        dev_warn(dev->dev,
                                                 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
                                                 "Please update your device tree.\n",
                                                 i);
                                        p->interface = dev->compat_interface;
                                } else {
                                        p->interface = interface;
                                }
                        }
                        if (interface && interface != p->interface) {
                                prev_msg = " instead of ";
                                prev_mode = phy_modes(interface);
                        } else {
                                prev_msg = "";
                                prev_mode = "";
                        }
                        dev_info(dev->dev,
                                 "Port%d: using phy mode %s%s%s\n",
                                 i,
                                 phy_modes(p->interface),
                                 prev_msg,
                                 prev_mode);

                        /* enable cpu port */
                        ksz9477_port_setup(dev, i, true);
                }
        }

        for (i = 0; i < dev->info->port_cnt; i++) {
                if (i == dev->cpu_port)
                        continue;
                ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
        }
}

int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
        const u32 *masks;
        u32 data;
        int ret;

        masks = dev->info->masks;

        /* Enable Reserved multicast table */
        ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);

        /* Set the Override bit for forwarding BPDU packet to CPU */
        ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
                          ALU_V_OVERRIDE | BIT(dev->cpu_port));
        if (ret < 0)
                return ret;

        data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];

        ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
        if (ret < 0)
                return ret;

        /* wait to be finished */
        ret = ksz9477_wait_alu_sta_ready(dev);
        if (ret < 0) {
                dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
                return ret;
        }

        return 0;
}

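/* One-time switch setup called from the DSA framework: enable the unicast
 * VLAN boundary needed for port partitioning, disable the frame length check
 * (it does not work with tail tagging), enable jumbo frames so that
 * REG_SW_MTU__2 takes effect, program the default MTU, and enable queue-based
 * egress rate limiting and the global MIB counter freeze function.
 */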
int ksz9477_setup(struct dsa_switch *ds)
{
        struct ksz_device *dev = ds->priv;
        int ret = 0;

        ds->mtu_enforcement_ingress = true;

        /* Required for port partitioning. */
        ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
                      true);

        /* Does not work correctly with tail tagging. */
        ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);

        /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
        ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);

        /* Now we can configure default MTU value */
        ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
                                 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
        if (ret)
                return ret;

        /* queue based egress rate limit */
        ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);

        /* enable global MIB counter freeze function */
        ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);

        return 0;
}

u32 ksz9477_get_port_addr(int port, int offset)
{
        return PORT_CTRL_ADDR(port, offset);
}

int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
{
        val = val >> 8;

        return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
}

int ksz9477_switch_init(struct ksz_device *dev)
{
        u8 data8;
        int ret;

        dev->port_mask = (1 << dev->info->port_cnt) - 1;

        /* turn off SPI DO Edge select */
        ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
        if (ret)
                return ret;

        data8 &= ~SPI_AUTO_EDGE_DETECTION;
        ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
        if (ret)
                return ret;

        return 0;
}

void ksz9477_switch_exit(struct ksz_device *dev)
{
        ksz9477_reset_switch(dev);
}

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");