1 /**
2 * drivers/net/ethernet/micrel/ks8851_mll.c
3 * Copyright (c) 2009 Micrel Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19 /**
20 * Supports:
21 * KS8851 16bit MLL chip from Micrel Inc.
22 */
23
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/kernel.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/ethtool.h>
32 #include <linux/cache.h>
33 #include <linux/crc32.h>
34 #include <linux/mii.h>
35 #include <linux/platform_device.h>
36 #include <linux/delay.h>
37 #include <linux/slab.h>
38 #include <asm/io.h>
39
40 #define DRV_NAME "ks8851_mll"
41
42 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
43 #define MAX_RECV_FRAMES 255
44 #define MAX_BUF_SIZE 2048
45 #define TX_BUF_SIZE 2000
46 #define RX_BUF_SIZE 2000
47
48 #define KS_CCR 0x08
49 #define CCR_EEPROM (1 << 9)
50 #define CCR_SPI (1 << 8)
51 #define CCR_8BIT (1 << 7)
52 #define CCR_16BIT (1 << 6)
53 #define CCR_32BIT (1 << 5)
54 #define CCR_SHARED (1 << 4)
55 #define CCR_32PIN (1 << 0)
56
57 /* MAC address registers */
58 #define KS_MARL 0x10
59 #define KS_MARM 0x12
60 #define KS_MARH 0x14
61
62 #define KS_OBCR 0x20
63 #define OBCR_ODS_16MA (1 << 6)
64
65 #define KS_EEPCR 0x22
66 #define EEPCR_EESA (1 << 4)
67 #define EEPCR_EESB (1 << 3)
68 #define EEPCR_EEDO (1 << 2)
69 #define EEPCR_EESCK (1 << 1)
70 #define EEPCR_EECS (1 << 0)
71
72 #define KS_MBIR 0x24
73 #define MBIR_TXMBF (1 << 12)
74 #define MBIR_TXMBFA (1 << 11)
75 #define MBIR_RXMBF (1 << 4)
76 #define MBIR_RXMBFA (1 << 3)
77
78 #define KS_GRR 0x26
79 #define GRR_QMU (1 << 1)
80 #define GRR_GSR (1 << 0)
81
82 #define KS_WFCR 0x2A
83 #define WFCR_MPRXE (1 << 7)
84 #define WFCR_WF3E (1 << 3)
85 #define WFCR_WF2E (1 << 2)
86 #define WFCR_WF1E (1 << 1)
87 #define WFCR_WF0E (1 << 0)
88
89 #define KS_WF0CRC0 0x30
90 #define KS_WF0CRC1 0x32
91 #define KS_WF0BM0 0x34
92 #define KS_WF0BM1 0x36
93 #define KS_WF0BM2 0x38
94 #define KS_WF0BM3 0x3A
95
96 #define KS_WF1CRC0 0x40
97 #define KS_WF1CRC1 0x42
98 #define KS_WF1BM0 0x44
99 #define KS_WF1BM1 0x46
100 #define KS_WF1BM2 0x48
101 #define KS_WF1BM3 0x4A
102
103 #define KS_WF2CRC0 0x50
104 #define KS_WF2CRC1 0x52
105 #define KS_WF2BM0 0x54
106 #define KS_WF2BM1 0x56
107 #define KS_WF2BM2 0x58
108 #define KS_WF2BM3 0x5A
109
110 #define KS_WF3CRC0 0x60
111 #define KS_WF3CRC1 0x62
112 #define KS_WF3BM0 0x64
113 #define KS_WF3BM1 0x66
114 #define KS_WF3BM2 0x68
115 #define KS_WF3BM3 0x6A
116
117 #define KS_TXCR 0x70
118 #define TXCR_TCGICMP (1 << 8)
119 #define TXCR_TCGUDP (1 << 7)
120 #define TXCR_TCGTCP (1 << 6)
121 #define TXCR_TCGIP (1 << 5)
122 #define TXCR_FTXQ (1 << 4)
123 #define TXCR_TXFCE (1 << 3)
124 #define TXCR_TXPE (1 << 2)
125 #define TXCR_TXCRC (1 << 1)
126 #define TXCR_TXE (1 << 0)
127
128 #define KS_TXSR 0x72
129 #define TXSR_TXLC (1 << 13)
130 #define TXSR_TXMC (1 << 12)
131 #define TXSR_TXFID_MASK (0x3f << 0)
132 #define TXSR_TXFID_SHIFT (0)
133 #define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
134
135
136 #define KS_RXCR1 0x74
137 #define RXCR1_FRXQ (1 << 15)
138 #define RXCR1_RXUDPFCC (1 << 14)
139 #define RXCR1_RXTCPFCC (1 << 13)
140 #define RXCR1_RXIPFCC (1 << 12)
141 #define RXCR1_RXPAFMA (1 << 11)
142 #define RXCR1_RXFCE (1 << 10)
143 #define RXCR1_RXEFE (1 << 9)
144 #define RXCR1_RXMAFMA (1 << 8)
145 #define RXCR1_RXBE (1 << 7)
146 #define RXCR1_RXME (1 << 6)
147 #define RXCR1_RXUE (1 << 5)
148 #define RXCR1_RXAE (1 << 4)
149 #define RXCR1_RXINVF (1 << 1)
150 #define RXCR1_RXE (1 << 0)
151 #define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
152 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
153
154 #define KS_RXCR2 0x76
155 #define RXCR2_SRDBL_MASK (0x7 << 5)
156 #define RXCR2_SRDBL_SHIFT (5)
157 #define RXCR2_SRDBL_4B (0x0 << 5)
158 #define RXCR2_SRDBL_8B (0x1 << 5)
159 #define RXCR2_SRDBL_16B (0x2 << 5)
160 #define RXCR2_SRDBL_32B (0x3 << 5)
161 /* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
162 #define RXCR2_IUFFP (1 << 4)
163 #define RXCR2_RXIUFCEZ (1 << 3)
164 #define RXCR2_UDPLFE (1 << 2)
165 #define RXCR2_RXICMPFCC (1 << 1)
166 #define RXCR2_RXSAF (1 << 0)
167
168 #define KS_TXMIR 0x78
169
170 #define KS_RXFHSR 0x7C
171 #define RXFSHR_RXFV (1 << 15)
172 #define RXFSHR_RXICMPFCS (1 << 13)
173 #define RXFSHR_RXIPFCS (1 << 12)
174 #define RXFSHR_RXTCPFCS (1 << 11)
175 #define RXFSHR_RXUDPFCS (1 << 10)
176 #define RXFSHR_RXBF (1 << 7)
177 #define RXFSHR_RXMF (1 << 6)
178 #define RXFSHR_RXUF (1 << 5)
179 #define RXFSHR_RXMR (1 << 4)
180 #define RXFSHR_RXFT (1 << 3)
181 #define RXFSHR_RXFTL (1 << 2)
182 #define RXFSHR_RXRF (1 << 1)
183 #define RXFSHR_RXCE (1 << 0)
184 #define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
185 RXFSHR_RXFTL | RXFSHR_RXMR |\
186 RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
187 RXFSHR_RXTCPFCS)
188 #define KS_RXFHBCR 0x7E
189 #define RXFHBCR_CNT_MASK 0x0FFF
190
191 #define KS_TXQCR 0x80
192 #define TXQCR_AETFE (1 << 2)
193 #define TXQCR_TXQMAM (1 << 1)
194 #define TXQCR_METFE (1 << 0)
195
196 #define KS_RXQCR 0x82
197 #define RXQCR_RXDTTS (1 << 12)
198 #define RXQCR_RXDBCTS (1 << 11)
199 #define RXQCR_RXFCTS (1 << 10)
200 #define RXQCR_RXIPHTOE (1 << 9)
201 #define RXQCR_RXDTTE (1 << 7)
202 #define RXQCR_RXDBCTE (1 << 6)
203 #define RXQCR_RXFCTE (1 << 5)
204 #define RXQCR_ADRFE (1 << 4)
205 #define RXQCR_SDA (1 << 3)
206 #define RXQCR_RRXEF (1 << 0)
207 #define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
208
209 #define KS_TXFDPR 0x84
210 #define TXFDPR_TXFPAI (1 << 14)
211 #define TXFDPR_TXFP_MASK (0x7ff << 0)
212 #define TXFDPR_TXFP_SHIFT (0)
213
214 #define KS_RXFDPR 0x86
215 #define RXFDPR_RXFPAI (1 << 14)
216
217 #define KS_RXDTTR 0x8C
218 #define KS_RXDBCTR 0x8E
219
220 #define KS_IER 0x90
221 #define KS_ISR 0x92
222 #define IRQ_LCI (1 << 15)
223 #define IRQ_TXI (1 << 14)
224 #define IRQ_RXI (1 << 13)
225 #define IRQ_RXOI (1 << 11)
226 #define IRQ_TXPSI (1 << 9)
227 #define IRQ_RXPSI (1 << 8)
228 #define IRQ_TXSAI (1 << 6)
229 #define IRQ_RXWFDI (1 << 5)
230 #define IRQ_RXMPDI (1 << 4)
231 #define IRQ_LDI (1 << 3)
232 #define IRQ_EDI (1 << 2)
233 #define IRQ_SPIBEI (1 << 1)
234 #define IRQ_DEDI (1 << 0)
235
236 #define KS_RXFCTR 0x9C
237 #define RXFCTR_THRESHOLD_MASK 0x00FF
238
239 #define KS_RXFC 0x9D
240 #define RXFCTR_RXFC_MASK (0xff << 8)
241 #define RXFCTR_RXFC_SHIFT (8)
242 #define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
243 #define RXFCTR_RXFCT_MASK (0xff << 0)
244 #define RXFCTR_RXFCT_SHIFT (0)
245
246 #define KS_TXNTFSR 0x9E
247
248 #define KS_MAHTR0 0xA0
249 #define KS_MAHTR1 0xA2
250 #define KS_MAHTR2 0xA4
251 #define KS_MAHTR3 0xA6
252
253 #define KS_FCLWR 0xB0
254 #define KS_FCHWR 0xB2
255 #define KS_FCOWR 0xB4
256
257 #define KS_CIDER 0xC0
258 #define CIDER_ID 0x8870
259 #define CIDER_REV_MASK (0x7 << 1)
260 #define CIDER_REV_SHIFT (1)
261 #define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
262
263 #define KS_CGCR 0xC6
264 #define KS_IACR 0xC8
265 #define IACR_RDEN (1 << 12)
266 #define IACR_TSEL_MASK (0x3 << 10)
267 #define IACR_TSEL_SHIFT (10)
268 #define IACR_TSEL_MIB (0x3 << 10)
269 #define IACR_ADDR_MASK (0x1f << 0)
270 #define IACR_ADDR_SHIFT (0)
271
272 #define KS_IADLR 0xD0
273 #define KS_IAHDR 0xD2
274
275 #define KS_PMECR 0xD4
276 #define PMECR_PME_DELAY (1 << 14)
277 #define PMECR_PME_POL (1 << 12)
278 #define PMECR_WOL_WAKEUP (1 << 11)
279 #define PMECR_WOL_MAGICPKT (1 << 10)
280 #define PMECR_WOL_LINKUP (1 << 9)
281 #define PMECR_WOL_ENERGY (1 << 8)
282 #define PMECR_AUTO_WAKE_EN (1 << 7)
283 #define PMECR_WAKEUP_NORMAL (1 << 6)
284 #define PMECR_WKEVT_MASK (0xf << 2)
285 #define PMECR_WKEVT_SHIFT (2)
286 #define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
287 #define PMECR_WKEVT_ENERGY (0x1 << 2)
288 #define PMECR_WKEVT_LINK (0x2 << 2)
289 #define PMECR_WKEVT_MAGICPKT (0x4 << 2)
290 #define PMECR_WKEVT_FRAME (0x8 << 2)
291 #define PMECR_PM_MASK (0x3 << 0)
292 #define PMECR_PM_SHIFT (0)
293 #define PMECR_PM_NORMAL (0x0 << 0)
294 #define PMECR_PM_ENERGY (0x1 << 0)
295 #define PMECR_PM_SOFTDOWN (0x2 << 0)
296 #define PMECR_PM_POWERSAVE (0x3 << 0)
297
298 /* Standard MII PHY data */
299 #define KS_P1MBCR 0xE4
300 #define P1MBCR_FORCE_FDX (1 << 8)
301
302 #define KS_P1MBSR 0xE6
303 #define P1MBSR_AN_COMPLETE (1 << 5)
304 #define P1MBSR_AN_CAPABLE (1 << 3)
305 #define P1MBSR_LINK_UP (1 << 2)
306
307 #define KS_PHY1ILR 0xE8
308 #define KS_PHY1IHR 0xEA
309 #define KS_P1ANAR 0xEC
310 #define KS_P1ANLPR 0xEE
311
312 #define KS_P1SCLMD 0xF4
313 #define P1SCLMD_LEDOFF (1 << 15)
314 #define P1SCLMD_TXIDS (1 << 14)
315 #define P1SCLMD_RESTARTAN (1 << 13)
316 #define P1SCLMD_DISAUTOMDIX (1 << 10)
317 #define P1SCLMD_FORCEMDIX (1 << 9)
318 #define P1SCLMD_AUTONEGEN (1 << 7)
319 #define P1SCLMD_FORCE100 (1 << 6)
320 #define P1SCLMD_FORCEFDX (1 << 5)
321 #define P1SCLMD_ADV_FLOW (1 << 4)
322 #define P1SCLMD_ADV_100BT_FDX (1 << 3)
323 #define P1SCLMD_ADV_100BT_HDX (1 << 2)
324 #define P1SCLMD_ADV_10BT_FDX (1 << 1)
325 #define P1SCLMD_ADV_10BT_HDX (1 << 0)
326
327 #define KS_P1CR 0xF6
328 #define P1CR_HP_MDIX (1 << 15)
329 #define P1CR_REV_POL (1 << 13)
330 #define P1CR_OP_100M (1 << 10)
331 #define P1CR_OP_FDX (1 << 9)
332 #define P1CR_OP_MDI (1 << 7)
333 #define P1CR_AN_DONE (1 << 6)
334 #define P1CR_LINK_GOOD (1 << 5)
335 #define P1CR_PNTR_FLOW (1 << 4)
336 #define P1CR_PNTR_100BT_FDX (1 << 3)
337 #define P1CR_PNTR_100BT_HDX (1 << 2)
338 #define P1CR_PNTR_10BT_FDX (1 << 1)
339 #define P1CR_PNTR_10BT_HDX (1 << 0)
340
341 /* TX Frame control */
342
343 #define TXFR_TXIC (1 << 15)
344 #define TXFR_TXFID_MASK (0x3f << 0)
345 #define TXFR_TXFID_SHIFT (0)
346
347 #define KS_P1SR 0xF8
348 #define P1SR_HP_MDIX (1 << 15)
349 #define P1SR_REV_POL (1 << 13)
350 #define P1SR_OP_100M (1 << 10)
351 #define P1SR_OP_FDX (1 << 9)
352 #define P1SR_OP_MDI (1 << 7)
353 #define P1SR_AN_DONE (1 << 6)
354 #define P1SR_LINK_GOOD (1 << 5)
355 #define P1SR_PNTR_FLOW (1 << 4)
356 #define P1SR_PNTR_100BT_FDX (1 << 3)
357 #define P1SR_PNTR_100BT_HDX (1 << 2)
358 #define P1SR_PNTR_10BT_FDX (1 << 1)
359 #define P1SR_PNTR_10BT_HDX (1 << 0)
360
361 #define ENUM_BUS_NONE 0
362 #define ENUM_BUS_8BIT 1
363 #define ENUM_BUS_16BIT 2
364 #define ENUM_BUS_32BIT 3
365
366 #define MAX_MCAST_LST 32
367 #define HW_MCAST_SIZE 8
368
/**
 * union ks_tx_hdr - tx header data
 * @txb: The header as bytes
 * @txw: The header as 16bit, little-endian words
 *
 * A dual representation of the tx header data to allow
 * access to individual bytes, and to allow 16bit accesses
 * with 16bit alignment.
 */
union ks_tx_hdr {
	u8 txb[4];	/* byte view: control, fid, 2-byte length */
	__le16 txw[2];	/* word view used for 16-bit bus writes */
};
382
383 /**
384 * struct ks_net - KS8851 driver private data
385 * @net_device : The network device we're bound to
386 * @hw_addr : start address of data register.
387 * @hw_addr_cmd : start address of command register.
 * @txh : temporary buffer to save status/length.
389 * @lock : Lock to ensure that the device is not accessed when busy.
390 * @pdev : Pointer to platform device.
391 * @mii : The MII state information for the mii calls.
392 * @frame_head_info : frame header information for multi-pkt rx.
393 * @statelock : Lock on this structure for tx list.
394 * @msg_enable : The message flags controlling driver output (see ethtool).
395 * @frame_cnt : number of frames received.
396 * @bus_width : i/o bus width.
397 * @rc_rxqcr : Cached copy of KS_RXQCR.
398 * @rc_txcr : Cached copy of KS_TXCR.
399 * @rc_ier : Cached copy of KS_IER.
 * @sharedbus : Multiplexed (addr and data bus) mode indicator.
401 * @cmd_reg_cache : command register cached.
402 * @cmd_reg_cache_int : command register cached. Used in the irq handler.
403 * @promiscuous : promiscuous mode indicator.
 * @all_mcast : multicast indicator.
405 * @mcast_lst_size : size of multicast list.
406 * @mcast_lst : multicast list.
 * @mcast_bits : multicast enabled.
408 * @mac_addr : MAC address assigned to this device.
409 * @fid : frame id.
410 * @extra_byte : number of extra byte prepended rx pkt.
411 * @enabled : indicator this device works.
412 *
413 * The @lock ensures that the chip is protected when certain operations are
414 * in progress. When the read or write packet transfer is in progress, most
415 * of the chip registers are not accessible until the transfer is finished and
416 * the DMA has been de-asserted.
417 *
418 * The @statelock is used to protect information in the structure which may
419 * need to be accessed via several sources, such as the network driver layer
420 * or one of the work queues.
421 *
422 */
423
/* Receive multiplex framer header info - one entry per pending RX frame,
 * captured up-front before any frame body is read from the QMU.
 */
struct type_frame_head {
	u16 sts;	/* Frame status (RXFHSR snapshot) */
	u16 len;	/* Byte count (RXFHBCR snapshot) */
};
429
struct ks_net {
	struct net_device	*netdev;
	void __iomem    	*hw_addr;
	void __iomem    	*hw_addr_cmd;
	union ks_tx_hdr		txh ____cacheline_aligned;
	struct mutex      	lock; /* mutex protecting chip access (despite
				       * the historic "spinlock" naming) */
	struct platform_device *pdev;
	struct mii_if_info	mii;
	struct type_frame_head	*frame_head_info;
	spinlock_t		statelock;	/* protects tx state */
	u32			msg_enable;
	u32			frame_cnt;	/* frames pending in RXQ */
	int			bus_width;	/* ENUM_BUS_* value */

	u16			rc_rxqcr;	/* cached KS_RXQCR */
	u16			rc_txcr;	/* cached KS_TXCR */
	u16			rc_ier;		/* cached KS_IER */
	u16			sharedbus;	/* multiplexed addr/data bus */
	u16			cmd_reg_cache;	/* last value on command bus */
	u16			cmd_reg_cache_int; /* saved copy for IRQ path */
	u16			promiscuous;
	u16			all_mcast;
	u16			mcast_lst_size;
	u8			mcast_lst[MAX_MCAST_LST][ETH_ALEN];
	u8			mcast_bits[HW_MCAST_SIZE];
	u8			mac_addr[6];
	u8			fid;		/* frame id counter */
	u8			extra_byte;	/* RX prepend garbage bytes,
						 * depends on bus width */
	u8			enabled;	/* QMU currently enabled */
};
460
461 static int msg_enable;
462
463 #define BE3 0x8000 /* Byte Enable 3 */
464 #define BE2 0x4000 /* Byte Enable 2 */
465 #define BE1 0x2000 /* Byte Enable 1 */
466 #define BE0 0x1000 /* Byte Enable 0 */
467
468 /**
469 * register read/write calls.
470 *
471 * All these calls issue transactions to access the chip's registers. They
472 * all require that the necessary lock is held to prevent accesses when the
473 * chip is busy transferring packet data (RX/TX FIFO accesses).
474 */
475
476 /**
477 * ks_rdreg8 - read 8 bit register from device
478 * @ks : The chip information
479 * @offset: The register address
480 *
481 * Read a 8bit register from the chip, returning the result
482 */
ks_rdreg8(struct ks_net * ks,int offset)483 static u8 ks_rdreg8(struct ks_net *ks, int offset)
484 {
485 u16 data;
486 u8 shift_bit = offset & 0x03;
487 u8 shift_data = (offset & 1) << 3;
488 ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
489 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
490 data = ioread16(ks->hw_addr);
491 return (u8)(data >> shift_data);
492 }
493
494 /**
495 * ks_rdreg16 - read 16 bit register from device
496 * @ks : The chip information
497 * @offset: The register address
498 *
499 * Read a 16bit register from the chip, returning the result
500 */
501
ks_rdreg16(struct ks_net * ks,int offset)502 static u16 ks_rdreg16(struct ks_net *ks, int offset)
503 {
504 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
505 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
506 return ioread16(ks->hw_addr);
507 }
508
509 /**
510 * ks_wrreg8 - write 8bit register value to chip
511 * @ks: The chip information
512 * @offset: The register address
513 * @value: The value to write
514 *
515 */
ks_wrreg8(struct ks_net * ks,int offset,u8 value)516 static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
517 {
518 u8 shift_bit = (offset & 0x03);
519 u16 value_write = (u16)(value << ((offset & 1) << 3));
520 ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
521 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
522 iowrite16(value_write, ks->hw_addr);
523 }
524
525 /**
526 * ks_wrreg16 - write 16bit register value to chip
527 * @ks: The chip information
528 * @offset: The register address
529 * @value: The value to write
530 *
531 */
532
ks_wrreg16(struct ks_net * ks,int offset,u16 value)533 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
534 {
535 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
536 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
537 iowrite16(value, ks->hw_addr);
538 }
539
540 /**
541 * ks_inblk - read a block of data from QMU. This is called after sudo DMA mode enabled.
542 * @ks: The chip state
543 * @wptr: buffer address to save data
544 * @len: length in byte to read
545 *
546 */
ks_inblk(struct ks_net * ks,u16 * wptr,u32 len)547 static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
548 {
549 len >>= 1;
550 while (len--)
551 *wptr++ = (u16)ioread16(ks->hw_addr);
552 }
553
554 /**
555 * ks_outblk - write data to QMU. This is called after sudo DMA mode enabled.
556 * @ks: The chip information
557 * @wptr: buffer address
558 * @len: length in byte to write
559 *
560 */
ks_outblk(struct ks_net * ks,u16 * wptr,u32 len)561 static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
562 {
563 len >>= 1;
564 while (len--)
565 iowrite16(*wptr++, ks->hw_addr);
566 }
567
/* ks_disable_int - mask all chip interrupt sources by clearing the IER. */
static void ks_disable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, 0x0000);
}  /* ks_disable_int */
572
/* ks_enable_int - re-enable the cached interrupt mask (@rc_ier) in the IER. */
static void ks_enable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, ks->rc_ier);
}  /* ks_enable_int */
577
/**
 * ks_tx_fifo_space - return the available hardware buffer size.
 * @ks: The chip information
 *
 * The low 13 bits of TXMIR report the free TX memory in bytes;
 * callers compare this against the frame size before queueing.
 */
static inline u16 ks_tx_fifo_space(struct ks_net *ks)
{
	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
}
587
/**
 * ks_save_cmd_reg - save the command register from the cache.
 * @ks: The chip information
 *
 * Called on entry to the IRQ handler so the interrupted context's
 * command-bus state can be restored afterwards.
 */
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
	/* ks8851 MLL has a bug to read back the command register.
	 * So rely on software to save the content of command register.
	 */
	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
}
600
/**
 * ks_restore_cmd_reg - restore the command register from the cache and
 * write to hardware register.
 * @ks: The chip information
 *
 * Counterpart of ks_save_cmd_reg(); called on exit from the IRQ handler
 * so any interrupted register access resumes with a correct command latch.
 */
static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
}
612
613 /**
614 * ks_set_powermode - set power mode of the device
615 * @ks: The chip information
616 * @pwrmode: The power mode value to write to KS_PMECR.
617 *
618 * Change the power mode of the chip.
619 */
ks_set_powermode(struct ks_net * ks,unsigned pwrmode)620 static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
621 {
622 unsigned pmecr;
623
624 netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
625
626 ks_rdreg16(ks, KS_GRR);
627 pmecr = ks_rdreg16(ks, KS_PMECR);
628 pmecr &= ~PMECR_PM_MASK;
629 pmecr |= pwrmode;
630
631 ks_wrreg16(ks, KS_PMECR, pmecr);
632 }
633
634 /**
635 * ks_read_config - read chip configuration of bus width.
636 * @ks: The chip information
637 *
638 */
ks_read_config(struct ks_net * ks)639 static void ks_read_config(struct ks_net *ks)
640 {
641 u16 reg_data = 0;
642
643 /* Regardless of bus width, 8 bit read should always work.*/
644 reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
645 reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
646
647 /* addr/data bus are multiplexed */
648 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
649
650 /* There are garbage data when reading data from QMU,
651 depending on bus-width.
652 */
653
654 if (reg_data & CCR_8BIT) {
655 ks->bus_width = ENUM_BUS_8BIT;
656 ks->extra_byte = 1;
657 } else if (reg_data & CCR_16BIT) {
658 ks->bus_width = ENUM_BUS_16BIT;
659 ks->extra_byte = 2;
660 } else {
661 ks->bus_width = ENUM_BUS_32BIT;
662 ks->extra_byte = 4;
663 }
664 }
665
/**
 * ks_soft_reset - issue one of the soft reset to the device
 * @ks: The device state.
 * @op: The bit(s) to set in the GRR
 *
 * Issue the relevant soft-reset command to the device's GRR register
 * specified by @op.
 *
 * Note, the delays are in there as a caution to ensure that the reset
 * has time to take effect and then complete. Since the datasheet does
 * not currently specify the exact sequence, we have chosen something
 * that seems to work with our device.
 */
static void ks_soft_reset(struct ks_net *ks, unsigned op)
{
	/* Disable interrupt first - a reset must not race with the IRQ path */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_GRR, op);
	mdelay(10);	/* wait a short time to effect reset */
	ks_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for condition to clear */
}
688
689
ks_enable_qmu(struct ks_net * ks)690 void ks_enable_qmu(struct ks_net *ks)
691 {
692 u16 w;
693
694 w = ks_rdreg16(ks, KS_TXCR);
695 /* Enables QMU Transmit (TXCR). */
696 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
697
698 /*
699 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
700 * Enable
701 */
702
703 w = ks_rdreg16(ks, KS_RXQCR);
704 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
705
706 /* Enables QMU Receive (RXCR1). */
707 w = ks_rdreg16(ks, KS_RXCR1);
708 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
709 ks->enabled = true;
710 } /* ks_enable_qmu */
711
ks_disable_qmu(struct ks_net * ks)712 static void ks_disable_qmu(struct ks_net *ks)
713 {
714 u16 w;
715
716 w = ks_rdreg16(ks, KS_TXCR);
717
718 /* Disables QMU Transmit (TXCR). */
719 w &= ~TXCR_TXE;
720 ks_wrreg16(ks, KS_TXCR, w);
721
722 /* Disables QMU Receive (RXCR1). */
723 w = ks_rdreg16(ks, KS_RXCR1);
724 w &= ~RXCR1_RXE ;
725 ks_wrreg16(ks, KS_RXCR1, w);
726
727 ks->enabled = false;
728
729 } /* ks_disable_qmu */
730
/**
 * ks_read_qmu - read 1 pkt data from the QMU.
 * @ks: The chip information
 * @buf: buffer address to save 1 pkt
 * @len: Pkt length
 *
 * Sequence to read one packet:
 * 1. set pseudo-DMA mode
 * 2. read and discard the prepend data
 * 3. read pkt data
 * 4. reset pseudo-DMA mode
 */
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
	u32 r = ks->extra_byte & 0x1 ;	/* 1 if an odd dummy byte leads */
	u32 w = ks->extra_byte - r;	/* even remainder, read as words */

	/* 1. set pseudo-DMA mode, auto-incrementing read pointer from 0 */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);

	/* 2. read prepend data:
	 * read extra_byte + 4 bytes and discard them -
	 * extra bytes are bus-width dummies, 2 for status, 2 for len.
	 * The discarded words land at the start of @buf and are then
	 * overwritten by the packet data below.
	 */
	/* single 8-bit read for the odd leading dummy byte, if any */
	if (unlikely(r))
		ioread8(ks->hw_addr);
	ks_inblk(ks, buf, w + 2 + 2);

	/* 3. read pkt data, padded up to a 4-byte multiple */
	ks_inblk(ks, buf, ALIGN(len, 4));

	/* 4. reset pseudo-DMA mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
}
768
/**
 * ks_rcv - read multiple pkts data from the QMU.
 * @ks: The chip information
 * @netdev: The network device being opened.
 *
 * Two-pass drain of the receive queue: all frame headers are latched
 * first, then every frame body is read. Reading only part of the
 * queued frames after acking the interrupt is not allowed by the chip.
 */
static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
{
	u32 i;
	struct type_frame_head *frame_hdr = ks->frame_head_info;
	struct sk_buff *skb;

	/* upper byte of RXFCTR = number of frames pending in the RXQ */
	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;

	/* read all header information */
	for (i = 0; i < ks->frame_cnt; i++) {
		/* Checking Received packet status */
		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
		/* Get packet len from hardware */
		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
		frame_hdr++;
	}

	frame_hdr = ks->frame_head_info;
	while (ks->frame_cnt--) {
		/* +16 leaves room for the skb_reserve(2) alignment below */
		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
		if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
			(frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
			skb_reserve(skb, 2);
			/* read data block including CRC 4 bytes */
			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
			skb_put(skb, frame_hdr->len);
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
		} else {
			/* NOTE(review): this message also fires for frames
			 * rejected for bad status or length, not only for a
			 * failed skb allocation.
			 */
			pr_err("%s: err:skb alloc\n", __func__);
			/* release the bad frame from the RXQ */
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			if (skb)
				dev_kfree_skb_irq(skb);
		}
		frame_hdr++;
	}
}
815
816 /**
817 * ks_update_link_status - link status update.
818 * @netdev: The network device being opened.
819 * @ks: The chip information
820 *
821 */
822
ks_update_link_status(struct net_device * netdev,struct ks_net * ks)823 static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
824 {
825 /* check the status of the link */
826 u32 link_up_status;
827 if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
828 netif_carrier_on(netdev);
829 link_up_status = true;
830 } else {
831 netif_carrier_off(netdev);
832 link_up_status = false;
833 }
834 netif_dbg(ks, link, ks->netdev,
835 "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
836 }
837
/**
 * ks_irq - device interrupt handler
 * @irq: Interrupt number passed from the IRQ handler.
 * @pw: The private word passed to register_irq(), our struct ks_net.
 *
 * This is the handler invoked to find out what happened
 *
 * Read the interrupt status, work out what needs to be done and then clear
 * any of the interrupts that are not needed.
 */
static irqreturn_t ks_irq(int irq, void *pw)
{
	struct net_device *netdev = pw;
	struct ks_net *ks = netdev_priv(netdev);
	u16 status;

	/* this should be the first in IRQ handler: the interrupted code may
	 * be mid register access, so its command latch must be preserved.
	 */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		/* not our interrupt (shared line) */
		ks_restore_cmd_reg(ks);
		return IRQ_NONE;
	}

	/* ack every source we are about to handle */
	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {
		/* clear the link-detect wakeup event in PMECR */
		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	/* this should be the last in IRQ handler */
	ks_restore_cmd_reg(ks);
	return IRQ_HANDLED;
}
886
887
/**
 * ks_net_open - open network device
 * @netdev: The network device being opened.
 *
 * Called when the network device is marked active, such as a user executing
 * 'ifconfig up' on the device.
 *
 * Requests the IRQ, wakes the chip into normal power mode, clears and
 * enables interrupts, enables the QMU and starts the TX queue.
 *
 * Return: 0 on success or the negative errno from request_irq().
 */
static int ks_net_open(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	int err;

/* NOTE(review): IRQF_DISABLED is deprecated (a no-op on modern kernels
 * and later removed entirely) - confirm against the target kernel version.
 */
#define	KS_INT_FLAGS	(IRQF_DISABLED|IRQF_TRIGGER_LOW)
	/* lock the card, even if we may not actually do anything
	 * else at the moment.
	 */

	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);

	/* reset the HW */
	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);

	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
		return err;
	}

	/* wake up powermode to normal mode */
	ks_set_powermode(ks, PMECR_PM_NORMAL);
	mdelay(1);	/* wait for normal mode to take effect */

	/* ack any stale interrupts, then unmask and start the engines */
	ks_wrreg16(ks, KS_ISR, 0xffff);
	ks_enable_int(ks);
	ks_enable_qmu(ks);
	netif_start_queue(ks->netdev);

	netif_dbg(ks, ifup, ks->netdev, "network device up\n");

	return 0;
}
928
/**
 * ks_net_stop - close network device
 * @netdev: The device being closed.
 *
 * Called to close down a network device which has been active. Cancel any
 * work, shutdown the RX and TX process and then place the chip into a low
 * power state whilst it is not being used.
 */
static int ks_net_stop(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	netif_info(ks, ifdown, netdev, "shutting down\n");

	/* stop the stack submitting new frames before touching the chip */
	netif_stop_queue(netdev);

	mutex_lock(&ks->lock);

	/* turn off the IRQs and ack any outstanding */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* shutdown RX/TX QMU */
	ks_disable_qmu(ks);

	/* set powermode to soft power down to save power */
	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
	free_irq(netdev->irq, netdev);
	mutex_unlock(&ks->lock);
	return 0;
}
960
961
/**
 * ks_write_qmu - write 1 pkt data to the QMU.
 * @ks: The chip information
 * @pdata: buffer address to save 1 pkt
 * @len: Pkt length in byte
 *
 * Sequence to write one packet:
 * 1. set pseudo-DMA mode
 * 2. write status/length
 * 3. write pkt data
 * 4. reset pseudo-DMA mode
 * 5. enqueue the frame
 * 6. busy-wait until the frame has been taken by the hardware
 */
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
{
	/* start header at txb[0] to align txw entries */
	ks->txh.txw[0] = 0;
	ks->txh.txw[1] = cpu_to_le16(len);

	/* 1. set pseudo-DMA mode */
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
	/* 2. write status/length info */
	ks_outblk(ks, ks->txh.txw, 4);
	/* 3. write pkt data, padded up to a 4-byte multiple */
	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
	/* 4. reset pseudo-DMA mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
	/* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
	/* 6. wait until TXQCR_METFE is auto-cleared: a busy-wait with no
	 * timeout - a wedged chip would spin here forever.
	 */
	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
		;
}
995
/**
 * ks_start_xmit - transmit packet
 * @skb : The buffer to transmit
 * @netdev : The device used to transmit the packet.
 *
 * Called by the network layer to transmit the @skb.
 * Tx and rx must be mutually exclusive, so the device IRQ line and the
 * chip interrupt sources are both disabled while the QMU is written.
 */
static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int retv = NETDEV_TX_OK;
	struct ks_net *ks = netdev_priv(netdev);

	/* keep ks_irq out while we own the QMU (plain spin_lock is
	 * sufficient only because the IRQ is disabled first)
	 */
	disable_irq(netdev->irq);
	ks_disable_int(ks);
	spin_lock(&ks->statelock);

	/* Extra space are required:
	 * 4 byte for alignment, 4 for status/length, 4 for CRC
	 */
	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
		ks_write_qmu(ks, skb->data, skb->len);
		dev_kfree_skb(skb);
	} else
		/* NOTE(review): the queue is not stopped here, so the stack
		 * may immediately retry this skb - confirm this is intended.
		 */
		retv = NETDEV_TX_BUSY;
	spin_unlock(&ks->statelock);
	ks_enable_int(ks);
	enable_irq(netdev->irq);
	return retv;
}
1028
1029 /**
1030 * ks_start_rx - ready to serve pkts
1031 * @ks : The chip information
1032 *
1033 */
ks_start_rx(struct ks_net * ks)1034 static void ks_start_rx(struct ks_net *ks)
1035 {
1036 u16 cntl;
1037
1038 /* Enables QMU Receive (RXCR1). */
1039 cntl = ks_rdreg16(ks, KS_RXCR1);
1040 cntl |= RXCR1_RXE ;
1041 ks_wrreg16(ks, KS_RXCR1, cntl);
1042 } /* ks_start_rx */
1043
1044 /**
1045 * ks_stop_rx - stop to serve pkts
1046 * @ks : The chip information
1047 *
1048 */
ks_stop_rx(struct ks_net * ks)1049 static void ks_stop_rx(struct ks_net *ks)
1050 {
1051 u16 cntl;
1052
1053 /* Disables QMU Receive (RXCR1). */
1054 cntl = ks_rdreg16(ks, KS_RXCR1);
1055 cntl &= ~RXCR1_RXE ;
1056 ks_wrreg16(ks, KS_RXCR1, cntl);
1057
1058 } /* ks_stop_rx */
1059
1060 static unsigned long const ethernet_polynomial = 0x04c11db7U;
1061
ether_gen_crc(int length,u8 * data)1062 static unsigned long ether_gen_crc(int length, u8 *data)
1063 {
1064 long crc = -1;
1065 while (--length >= 0) {
1066 u8 current_octet = *data++;
1067 int bit;
1068
1069 for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1070 crc = (crc << 1) ^
1071 ((crc < 0) ^ (current_octet & 1) ?
1072 ethernet_polynomial : 0);
1073 }
1074 }
1075 return (unsigned long)crc;
1076 } /* ether_gen_crc */
1077
1078 /**
1079 * ks_set_grpaddr - set multicast information
1080 * @ks : The chip information
1081 */
1082
ks_set_grpaddr(struct ks_net * ks)1083 static void ks_set_grpaddr(struct ks_net *ks)
1084 {
1085 u8 i;
1086 u32 index, position, value;
1087
1088 memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
1089
1090 for (i = 0; i < ks->mcast_lst_size; i++) {
1091 position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
1092 index = position >> 3;
1093 value = 1 << (position & 7);
1094 ks->mcast_bits[index] |= (u8)value;
1095 }
1096
1097 for (i = 0; i < HW_MCAST_SIZE; i++) {
1098 if (i & 1) {
1099 ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
1100 (ks->mcast_bits[i] << 8) |
1101 ks->mcast_bits[i - 1]);
1102 }
1103 }
1104 } /* ks_set_grpaddr */
1105
1106 /*
1107 * ks_clear_mcast - clear multicast information
1108 *
1109 * @ks : The chip information
1110 * This routine removes all mcast addresses set in the hardware.
1111 */
1112
ks_clear_mcast(struct ks_net * ks)1113 static void ks_clear_mcast(struct ks_net *ks)
1114 {
1115 u16 i, mcast_size;
1116 for (i = 0; i < HW_MCAST_SIZE; i++)
1117 ks->mcast_bits[i] = 0;
1118
1119 mcast_size = HW_MCAST_SIZE >> 2;
1120 for (i = 0; i < mcast_size; i++)
1121 ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1122 }
1123
/*
 * ks_set_promis - switch promiscuous reception on or off
 * @ks : The chip information
 * @promiscuous_mode : non-zero to accept all frames
 */
static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
{
	u16 rxcr1;

	ks->promiscuous = promiscuous_mode;
	/* The receiver must be idle while its filter mode is changed. */
	ks_stop_rx(ks);

	rxcr1 = ks_rdreg16(ks, KS_RXCR1) & ~RXCR1_FILTER_MASK;
	if (promiscuous_mode)
		/* Accept-all plus inverse filtering = promiscuous. */
		rxcr1 |= RXCR1_RXAE | RXCR1_RXINVF;
	else
		/* Normal mode: perfect MAC-address match only. */
		rxcr1 |= RXCR1_RXPAFMA;
	ks_wrreg16(ks, KS_RXCR1, rxcr1);

	if (ks->enabled)
		ks_start_rx(ks);
} /* ks_set_promis */
1145
/*
 * ks_set_mcast - switch all-multicast reception on or off
 * @ks : The chip information
 * @mcast : non-zero to accept every multicast frame
 */
static void ks_set_mcast(struct ks_net *ks, u16 mcast)
{
	u16 rxcr1;

	ks->all_mcast = mcast;
	/* The receiver must be idle while its filter mode is changed. */
	ks_stop_rx(ks);

	rxcr1 = ks_rdreg16(ks, KS_RXCR1) & ~RXCR1_FILTER_MASK;
	if (mcast)
		/* "Perfect with Multicast address passed" mode. */
		rxcr1 |= RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA;
	else
		/* Back to plain perfect-match filtering (normal mode). */
		rxcr1 |= RXCR1_RXPAFMA;
	ks_wrreg16(ks, KS_RXCR1, rxcr1);

	if (ks->enabled)
		ks_start_rx(ks);
} /* ks_set_mcast */
1169
ks_set_rx_mode(struct net_device * netdev)1170 static void ks_set_rx_mode(struct net_device *netdev)
1171 {
1172 struct ks_net *ks = netdev_priv(netdev);
1173 struct netdev_hw_addr *ha;
1174
1175 /* Turn on/off promiscuous mode. */
1176 if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1177 ks_set_promis(ks,
1178 (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1179 /* Turn on/off all mcast mode. */
1180 else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1181 ks_set_mcast(ks,
1182 (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1183 else
1184 ks_set_promis(ks, false);
1185
1186 if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
1187 if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
1188 int i = 0;
1189
1190 netdev_for_each_mc_addr(ha, netdev) {
1191 if (i >= MAX_MCAST_LST)
1192 break;
1193 memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
1194 }
1195 ks->mcast_lst_size = (u8)i;
1196 ks_set_grpaddr(ks);
1197 } else {
1198 /**
1199 * List too big to support so
1200 * turn on all mcast mode.
1201 */
1202 ks->mcast_lst_size = MAX_MCAST_LST;
1203 ks_set_mcast(ks, true);
1204 }
1205 } else {
1206 ks->mcast_lst_size = 0;
1207 ks_clear_mcast(ks);
1208 }
1209 } /* ks_set_rx_mode */
1210
/*
 * ks_set_mac - write a station MAC address into the chip
 * @ks : The chip information
 * @data : six bytes of MAC address, network byte order
 *
 * The MARH/MARM/MARL registers each take two address bytes, byte-swapped
 * relative to the wire order.  A copy is kept in ks->mac_addr.
 */
static void ks_set_mac(struct ks_net *ks, u8 *data)
{
	static const u16 mac_regs[] = { KS_MARH, KS_MARM, KS_MARL };
	u16 *pw = (u16 *)data;
	int i;

	/* Receive must be quiesced while the address registers change. */
	ks_stop_rx(ks);

	for (i = 0; i < 3; i++) {
		u16 v = pw[i];

		ks_wrreg16(ks, mac_regs[i],
			   (u16)(((v & 0xFF) << 8) | ((v >> 8) & 0xFF)));
	}

	memcpy(ks->mac_addr, data, 6);

	if (ks->enabled)
		ks_start_rx(ks);
}
1235
ks_set_mac_address(struct net_device * netdev,void * paddr)1236 static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1237 {
1238 struct ks_net *ks = netdev_priv(netdev);
1239 struct sockaddr *addr = paddr;
1240 u8 *da;
1241
1242 netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
1243 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1244
1245 da = (u8 *)netdev->dev_addr;
1246
1247 ks_set_mac(ks, da);
1248 return 0;
1249 }
1250
ks_net_ioctl(struct net_device * netdev,struct ifreq * req,int cmd)1251 static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1252 {
1253 struct ks_net *ks = netdev_priv(netdev);
1254
1255 if (!netif_running(netdev))
1256 return -EINVAL;
1257
1258 return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1259 }
1260
/* Network-device callbacks handed to the core via netdev->netdev_ops. */
static const struct net_device_ops ks_netdev_ops = {
	.ndo_open		= ks_net_open,
	.ndo_stop		= ks_net_stop,
	.ndo_do_ioctl		= ks_net_ioctl,
	.ndo_start_xmit		= ks_start_xmit,
	.ndo_set_mac_address	= ks_set_mac_address,
	.ndo_set_rx_mode	= ks_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
1271
1272 /* ethtool support */
1273
ks_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * di)1274 static void ks_get_drvinfo(struct net_device *netdev,
1275 struct ethtool_drvinfo *di)
1276 {
1277 strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
1278 strlcpy(di->version, "1.00", sizeof(di->version));
1279 strlcpy(di->bus_info, dev_name(netdev->dev.parent),
1280 sizeof(di->bus_info));
1281 }
1282
ks_get_msglevel(struct net_device * netdev)1283 static u32 ks_get_msglevel(struct net_device *netdev)
1284 {
1285 struct ks_net *ks = netdev_priv(netdev);
1286 return ks->msg_enable;
1287 }
1288
/* ethtool set_msglevel: store the netif message-enable bitmask. */
static void ks_set_msglevel(struct net_device *netdev, u32 to)
{
	struct ks_net *priv = netdev_priv(netdev);

	priv->msg_enable = to;
}
1294
ks_get_settings(struct net_device * netdev,struct ethtool_cmd * cmd)1295 static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1296 {
1297 struct ks_net *ks = netdev_priv(netdev);
1298 return mii_ethtool_gset(&ks->mii, cmd);
1299 }
1300
ks_set_settings(struct net_device * netdev,struct ethtool_cmd * cmd)1301 static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1302 {
1303 struct ks_net *ks = netdev_priv(netdev);
1304 return mii_ethtool_sset(&ks->mii, cmd);
1305 }
1306
ks_get_link(struct net_device * netdev)1307 static u32 ks_get_link(struct net_device *netdev)
1308 {
1309 struct ks_net *ks = netdev_priv(netdev);
1310 return mii_link_ok(&ks->mii);
1311 }
1312
ks_nway_reset(struct net_device * netdev)1313 static int ks_nway_reset(struct net_device *netdev)
1314 {
1315 struct ks_net *ks = netdev_priv(netdev);
1316 return mii_nway_restart(&ks->mii);
1317 }
1318
/* ethtool callbacks handed to the core via netdev->ethtool_ops. */
static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo	= ks_get_drvinfo,
	.get_msglevel	= ks_get_msglevel,
	.set_msglevel	= ks_set_msglevel,
	.get_settings	= ks_get_settings,
	.set_settings	= ks_set_settings,
	.get_link	= ks_get_link,
	.nway_reset	= ks_nway_reset,
};
1328
1329 /* MII interface controls */
1330
1331 /**
1332 * ks_phy_reg - convert MII register into a KS8851 register
1333 * @reg: MII register number.
1334 *
1335 * Return the KS8851 register number for the corresponding MII PHY register
1336 * if possible. Return zero if the MII register has no direct mapping to the
1337 * KS8851 register set.
1338 */
ks_phy_reg(int reg)1339 static int ks_phy_reg(int reg)
1340 {
1341 switch (reg) {
1342 case MII_BMCR:
1343 return KS_P1MBCR;
1344 case MII_BMSR:
1345 return KS_P1MBSR;
1346 case MII_PHYSID1:
1347 return KS_PHY1ILR;
1348 case MII_PHYSID2:
1349 return KS_PHY1IHR;
1350 case MII_ADVERTISE:
1351 return KS_P1ANAR;
1352 case MII_LPA:
1353 return KS_P1ANLPR;
1354 }
1355
1356 return 0x0;
1357 }
1358
1359 /**
1360 * ks_phy_read - MII interface PHY register read.
1361 * @netdev: The network device the PHY is on.
1362 * @phy_addr: Address of PHY (ignored as we only have one)
1363 * @reg: The register to read.
1364 *
1365 * This call reads data from the PHY register specified in @reg. Since the
1366 * device does not support all the MII registers, the non-existent values
1367 * are always returned as zero.
1368 *
1369 * We return zero for unsupported registers as the MII code does not check
1370 * the value returned for any error status, and simply returns it to the
1371 * caller. The mii-tool that the driver was tested with takes any -ve error
1372 * as real PHY capabilities, thus displaying incorrect data to the user.
1373 */
ks_phy_read(struct net_device * netdev,int phy_addr,int reg)1374 static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1375 {
1376 struct ks_net *ks = netdev_priv(netdev);
1377 int ksreg;
1378 int result;
1379
1380 ksreg = ks_phy_reg(reg);
1381 if (!ksreg)
1382 return 0x0; /* no error return allowed, so use zero */
1383
1384 mutex_lock(&ks->lock);
1385 result = ks_rdreg16(ks, ksreg);
1386 mutex_unlock(&ks->lock);
1387
1388 return result;
1389 }
1390
ks_phy_write(struct net_device * netdev,int phy,int reg,int value)1391 static void ks_phy_write(struct net_device *netdev,
1392 int phy, int reg, int value)
1393 {
1394 struct ks_net *ks = netdev_priv(netdev);
1395 int ksreg;
1396
1397 ksreg = ks_phy_reg(reg);
1398 if (ksreg) {
1399 mutex_lock(&ks->lock);
1400 ks_wrreg16(ks, ksreg, value);
1401 mutex_unlock(&ks->lock);
1402 }
1403 }
1404
1405 /**
1406 * ks_read_selftest - read the selftest memory info.
1407 * @ks: The device state
1408 *
1409 * Read and check the TX/RX memory selftest information.
1410 */
ks_read_selftest(struct ks_net * ks)1411 static int ks_read_selftest(struct ks_net *ks)
1412 {
1413 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1414 int ret = 0;
1415 unsigned rd;
1416
1417 rd = ks_rdreg16(ks, KS_MBIR);
1418
1419 if ((rd & both_done) != both_done) {
1420 netdev_warn(ks->netdev, "Memory selftest not finished\n");
1421 return 0;
1422 }
1423
1424 if (rd & MBIR_TXMBFA) {
1425 netdev_err(ks->netdev, "TX memory selftest fails\n");
1426 ret |= 1;
1427 }
1428
1429 if (rd & MBIR_RXMBFA) {
1430 netdev_err(ks->netdev, "RX memory selftest fails\n");
1431 ret |= 2;
1432 }
1433
1434 netdev_info(ks->netdev, "the selftest passes\n");
1435 return ret;
1436 }
1437
/*
 * ks_setup - program the QMU and MAC for normal operation
 * @ks: The device state
 *
 * Configures TX/RX frame-data pointer auto-increment, the receive
 * interrupt threshold, the RxQ command register, the PHY duplex fallback,
 * and the transmit/receive control registers according to the cached
 * promiscuous/all-multicast state.  The receiver is expected to be
 * disabled while this runs.
 */
static void ks_setup(struct ks_net *ks)
{
	u16	w;

	/**
	 * Configure QMU Transmit
	 */

	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* Setup Receive Frame Data Pointer Auto-Increment */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

	/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);

	/* Setup RxQ Command Control (RXQCR) */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/**
	 * set the force mode to half duplex, default is full duplex
	 *  because if the auto-negotiation fails, most switch uses
	 *  half-duplex.
	 */

	w = ks_rdreg16(ks, KS_P1MBCR);
	w &= ~P1MBCR_FORCE_FDX;
	ks_wrreg16(ks, KS_P1MBCR, w);

	/* TX: flow control, padding, CRC generation, IP checksum offload */
	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(ks, KS_TXCR, w);

	/* RX: flow control, broadcast/unicast/multicast, IP checksum check */
	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;

	if (ks->promiscuous)         /* bPromiscuous */
		w |= (RXCR1_RXAE | RXCR1_RXINVF);
	else if (ks->all_mcast) /* Multicast address passed mode */
		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else                                   /* Normal mode */
		w |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, w);
} /*ks_setup */
1483
1484
/*
 * ks_setup_int - prepare the interrupt state
 * @ks: The device state
 *
 * Acknowledges any pending interrupt status in the chip and caches the
 * set of interrupt sources to enable in ks->rc_ier.  NOTE(review): this
 * function only caches the mask; presumably the open path writes rc_ier
 * to the chip's IER — confirm against the rest of the driver.
 */
static void ks_setup_int(struct ks_net *ks)
{
	ks->rc_ier = 0x00;
	/* Clear the interrupts status of the hardware. */
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* Enables the interrupts of the hardware. */
	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
} /* ks_setup_int */
1494
ks_hw_init(struct ks_net * ks)1495 static int ks_hw_init(struct ks_net *ks)
1496 {
1497 #define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1498 ks->promiscuous = 0;
1499 ks->all_mcast = 0;
1500 ks->mcast_lst_size = 0;
1501
1502 ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
1503 if (!ks->frame_head_info)
1504 return false;
1505
1506 ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1507 return true;
1508 }
1509
1510
ks8851_probe(struct platform_device * pdev)1511 static int __devinit ks8851_probe(struct platform_device *pdev)
1512 {
1513 int err = -ENOMEM;
1514 struct resource *io_d, *io_c;
1515 struct net_device *netdev;
1516 struct ks_net *ks;
1517 u16 id, data;
1518
1519 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1520 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1521
1522 if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
1523 goto err_mem_region;
1524
1525 if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
1526 goto err_mem_region1;
1527
1528 netdev = alloc_etherdev(sizeof(struct ks_net));
1529 if (!netdev)
1530 goto err_alloc_etherdev;
1531
1532 SET_NETDEV_DEV(netdev, &pdev->dev);
1533
1534 ks = netdev_priv(netdev);
1535 ks->netdev = netdev;
1536 ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
1537
1538 if (!ks->hw_addr)
1539 goto err_ioremap;
1540
1541 ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
1542 if (!ks->hw_addr_cmd)
1543 goto err_ioremap1;
1544
1545 netdev->irq = platform_get_irq(pdev, 0);
1546
1547 if ((int)netdev->irq < 0) {
1548 err = netdev->irq;
1549 goto err_get_irq;
1550 }
1551
1552 ks->pdev = pdev;
1553
1554 mutex_init(&ks->lock);
1555 spin_lock_init(&ks->statelock);
1556
1557 netdev->netdev_ops = &ks_netdev_ops;
1558 netdev->ethtool_ops = &ks_ethtool_ops;
1559
1560 /* setup mii state */
1561 ks->mii.dev = netdev;
1562 ks->mii.phy_id = 1,
1563 ks->mii.phy_id_mask = 1;
1564 ks->mii.reg_num_mask = 0xf;
1565 ks->mii.mdio_read = ks_phy_read;
1566 ks->mii.mdio_write = ks_phy_write;
1567
1568 netdev_info(netdev, "message enable is %d\n", msg_enable);
1569 /* set the default message enable */
1570 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1571 NETIF_MSG_PROBE |
1572 NETIF_MSG_LINK));
1573 ks_read_config(ks);
1574
1575 /* simple check for a valid chip being connected to the bus */
1576 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1577 netdev_err(netdev, "failed to read device ID\n");
1578 err = -ENODEV;
1579 goto err_register;
1580 }
1581
1582 if (ks_read_selftest(ks)) {
1583 netdev_err(netdev, "failed to read device ID\n");
1584 err = -ENODEV;
1585 goto err_register;
1586 }
1587
1588 err = register_netdev(netdev);
1589 if (err)
1590 goto err_register;
1591
1592 platform_set_drvdata(pdev, netdev);
1593
1594 ks_soft_reset(ks, GRR_GSR);
1595 ks_hw_init(ks);
1596 ks_disable_qmu(ks);
1597 ks_setup(ks);
1598 ks_setup_int(ks);
1599 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1600
1601 data = ks_rdreg16(ks, KS_OBCR);
1602 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1603
1604 /**
1605 * If you want to use the default MAC addr,
1606 * comment out the 2 functions below.
1607 */
1608
1609 random_ether_addr(netdev->dev_addr);
1610 ks_set_mac(ks, netdev->dev_addr);
1611
1612 id = ks_rdreg16(ks, KS_CIDER);
1613
1614 netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1615 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1616 return 0;
1617
1618 err_register:
1619 err_get_irq:
1620 iounmap(ks->hw_addr_cmd);
1621 err_ioremap1:
1622 iounmap(ks->hw_addr);
1623 err_ioremap:
1624 free_netdev(netdev);
1625 err_alloc_etherdev:
1626 release_mem_region(io_c->start, resource_size(io_c));
1627 err_mem_region1:
1628 release_mem_region(io_d->start, resource_size(io_d));
1629 err_mem_region:
1630 return err;
1631 }
1632
ks8851_remove(struct platform_device * pdev)1633 static int __devexit ks8851_remove(struct platform_device *pdev)
1634 {
1635 struct net_device *netdev = platform_get_drvdata(pdev);
1636 struct ks_net *ks = netdev_priv(netdev);
1637 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1638
1639 kfree(ks->frame_head_info);
1640 unregister_netdev(netdev);
1641 iounmap(ks->hw_addr);
1642 free_netdev(netdev);
1643 release_mem_region(iomem->start, resource_size(iomem));
1644 platform_set_drvdata(pdev, NULL);
1645 return 0;
1646
1647 }
1648
/* Platform-bus glue: matched by name against a board-registered
 * platform_device called "ks8851_mll". */
static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ks8851_probe,
	.remove = __devexit_p(ks8851_remove),
};

module_platform_driver(ks8851_platform_driver);

MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
MODULE_LICENSE("GPL");
/* "message" feeds netif_msg_init(); see NETIF_MSG_* bit definitions. */
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1665
1666