1 /**
2  * drivers/net/ks8851_mll.c
3  * Copyright (c) 2009 Micrel Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17  */
18 
19 /**
20  * Supports:
21  * KS8851 16bit MLL chip from Micrel Inc.
22  */
23 
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25 
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/cache.h>
32 #include <linux/crc32.h>
33 #include <linux/mii.h>
34 #include <linux/platform_device.h>
35 #include <linux/delay.h>
36 #include <linux/slab.h>
37 
38 #define	DRV_NAME	"ks8851_mll"
39 
40 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
41 #define MAX_RECV_FRAMES			32
42 #define MAX_BUF_SIZE			2048
43 #define TX_BUF_SIZE			2000
44 #define RX_BUF_SIZE			2000
45 
46 #define KS_CCR				0x08
47 #define CCR_EEPROM			(1 << 9)
48 #define CCR_SPI				(1 << 8)
49 #define CCR_8BIT			(1 << 7)
50 #define CCR_16BIT			(1 << 6)
51 #define CCR_32BIT			(1 << 5)
52 #define CCR_SHARED			(1 << 4)
53 #define CCR_32PIN			(1 << 0)
54 
55 /* MAC address registers */
56 #define KS_MARL				0x10
57 #define KS_MARM				0x12
58 #define KS_MARH				0x14
59 
60 #define KS_OBCR				0x20
61 #define OBCR_ODS_16MA			(1 << 6)
62 
63 #define KS_EEPCR			0x22
64 #define EEPCR_EESA			(1 << 4)
65 #define EEPCR_EESB			(1 << 3)
66 #define EEPCR_EEDO			(1 << 2)
67 #define EEPCR_EESCK			(1 << 1)
68 #define EEPCR_EECS			(1 << 0)
69 
70 #define KS_MBIR				0x24
71 #define MBIR_TXMBF			(1 << 12)
72 #define MBIR_TXMBFA			(1 << 11)
73 #define MBIR_RXMBF			(1 << 4)
74 #define MBIR_RXMBFA			(1 << 3)
75 
76 #define KS_GRR				0x26
77 #define GRR_QMU				(1 << 1)
78 #define GRR_GSR				(1 << 0)
79 
80 #define KS_WFCR				0x2A
81 #define WFCR_MPRXE			(1 << 7)
82 #define WFCR_WF3E			(1 << 3)
83 #define WFCR_WF2E			(1 << 2)
84 #define WFCR_WF1E			(1 << 1)
85 #define WFCR_WF0E			(1 << 0)
86 
87 #define KS_WF0CRC0			0x30
88 #define KS_WF0CRC1			0x32
89 #define KS_WF0BM0			0x34
90 #define KS_WF0BM1			0x36
91 #define KS_WF0BM2			0x38
92 #define KS_WF0BM3			0x3A
93 
94 #define KS_WF1CRC0			0x40
95 #define KS_WF1CRC1			0x42
96 #define KS_WF1BM0			0x44
97 #define KS_WF1BM1			0x46
98 #define KS_WF1BM2			0x48
99 #define KS_WF1BM3			0x4A
100 
101 #define KS_WF2CRC0			0x50
102 #define KS_WF2CRC1			0x52
103 #define KS_WF2BM0			0x54
104 #define KS_WF2BM1			0x56
105 #define KS_WF2BM2			0x58
106 #define KS_WF2BM3			0x5A
107 
108 #define KS_WF3CRC0			0x60
109 #define KS_WF3CRC1			0x62
110 #define KS_WF3BM0			0x64
111 #define KS_WF3BM1			0x66
112 #define KS_WF3BM2			0x68
113 #define KS_WF3BM3			0x6A
114 
115 #define KS_TXCR				0x70
116 #define TXCR_TCGICMP			(1 << 8)
117 #define TXCR_TCGUDP			(1 << 7)
118 #define TXCR_TCGTCP			(1 << 6)
119 #define TXCR_TCGIP			(1 << 5)
120 #define TXCR_FTXQ			(1 << 4)
121 #define TXCR_TXFCE			(1 << 3)
122 #define TXCR_TXPE			(1 << 2)
123 #define TXCR_TXCRC			(1 << 1)
124 #define TXCR_TXE			(1 << 0)
125 
126 #define KS_TXSR				0x72
127 #define TXSR_TXLC			(1 << 13)
128 #define TXSR_TXMC			(1 << 12)
129 #define TXSR_TXFID_MASK			(0x3f << 0)
130 #define TXSR_TXFID_SHIFT		(0)
131 #define TXSR_TXFID_GET(_v)		(((_v) >> 0) & 0x3f)
132 
133 
134 #define KS_RXCR1			0x74
135 #define RXCR1_FRXQ			(1 << 15)
136 #define RXCR1_RXUDPFCC			(1 << 14)
137 #define RXCR1_RXTCPFCC			(1 << 13)
138 #define RXCR1_RXIPFCC			(1 << 12)
139 #define RXCR1_RXPAFMA			(1 << 11)
140 #define RXCR1_RXFCE			(1 << 10)
141 #define RXCR1_RXEFE			(1 << 9)
142 #define RXCR1_RXMAFMA			(1 << 8)
143 #define RXCR1_RXBE			(1 << 7)
144 #define RXCR1_RXME			(1 << 6)
145 #define RXCR1_RXUE			(1 << 5)
146 #define RXCR1_RXAE			(1 << 4)
147 #define RXCR1_RXINVF			(1 << 1)
148 #define RXCR1_RXE			(1 << 0)
149 #define RXCR1_FILTER_MASK    		(RXCR1_RXINVF | RXCR1_RXAE | \
150 					 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
151 
152 #define KS_RXCR2			0x76
153 #define RXCR2_SRDBL_MASK		(0x7 << 5)
154 #define RXCR2_SRDBL_SHIFT		(5)
155 #define RXCR2_SRDBL_4B			(0x0 << 5)
156 #define RXCR2_SRDBL_8B			(0x1 << 5)
157 #define RXCR2_SRDBL_16B			(0x2 << 5)
158 #define RXCR2_SRDBL_32B			(0x3 << 5)
159 /* #define RXCR2_SRDBL_FRAME		(0x4 << 5) */
160 #define RXCR2_IUFFP			(1 << 4)
161 #define RXCR2_RXIUFCEZ			(1 << 3)
162 #define RXCR2_UDPLFE			(1 << 2)
163 #define RXCR2_RXICMPFCC			(1 << 1)
164 #define RXCR2_RXSAF			(1 << 0)
165 
166 #define KS_TXMIR			0x78
167 
168 #define KS_RXFHSR			0x7C
169 #define RXFSHR_RXFV			(1 << 15)
170 #define RXFSHR_RXICMPFCS		(1 << 13)
171 #define RXFSHR_RXIPFCS			(1 << 12)
172 #define RXFSHR_RXTCPFCS			(1 << 11)
173 #define RXFSHR_RXUDPFCS			(1 << 10)
174 #define RXFSHR_RXBF			(1 << 7)
175 #define RXFSHR_RXMF			(1 << 6)
176 #define RXFSHR_RXUF			(1 << 5)
177 #define RXFSHR_RXMR			(1 << 4)
178 #define RXFSHR_RXFT			(1 << 3)
179 #define RXFSHR_RXFTL			(1 << 2)
180 #define RXFSHR_RXRF			(1 << 1)
181 #define RXFSHR_RXCE			(1 << 0)
182 #define	RXFSHR_ERR			(RXFSHR_RXCE | RXFSHR_RXRF |\
183 					RXFSHR_RXFTL | RXFSHR_RXMR |\
184 					RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
185 					RXFSHR_RXTCPFCS)
186 #define KS_RXFHBCR			0x7E
187 #define RXFHBCR_CNT_MASK		0x0FFF
188 
189 #define KS_TXQCR			0x80
190 #define TXQCR_AETFE			(1 << 2)
191 #define TXQCR_TXQMAM			(1 << 1)
192 #define TXQCR_METFE			(1 << 0)
193 
194 #define KS_RXQCR			0x82
195 #define RXQCR_RXDTTS			(1 << 12)
196 #define RXQCR_RXDBCTS			(1 << 11)
197 #define RXQCR_RXFCTS			(1 << 10)
198 #define RXQCR_RXIPHTOE			(1 << 9)
199 #define RXQCR_RXDTTE			(1 << 7)
200 #define RXQCR_RXDBCTE			(1 << 6)
201 #define RXQCR_RXFCTE			(1 << 5)
202 #define RXQCR_ADRFE			(1 << 4)
203 #define RXQCR_SDA			(1 << 3)
204 #define RXQCR_RRXEF			(1 << 0)
205 #define RXQCR_CMD_CNTL                	(RXQCR_RXFCTE|RXQCR_ADRFE)
206 
207 #define KS_TXFDPR			0x84
208 #define TXFDPR_TXFPAI			(1 << 14)
209 #define TXFDPR_TXFP_MASK		(0x7ff << 0)
210 #define TXFDPR_TXFP_SHIFT		(0)
211 
212 #define KS_RXFDPR			0x86
213 #define RXFDPR_RXFPAI			(1 << 14)
214 
215 #define KS_RXDTTR			0x8C
216 #define KS_RXDBCTR			0x8E
217 
218 #define KS_IER				0x90
219 #define KS_ISR				0x92
220 #define IRQ_LCI				(1 << 15)
221 #define IRQ_TXI				(1 << 14)
222 #define IRQ_RXI				(1 << 13)
223 #define IRQ_RXOI			(1 << 11)
224 #define IRQ_TXPSI			(1 << 9)
225 #define IRQ_RXPSI			(1 << 8)
226 #define IRQ_TXSAI			(1 << 6)
227 #define IRQ_RXWFDI			(1 << 5)
228 #define IRQ_RXMPDI			(1 << 4)
229 #define IRQ_LDI				(1 << 3)
230 #define IRQ_EDI				(1 << 2)
231 #define IRQ_SPIBEI			(1 << 1)
232 #define IRQ_DEDI			(1 << 0)
233 
234 #define KS_RXFCTR			0x9C
235 #define RXFCTR_THRESHOLD_MASK     	0x00FF
236 
237 #define KS_RXFC				0x9D
238 #define RXFCTR_RXFC_MASK		(0xff << 8)
239 #define RXFCTR_RXFC_SHIFT		(8)
240 #define RXFCTR_RXFC_GET(_v)		(((_v) >> 8) & 0xff)
241 #define RXFCTR_RXFCT_MASK		(0xff << 0)
242 #define RXFCTR_RXFCT_SHIFT		(0)
243 
244 #define KS_TXNTFSR			0x9E
245 
246 #define KS_MAHTR0			0xA0
247 #define KS_MAHTR1			0xA2
248 #define KS_MAHTR2			0xA4
249 #define KS_MAHTR3			0xA6
250 
251 #define KS_FCLWR			0xB0
252 #define KS_FCHWR			0xB2
253 #define KS_FCOWR			0xB4
254 
255 #define KS_CIDER			0xC0
256 #define CIDER_ID			0x8870
257 #define CIDER_REV_MASK			(0x7 << 1)
258 #define CIDER_REV_SHIFT			(1)
259 #define CIDER_REV_GET(_v)		(((_v) >> 1) & 0x7)
260 
261 #define KS_CGCR				0xC6
262 #define KS_IACR				0xC8
263 #define IACR_RDEN			(1 << 12)
264 #define IACR_TSEL_MASK			(0x3 << 10)
265 #define IACR_TSEL_SHIFT			(10)
266 #define IACR_TSEL_MIB			(0x3 << 10)
267 #define IACR_ADDR_MASK			(0x1f << 0)
268 #define IACR_ADDR_SHIFT			(0)
269 
270 #define KS_IADLR			0xD0
271 #define KS_IAHDR			0xD2
272 
273 #define KS_PMECR			0xD4
274 #define PMECR_PME_DELAY			(1 << 14)
275 #define PMECR_PME_POL			(1 << 12)
276 #define PMECR_WOL_WAKEUP		(1 << 11)
277 #define PMECR_WOL_MAGICPKT		(1 << 10)
278 #define PMECR_WOL_LINKUP		(1 << 9)
279 #define PMECR_WOL_ENERGY		(1 << 8)
280 #define PMECR_AUTO_WAKE_EN		(1 << 7)
281 #define PMECR_WAKEUP_NORMAL		(1 << 6)
282 #define PMECR_WKEVT_MASK		(0xf << 2)
283 #define PMECR_WKEVT_SHIFT		(2)
284 #define PMECR_WKEVT_GET(_v)		(((_v) >> 2) & 0xf)
285 #define PMECR_WKEVT_ENERGY		(0x1 << 2)
286 #define PMECR_WKEVT_LINK		(0x2 << 2)
287 #define PMECR_WKEVT_MAGICPKT		(0x4 << 2)
288 #define PMECR_WKEVT_FRAME		(0x8 << 2)
289 #define PMECR_PM_MASK			(0x3 << 0)
290 #define PMECR_PM_SHIFT			(0)
291 #define PMECR_PM_NORMAL			(0x0 << 0)
292 #define PMECR_PM_ENERGY			(0x1 << 0)
293 #define PMECR_PM_SOFTDOWN		(0x2 << 0)
294 #define PMECR_PM_POWERSAVE		(0x3 << 0)
295 
296 /* Standard MII PHY data */
297 #define KS_P1MBCR			0xE4
298 #define P1MBCR_FORCE_FDX		(1 << 8)
299 
300 #define KS_P1MBSR			0xE6
301 #define P1MBSR_AN_COMPLETE		(1 << 5)
302 #define P1MBSR_AN_CAPABLE		(1 << 3)
303 #define P1MBSR_LINK_UP			(1 << 2)
304 
305 #define KS_PHY1ILR			0xE8
306 #define KS_PHY1IHR			0xEA
307 #define KS_P1ANAR			0xEC
308 #define KS_P1ANLPR			0xEE
309 
310 #define KS_P1SCLMD			0xF4
311 #define P1SCLMD_LEDOFF			(1 << 15)
312 #define P1SCLMD_TXIDS			(1 << 14)
313 #define P1SCLMD_RESTARTAN		(1 << 13)
314 #define P1SCLMD_DISAUTOMDIX		(1 << 10)
315 #define P1SCLMD_FORCEMDIX		(1 << 9)
316 #define P1SCLMD_AUTONEGEN		(1 << 7)
317 #define P1SCLMD_FORCE100		(1 << 6)
318 #define P1SCLMD_FORCEFDX		(1 << 5)
319 #define P1SCLMD_ADV_FLOW		(1 << 4)
320 #define P1SCLMD_ADV_100BT_FDX		(1 << 3)
321 #define P1SCLMD_ADV_100BT_HDX		(1 << 2)
322 #define P1SCLMD_ADV_10BT_FDX		(1 << 1)
323 #define P1SCLMD_ADV_10BT_HDX		(1 << 0)
324 
325 #define KS_P1CR				0xF6
326 #define P1CR_HP_MDIX			(1 << 15)
327 #define P1CR_REV_POL			(1 << 13)
328 #define P1CR_OP_100M			(1 << 10)
329 #define P1CR_OP_FDX			(1 << 9)
330 #define P1CR_OP_MDI			(1 << 7)
331 #define P1CR_AN_DONE			(1 << 6)
332 #define P1CR_LINK_GOOD			(1 << 5)
333 #define P1CR_PNTR_FLOW			(1 << 4)
334 #define P1CR_PNTR_100BT_FDX		(1 << 3)
335 #define P1CR_PNTR_100BT_HDX		(1 << 2)
336 #define P1CR_PNTR_10BT_FDX		(1 << 1)
337 #define P1CR_PNTR_10BT_HDX		(1 << 0)
338 
339 /* TX Frame control */
340 
341 #define TXFR_TXIC			(1 << 15)
342 #define TXFR_TXFID_MASK			(0x3f << 0)
343 #define TXFR_TXFID_SHIFT		(0)
344 
345 #define KS_P1SR				0xF8
346 #define P1SR_HP_MDIX			(1 << 15)
347 #define P1SR_REV_POL			(1 << 13)
348 #define P1SR_OP_100M			(1 << 10)
349 #define P1SR_OP_FDX			(1 << 9)
350 #define P1SR_OP_MDI			(1 << 7)
351 #define P1SR_AN_DONE			(1 << 6)
352 #define P1SR_LINK_GOOD			(1 << 5)
353 #define P1SR_PNTR_FLOW			(1 << 4)
354 #define P1SR_PNTR_100BT_FDX		(1 << 3)
355 #define P1SR_PNTR_100BT_HDX		(1 << 2)
356 #define P1SR_PNTR_10BT_FDX		(1 << 1)
357 #define P1SR_PNTR_10BT_HDX		(1 << 0)
358 
359 #define	ENUM_BUS_NONE			0
360 #define	ENUM_BUS_8BIT			1
361 #define	ENUM_BUS_16BIT			2
362 #define	ENUM_BUS_32BIT			3
363 
364 #define MAX_MCAST_LST			32
365 #define HW_MCAST_SIZE			8
366 
367 /**
368  * union ks_tx_hdr - tx header data
369  * @txb: The header as bytes
370  * @txw: The header as 16bit, little-endian words
371  *
372  * A dual representation of the tx header data to allow
373  * access to individual bytes, and to allow 16bit accesses
374  * with 16bit alignment.
375  */
376 union ks_tx_hdr {
377 	u8      txb[4];
378 	__le16  txw[2];
379 };
380 
381 /**
382  * struct ks_net - KS8851 driver private data
383  * @netdev	: The network device we're bound to
384  * @hw_addr	: start address of data register.
385  * @hw_addr_cmd	: start address of command register.
386  * @txh    	: temporary buffer to save status/length.
387  * @lock	: Lock to ensure that the device is not accessed when busy.
388  * @pdev	: Pointer to platform device.
389  * @mii		: The MII state information for the mii calls.
390  * @frame_head_info   	: frame header information for multi-pkt rx.
391  * @statelock	: Lock on this structure for tx list.
392  * @msg_enable	: The message flags controlling driver output (see ethtool).
393  * @frame_cnt  	: number of frames received.
394  * @bus_width  	: i/o bus width.
395  * @irq    	: irq number assigned to this device.
396  * @rc_rxqcr	: Cached copy of KS_RXQCR.
397  * @rc_txcr	: Cached copy of KS_TXCR.
398  * @rc_ier	: Cached copy of KS_IER.
399  * @sharedbus  	: Multiplexed (addr and data bus) mode indicator.
400  * @cmd_reg_cache	: Cached copy of the last command register value.
401  * @cmd_reg_cache_int	: Cached copy of the command register, used in the irq handler.
402  * @promiscuous	: promiscuous mode indicator.
403  * @all_mcast  	: all-multicast mode indicator.
404  * @mcast_lst_size   	: size of multicast list.
405  * @mcast_lst    	: multicast list.
406  * @mcast_bits    	: multicast hash-table bits enabled in hardware.
407  * @mac_addr   		: MAC address assigned to this device.
408  * @fid    		: frame id.
409  * @extra_byte    	: number of extra bytes prepended to each rx packet.
410  * @enabled    		: indicates whether the device is enabled.
411  *
412  * The @lock ensures that the chip is protected when certain operations are
413  * in progress. When the read or write packet transfer is in progress, most
414  * of the chip registers are not accessible until the transfer is finished and
415  * the DMA has been de-asserted.
416  *
417  * The @statelock is used to protect information in the structure which may
418  * need to be accessed via several sources, such as the network driver layer
419  * or one of the work queues.
420  *
421  */
422 
423 /* Receive multiplex framer header info */
424 struct type_frame_head {
425 	u16	sts;         /* Frame status */
426 	u16	len;         /* Byte count */
427 };
428 
429 struct ks_net {
430 	struct net_device	*netdev;
431 	void __iomem    	*hw_addr;
432 	void __iomem    	*hw_addr_cmd;
433 	union ks_tx_hdr		txh ____cacheline_aligned;
434 	struct mutex      	lock; /* mutex protecting register access */
435 	struct platform_device *pdev;
436 	struct mii_if_info	mii;
437 	struct type_frame_head	*frame_head_info;
438 	spinlock_t		statelock;
439 	u32			msg_enable;
440 	u32			frame_cnt;
441 	int			bus_width;
442 	int             	irq;
443 
444 	u16			rc_rxqcr;
445 	u16			rc_txcr;
446 	u16			rc_ier;
447 	u16			sharedbus;
448 	u16			cmd_reg_cache;
449 	u16			cmd_reg_cache_int;
450 	u16			promiscuous;
451 	u16			all_mcast;
452 	u16			mcast_lst_size;
453 	u8			mcast_lst[MAX_MCAST_LST][ETH_ALEN];
454 	u8			mcast_bits[HW_MCAST_SIZE];
455 	u8			mac_addr[6];
456 	u8                      fid;
457 	u8			extra_byte;
458 	u8			enabled;
459 };
460 
461 static int msg_enable;
462 
463 #define BE3             0x8000      /* Byte Enable 3 */
464 #define BE2             0x4000      /* Byte Enable 2 */
465 #define BE1             0x2000      /* Byte Enable 1 */
466 #define BE0             0x1000      /* Byte Enable 0 */
467 
468 /**
469  * register read/write calls.
470  *
471  * All these calls issue transactions to access the chip's registers. They
472  * all require that the necessary lock is held to prevent accesses when the
473  * chip is busy transferring packet data (RX/TX FIFO accesses).
474  */
475 
476 /**
477  * ks_rdreg8 - read 8 bit register from device
478  * @ks	  : The chip information
479  * @offset: The register address
480  *
481  * Read a 8bit register from the chip, returning the result
482  */
483 static u8 ks_rdreg8(struct ks_net *ks, int offset)
484 {
485 	u16 data;
486 	u8 shift_bit = offset & 0x03;
487 	u8 shift_data = (offset & 1) << 3;
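	/*
	 * The command word combines the register offset with a single
	 * byte enable: BE0 is shifted by the low two address bits, and
	 * shift_data then selects the byte lane of the 16-bit read that
	 * carries the register.
	 */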
488 	ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
489 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
490 	data  = ioread16(ks->hw_addr);
491 	return (u8)(data >> shift_data);
492 }
493 
494 /**
495  * ks_rdreg16 - read 16 bit register from device
496  * @ks	  : The chip information
497  * @offset: The register address
498  *
499  * Read a 16bit register from the chip, returning the result
500  */
501 
502 static u16 ks_rdreg16(struct ks_net *ks, int offset)
503 {
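	/*
	 * BE1|BE0 (shifted to BE3|BE2 when address bit 1 is set) enable
	 * the 16-bit lane that holds this register.
	 */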
504 	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
505 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
506 	return ioread16(ks->hw_addr);
507 }
508 
509 /**
510  * ks_wrreg8 - write 8bit register value to chip
511  * @ks: The chip information
512  * @offset: The register address
513  * @value: The value to write
514  *
515  */
516 static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
517 {
518 	u8  shift_bit = (offset & 0x03);
519 	u16 value_write = (u16)(value << ((offset & 1) << 3));
520 	ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
521 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
522 	iowrite16(value_write, ks->hw_addr);
523 }
524 
525 /**
526  * ks_wrreg16 - write 16bit register value to chip
527  * @ks: The chip information
528  * @offset: The register address
529  * @value: The value to write
530  *
531  */
532 
533 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
534 {
535 	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
536 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
537 	iowrite16(value, ks->hw_addr);
538 }
539 
540 /**
541  * ks_inblk - read a block of data from QMU. This is called after pseudo-DMA mode is enabled.
542  * @ks: The chip state
543  * @wptr: buffer address to save data
544  * @len: length in byte to read
545  *
546  */
547 static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
548 {
549 	len >>= 1;
550 	while (len--)
551 		*wptr++ = (u16)ioread16(ks->hw_addr);
552 }
553 
554 /**
555  * ks_outblk - write data to QMU. This is called after pseudo-DMA mode is enabled.
556  * @ks: The chip information
557  * @wptr: buffer address
558  * @len: length in byte to write
559  *
560  */
561 static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
562 {
563 	len >>= 1;
564 	while (len--)
565 		iowrite16(*wptr++, ks->hw_addr);
566 }
567 
568 static void ks_disable_int(struct ks_net *ks)
569 {
570 	ks_wrreg16(ks, KS_IER, 0x0000);
571 }  /* ks_disable_int */
572 
573 static void ks_enable_int(struct ks_net *ks)
574 {
575 	ks_wrreg16(ks, KS_IER, ks->rc_ier);
576 }  /* ks_enable_int */
577 
578 /**
579  * ks_tx_fifo_space - return the available hardware buffer size.
580  * @ks: The chip information
581  *
582  */
583 static inline u16 ks_tx_fifo_space(struct ks_net *ks)
584 {
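	/* the low 13 bits of TXMIR report the free TX FIFO memory in bytes */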
585 	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
586 }
587 
588 /**
589  * ks_save_cmd_reg - save the cached command register value.
590  * @ks: The chip information
591  *
592  */
593 static inline void ks_save_cmd_reg(struct ks_net *ks)
594 {
595 	/* The ks8851 MLL cannot reliably read back the command register,
596 	 * so rely on software to save its last written content.
597 	 */
598 	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
599 }
600 
601 /**
602  * ks_restore_cmd_reg - restore the command register from the cache and
603  * 	write to hardware register.
604  * @ks: The chip information
605  *
606  */
607 static inline void ks_restore_cmd_reg(struct ks_net *ks)
608 {
609 	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
610 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
611 }
612 
613 /**
614  * ks_set_powermode - set power mode of the device
615  * @ks: The chip information
616  * @pwrmode: The power mode value to write to KS_PMECR.
617  *
618  * Change the power mode of the chip.
619  */
620 static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
621 {
622 	unsigned pmecr;
623 
624 	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
625 
626 	ks_rdreg16(ks, KS_GRR);
627 	pmecr = ks_rdreg16(ks, KS_PMECR);
628 	pmecr &= ~PMECR_PM_MASK;
629 	pmecr |= pwrmode;
630 
631 	ks_wrreg16(ks, KS_PMECR, pmecr);
632 }
633 
634 /**
635  * ks_read_config - read chip configuration of bus width.
636  * @ks: The chip information
637  *
638  */
639 static void ks_read_config(struct ks_net *ks)
640 {
641 	u16 reg_data = 0;
642 
643 	/* Regardless of bus width, 8 bit read should always work.*/
644 	reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
645 	reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
646 
647 	/* addr/data bus are multiplexed */
648 	ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
649 
650 	/* Garbage data can appear when reading from the QMU,
651 	 * depending on the bus width.
652 	 */
653 
654 	if (reg_data & CCR_8BIT) {
655 		ks->bus_width = ENUM_BUS_8BIT;
656 		ks->extra_byte = 1;
657 	} else if (reg_data & CCR_16BIT) {
658 		ks->bus_width = ENUM_BUS_16BIT;
659 		ks->extra_byte = 2;
660 	} else {
661 		ks->bus_width = ENUM_BUS_32BIT;
662 		ks->extra_byte = 4;
663 	}
664 }
665 
666 /**
667  * ks_soft_reset - issue one of the soft resets to the device
668  * @ks: The device state.
669  * @op: The bit(s) to set in the GRR
670  *
671  * Issue the relevant soft-reset command to the device's GRR register
672  * specified by @op.
673  *
674  * Note, the delays are in there as a caution to ensure that the reset
675  * has time to take effect and then complete. Since the datasheet does
676  * not currently specify the exact sequence, we have chosen something
677  * that seems to work with our device.
678  */
679 static void ks_soft_reset(struct ks_net *ks, unsigned op)
680 {
681 	/* Disable interrupt first */
682 	ks_wrreg16(ks, KS_IER, 0x0000);
683 	ks_wrreg16(ks, KS_GRR, op);
684 	mdelay(10);	/* wait a short time to effect reset */
685 	ks_wrreg16(ks, KS_GRR, 0);
686 	mdelay(1);	/* wait for condition to clear */
687 }
688 
689 
690 void ks_enable_qmu(struct ks_net *ks)
691 {
692 	u16 w;
693 
694 	w = ks_rdreg16(ks, KS_TXCR);
695 	/* Enables QMU Transmit (TXCR). */
696 	ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
697 
698 	/*
699 	 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
700 	 * Enable
701 	 */
702 
703 	w = ks_rdreg16(ks, KS_RXQCR);
704 	ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
705 
706 	/* Enables QMU Receive (RXCR1). */
707 	w = ks_rdreg16(ks, KS_RXCR1);
708 	ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
709 	ks->enabled = true;
710 }  /* ks_enable_qmu */
711 
712 static void ks_disable_qmu(struct ks_net *ks)
713 {
714 	u16	w;
715 
716 	w = ks_rdreg16(ks, KS_TXCR);
717 
718 	/* Disables QMU Transmit (TXCR). */
719 	w  &= ~TXCR_TXE;
720 	ks_wrreg16(ks, KS_TXCR, w);
721 
722 	/* Disables QMU Receive (RXCR1). */
723 	w = ks_rdreg16(ks, KS_RXCR1);
724 	w &= ~RXCR1_RXE ;
725 	ks_wrreg16(ks, KS_RXCR1, w);
726 
727 	ks->enabled = false;
728 
729 }  /* ks_disable_qmu */
730 
731 /**
732  * ks_read_qmu - read 1 pkt data from the QMU.
733  * @ks: The chip information
734  * @buf: buffer address to save 1 pkt
735  * @len: Pkt length
736  * Here is the sequence to read 1 pkt:
737  *	1. set pseudo-DMA mode
738  *	2. read prepend data
739  *	3. read pkt data
740  *	4. reset pseudo-DMA mode
741  */
742 static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
743 {
744 	u32 r =  ks->extra_byte & 0x1 ;
745 	u32 w = ks->extra_byte - r;
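	/*
	 * extra_byte is split into one odd byte (r, present only on an
	 * 8-bit bus) read via ioread8(), and an even remainder (w) read
	 * together with the 2 status and 2 length bytes below.
	 */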
746 
747 	/* 1. set pseudo-DMA mode */
748 	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
749 	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
750 
751 	/* 2. read prepend data */
752 	/*
753 	 * Read 4 + extra bytes and discard them: the extra bytes are
754 	 * dummy padding, 2 bytes are status and 2 bytes are length.
755 	 */
756 
757 	/* use likely(r) for 8 bit access for performance */
758 	if (unlikely(r))
759 		ioread8(ks->hw_addr);
760 	ks_inblk(ks, buf, w + 2 + 2);
761 
762 	/* 3. read pkt data */
763 	ks_inblk(ks, buf, ALIGN(len, 4));
764 
765 	/* 4. reset pseudo-DMA mode */
766 	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
767 }
768 
769 /**
770  * ks_rcv - read multiple pkts data from the QMU.
771  * @ks: The chip information
772  * @netdev: The network device.
773  *
774  * Read all of the header information before reading the pkt content.
775  * Leaving only part of the pkts in the QMU after issuing the
776  * interrupt ack is not allowed.
777  */
778 static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
779 {
780 	u32	i;
781 	struct type_frame_head *frame_hdr = ks->frame_head_info;
782 	struct sk_buff *skb;
783 
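	/* the upper byte of RXFCTR holds the number of frames pending in the RX queue */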
784 	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
785 
786 	/* read all header information */
787 	for (i = 0; i < ks->frame_cnt; i++) {
788 		/* Checking Received packet status */
789 		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
790 		/* Get packet len from hardware */
791 		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
792 		frame_hdr++;
793 	}
794 
795 	frame_hdr = ks->frame_head_info;
796 	while (ks->frame_cnt--) {
797 		skb = dev_alloc_skb(frame_hdr->len + 16);
798 		if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
799 			(frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
800 			skb_reserve(skb, 2);
801 			/* read data block including CRC 4 bytes */
802 			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
803 			skb_put(skb, frame_hdr->len);
804 			skb->protocol = eth_type_trans(skb, netdev);
805 			netif_rx(skb);
806 		} else {
807 			pr_err("%s: err:skb alloc\n", __func__);
808 			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
809 			if (skb)
810 				dev_kfree_skb_irq(skb);
811 		}
812 		frame_hdr++;
813 	}
814 }
815 
816 /**
817  * ks_update_link_status - link status update.
818  * @netdev: The network device.
819  * @ks: The chip information
820  *
821  */
822 
823 static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
824 {
825 	/* check the status of the link */
826 	u32 link_up_status;
827 	if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
828 		netif_carrier_on(netdev);
829 		link_up_status = true;
830 	} else {
831 		netif_carrier_off(netdev);
832 		link_up_status = false;
833 	}
834 	netif_dbg(ks, link, ks->netdev,
835 		  "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
836 }
837 
838 /**
839  * ks_irq - device interrupt handler
840  * @irq: Interrupt number passed from the IRQ handler.
841  * @pw: The private word passed to request_irq(), our struct ks_net.
842  *
843  * This is the handler invoked to find out what happened
844  *
845  * Read the interrupt status, work out what needs to be done and then clear
846  * any of the interrupts that are not needed.
847  */
848 
849 static irqreturn_t ks_irq(int irq, void *pw)
850 {
851 	struct net_device *netdev = pw;
852 	struct ks_net *ks = netdev_priv(netdev);
853 	u16 status;
854 
855 	/*this should be the first in IRQ handler */
856 	ks_save_cmd_reg(ks);
857 
858 	status = ks_rdreg16(ks, KS_ISR);
859 	if (unlikely(!status)) {
860 		ks_restore_cmd_reg(ks);
861 		return IRQ_NONE;
862 	}
863 
864 	ks_wrreg16(ks, KS_ISR, status);
865 
866 	if (likely(status & IRQ_RXI))
867 		ks_rcv(ks, netdev);
868 
869 	if (unlikely(status & IRQ_LCI))
870 		ks_update_link_status(netdev, ks);
871 
872 	if (unlikely(status & IRQ_TXI))
873 		netif_wake_queue(netdev);
874 
875 	if (unlikely(status & IRQ_LDI)) {
876 
877 		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
878 		pmecr &= ~PMECR_WKEVT_MASK;
879 		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
880 	}
881 
882 	/* this should be the last in IRQ handler*/
883 	ks_restore_cmd_reg(ks);
884 	return IRQ_HANDLED;
885 }
886 
887 
888 /**
889  * ks_net_open - open network device
890  * @netdev: The network device being opened.
891  *
892  * Called when the network device is marked active, such as a user executing
893  * 'ifconfig up' on the device.
894  */
895 static int ks_net_open(struct net_device *netdev)
896 {
897 	struct ks_net *ks = netdev_priv(netdev);
898 	int err;
899 
900 #define	KS_INT_FLAGS	(IRQF_DISABLED|IRQF_TRIGGER_LOW)
901 	/* lock the card, even if we may not actually do anything
902 	 * else at the moment.
903 	 */
904 
905 	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
906 
907 	/* request the device interrupt */
908 	err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
909 
910 	if (err) {
911 		pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err);
912 		return err;
913 	}
914 
915 	/* wake up powermode to normal mode */
916 	ks_set_powermode(ks, PMECR_PM_NORMAL);
917 	mdelay(1);	/* wait for normal mode to take effect */
918 
919 	ks_wrreg16(ks, KS_ISR, 0xffff);
920 	ks_enable_int(ks);
921 	ks_enable_qmu(ks);
922 	netif_start_queue(ks->netdev);
923 
924 	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
925 
926 	return 0;
927 }
928 
929 /**
930  * ks_net_stop - close network device
931  * @netdev: The device being closed.
932  *
933  * Called to close down a network device which has been active. Cancel any
934  * work, shut down the RX and TX process and then place the chip into a low
935  * power state whilst it is not being used.
936  */
937 static int ks_net_stop(struct net_device *netdev)
938 {
939 	struct ks_net *ks = netdev_priv(netdev);
940 
941 	netif_info(ks, ifdown, netdev, "shutting down\n");
942 
943 	netif_stop_queue(netdev);
944 
945 	mutex_lock(&ks->lock);
946 
947 	/* turn off the IRQs and ack any outstanding */
948 	ks_wrreg16(ks, KS_IER, 0x0000);
949 	ks_wrreg16(ks, KS_ISR, 0xffff);
950 
951 	/* shutdown RX/TX QMU */
952 	ks_disable_qmu(ks);
953 
954 	/* set powermode to soft power down to save power */
955 	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
956 	free_irq(ks->irq, netdev);
957 	mutex_unlock(&ks->lock);
958 	return 0;
959 }
960 
961 
962 /**
963  * ks_write_qmu - write 1 pkt data to the QMU.
964  * @ks: The chip information
965  * @pdata: buffer address of the pkt to send
966  * @len: Pkt length in bytes
967  * Here is the sequence to write 1 pkt:
968  *	1. set pseudo-DMA mode
969  *	2. write status/length
970  *	3. write pkt data
971  *	4. reset pseudo-DMA mode
972  *	5. enqueue the pkt (TXQCR_METFE)
973  *	6. wait until the pkt is out
974  */
975 static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
976 {
977 	/* start header at txb[0] to align txw entries */
978 	ks->txh.txw[0] = 0;
979 	ks->txh.txw[1] = cpu_to_le16(len);
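	/* txw[0] is the TX control word (TXIC / frame id, left at 0 here); txw[1] is the byte count */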
980 
981 	/* 1. set pseudo-DMA mode */
982 	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
983 	/* 2. write status/length info */
984 	ks_outblk(ks, ks->txh.txw, 4);
985 	/* 3. write pkt data */
986 	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
987 	/* 4. reset pseudo-DMA mode */
988 	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
989 	/* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
990 	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
991 	/* 6. wait until TXQCR_METFE is auto-cleared */
992 	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
993 		;
994 }
995 
996 /**
997  * ks_start_xmit - transmit packet
998  * @skb		: The buffer to transmit
999  * @netdev	: The device used to transmit the packet.
1000  *
1001  * Called by the network layer to transmit the @skb.
1002  * TX and RX must be mutually exclusive, so the device interrupt is
1003  * disabled while a transmit is in progress.
1004  */
1005 static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1006 {
1007 	int retv = NETDEV_TX_OK;
1008 	struct ks_net *ks = netdev_priv(netdev);
1009 
1010 	disable_irq(netdev->irq);
1011 	ks_disable_int(ks);
1012 	spin_lock(&ks->statelock);
1013 
1014 	/* Extra space is required:
1015 	 * 4 bytes for alignment, 4 for status/length and 4 for the CRC
1016 	 */
1017 
1018 	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
1019 		ks_write_qmu(ks, skb->data, skb->len);
1020 		dev_kfree_skb(skb);
1021 	} else
1022 		retv = NETDEV_TX_BUSY;
1023 	spin_unlock(&ks->statelock);
1024 	ks_enable_int(ks);
1025 	enable_irq(netdev->irq);
1026 	return retv;
1027 }
1028 
1029 /**
1030  * ks_start_rx - ready to serve pkts
1031  * @ks		: The chip information
1032  *
1033  */
1034 static void ks_start_rx(struct ks_net *ks)
1035 {
1036 	u16 cntl;
1037 
1038 	/* Enables QMU Receive (RXCR1). */
1039 	cntl = ks_rdreg16(ks, KS_RXCR1);
1040 	cntl |= RXCR1_RXE ;
1041 	ks_wrreg16(ks, KS_RXCR1, cntl);
1042 }  /* ks_start_rx */
1043 
1044 /**
1045  * ks_stop_rx - stop serving pkts
1046  * @ks		: The chip information
1047  *
1048  */
1049 static void ks_stop_rx(struct ks_net *ks)
1050 {
1051 	u16 cntl;
1052 
1053 	/* Disables QMU Receive (RXCR1). */
1054 	cntl = ks_rdreg16(ks, KS_RXCR1);
1055 	cntl &= ~RXCR1_RXE ;
1056 	ks_wrreg16(ks, KS_RXCR1, cntl);
1057 
1058 }  /* ks_stop_rx */
1059 
1060 static unsigned long const ethernet_polynomial = 0x04c11db7U;
1061 
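/* bit-serial CRC-32 (polynomial 0x04c11db7) used to hash multicast addresses */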
1062 static unsigned long ether_gen_crc(int length, u8 *data)
1063 {
1064 	long crc = -1;
1065 	while (--length >= 0) {
1066 		u8 current_octet = *data++;
1067 		int bit;
1068 
1069 		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1070 			crc = (crc << 1) ^
1071 				((crc < 0) ^ (current_octet & 1) ?
1072 			ethernet_polynomial : 0);
1073 		}
1074 	}
1075 	return (unsigned long)crc;
1076 }  /* ether_gen_crc */
1077 
1078 /**
1079 * ks_set_grpaddr - set multicast information
1080 * @ks : The chip information
1081 */
1082 
1083 static void ks_set_grpaddr(struct ks_net *ks)
1084 {
1085 	u8	i;
1086 	u32	index, position, value;
1087 
1088 	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
1089 
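	/*
	 * Hash each address with the Ethernet CRC; the top six bits of
	 * the CRC select one of the 64 bits in the multicast hash table.
	 */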
1090 	for (i = 0; i < ks->mcast_lst_size; i++) {
1091 		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
1092 		index = position >> 3;
1093 		value = 1 << (position & 7);
1094 		ks->mcast_bits[index] |= (u8)value;
1095 	}
1096 
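	/*
	 * Write the 64-bit hash table to the MAHTR0..MAHTR3 registers,
	 * two bytes per 16-bit register write.
	 */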
1097 	for (i  = 0; i < HW_MCAST_SIZE; i++) {
1098 		if (i & 1) {
1099 			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
1100 				(ks->mcast_bits[i] << 8) |
1101 				ks->mcast_bits[i - 1]);
1102 		}
1103 	}
1104 }  /* ks_set_grpaddr */
1105 
1106 /*
1107 * ks_clear_mcast - clear multicast information
1108 *
1109 * @ks : The chip information
1110 * This routine removes all mcast addresses set in the hardware.
1111 */
1112 
1113 static void ks_clear_mcast(struct ks_net *ks)
1114 {
1115 	u16	i, mcast_size;
1116 	for (i = 0; i < HW_MCAST_SIZE; i++)
1117 		ks->mcast_bits[i] = 0;
1118 
1119 	mcast_size = HW_MCAST_SIZE >> 2;
1120 	for (i = 0; i < mcast_size; i++)
1121 		ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1122 }
1123 
1124 static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1125 {
1126 	u16		cntl;
1127 	ks->promiscuous = promiscuous_mode;
1128 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
1129 	cntl = ks_rdreg16(ks, KS_RXCR1);
1130 
1131 	cntl &= ~RXCR1_FILTER_MASK;
1132 	if (promiscuous_mode)
1133 		/* Enable Promiscuous mode */
1134 		cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1135 	else
1136 		/* Disable Promiscuous mode (default normal mode) */
1137 		cntl |= RXCR1_RXPAFMA;
1138 
1139 	ks_wrreg16(ks, KS_RXCR1, cntl);
1140 
1141 	if (ks->enabled)
1142 		ks_start_rx(ks);
1143 
1144 }  /* ks_set_promis */
1145 
1146 static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1147 {
1148 	u16	cntl;
1149 
1150 	ks->all_mcast = mcast;
1151 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
1152 	cntl = ks_rdreg16(ks, KS_RXCR1);
1153 	cntl &= ~RXCR1_FILTER_MASK;
1154 	if (mcast)
1155 		/* Enable "Perfect with Multicast address passed mode" */
1156 		cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1157 	else
1158 		/**
1159 		 * Disable "Perfect with Multicast address passed
1160 		 * mode" (normal mode).
1161 		 */
1162 		cntl |= RXCR1_RXPAFMA;
1163 
1164 	ks_wrreg16(ks, KS_RXCR1, cntl);
1165 
1166 	if (ks->enabled)
1167 		ks_start_rx(ks);
1168 }  /* ks_set_mcast */
1169 
1170 static void ks_set_rx_mode(struct net_device *netdev)
1171 {
1172 	struct ks_net *ks = netdev_priv(netdev);
1173 	struct netdev_hw_addr *ha;
1174 
1175 	/* Turn on/off promiscuous mode. */
1176 	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1177 		ks_set_promis(ks,
1178 			(u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1179 	/* Turn on/off all mcast mode. */
1180 	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1181 		ks_set_mcast(ks,
1182 			(u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1183 	else
1184 		ks_set_promis(ks, false);
1185 
1186 	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
1187 		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
1188 			int i = 0;
1189 
1190 			netdev_for_each_mc_addr(ha, netdev) {
1191 				if (!(*ha->addr & 1))
1192 					continue;
1193 				if (i >= MAX_MCAST_LST)
1194 					break;
1195 				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
1196 			}
1197 			ks->mcast_lst_size = (u8)i;
1198 			ks_set_grpaddr(ks);
1199 		} else {
1200 			/**
1201 			 * List too big to support so
1202 			 * turn on all mcast mode.
1203 			 */
1204 			ks->mcast_lst_size = MAX_MCAST_LST;
1205 			ks_set_mcast(ks, true);
1206 		}
1207 	} else {
1208 		ks->mcast_lst_size = 0;
1209 		ks_clear_mcast(ks);
1210 	}
1211 } /* ks_set_rx_mode */
1212 
1213 static void ks_set_mac(struct ks_net *ks, u8 *data)
1214 {
1215 	u16 *pw = (u16 *)data;
1216 	u16 w, u;
1217 
1218 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
1219 
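	/*
	 * The address is written high register first: MARH takes octets
	 * 0-1, MARM octets 2-3 and MARL octets 4-5, with the two octets
	 * swapped within each 16-bit register write.
	 */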
1220 	u = *pw++;
1221 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1222 	ks_wrreg16(ks, KS_MARH, w);
1223 
1224 	u = *pw++;
1225 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1226 	ks_wrreg16(ks, KS_MARM, w);
1227 
1228 	u = *pw;
1229 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1230 	ks_wrreg16(ks, KS_MARL, w);
1231 
1232 	memcpy(ks->mac_addr, data, 6);
1233 
1234 	if (ks->enabled)
1235 		ks_start_rx(ks);
1236 }
1237 
1238 static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1239 {
1240 	struct ks_net *ks = netdev_priv(netdev);
1241 	struct sockaddr *addr = paddr;
1242 	u8 *da;
1243 
1244 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1245 
1246 	da = (u8 *)netdev->dev_addr;
1247 
1248 	ks_set_mac(ks, da);
1249 	return 0;
1250 }
1251 
1252 static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1253 {
1254 	struct ks_net *ks = netdev_priv(netdev);
1255 
1256 	if (!netif_running(netdev))
1257 		return -EINVAL;
1258 
1259 	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1260 }
1261 
1262 static const struct net_device_ops ks_netdev_ops = {
1263 	.ndo_open		= ks_net_open,
1264 	.ndo_stop		= ks_net_stop,
1265 	.ndo_do_ioctl		= ks_net_ioctl,
1266 	.ndo_start_xmit		= ks_start_xmit,
1267 	.ndo_set_mac_address	= ks_set_mac_address,
1268 	.ndo_set_rx_mode	= ks_set_rx_mode,
1269 	.ndo_change_mtu		= eth_change_mtu,
1270 	.ndo_validate_addr	= eth_validate_addr,
1271 };
1272 
1273 /* ethtool support */
1274 
1275 static void ks_get_drvinfo(struct net_device *netdev,
1276 			       struct ethtool_drvinfo *di)
1277 {
1278 	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
1279 	strlcpy(di->version, "1.00", sizeof(di->version));
1280 	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
1281 		sizeof(di->bus_info));
1282 }
1283 
1284 static u32 ks_get_msglevel(struct net_device *netdev)
1285 {
1286 	struct ks_net *ks = netdev_priv(netdev);
1287 	return ks->msg_enable;
1288 }
1289 
1290 static void ks_set_msglevel(struct net_device *netdev, u32 to)
1291 {
1292 	struct ks_net *ks = netdev_priv(netdev);
1293 	ks->msg_enable = to;
1294 }
1295 
1296 static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1297 {
1298 	struct ks_net *ks = netdev_priv(netdev);
1299 	return mii_ethtool_gset(&ks->mii, cmd);
1300 }
1301 
1302 static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1303 {
1304 	struct ks_net *ks = netdev_priv(netdev);
1305 	return mii_ethtool_sset(&ks->mii, cmd);
1306 }
1307 
1308 static u32 ks_get_link(struct net_device *netdev)
1309 {
1310 	struct ks_net *ks = netdev_priv(netdev);
1311 	return mii_link_ok(&ks->mii);
1312 }
1313 
1314 static int ks_nway_reset(struct net_device *netdev)
1315 {
1316 	struct ks_net *ks = netdev_priv(netdev);
1317 	return mii_nway_restart(&ks->mii);
1318 }
1319 
1320 static const struct ethtool_ops ks_ethtool_ops = {
1321 	.get_drvinfo	= ks_get_drvinfo,
1322 	.get_msglevel	= ks_get_msglevel,
1323 	.set_msglevel	= ks_set_msglevel,
1324 	.get_settings	= ks_get_settings,
1325 	.set_settings	= ks_set_settings,
1326 	.get_link	= ks_get_link,
1327 	.nway_reset	= ks_nway_reset,
1328 };
1329 
1330 /* MII interface controls */
1331 
1332 /**
1333  * ks_phy_reg - convert MII register into a KS8851 register
1334  * @reg: MII register number.
1335  *
1336  * Return the KS8851 register number for the corresponding MII PHY register
1337  * if possible. Return zero if the MII register has no direct mapping to the
1338  * KS8851 register set.
1339  */
1340 static int ks_phy_reg(int reg)
1341 {
1342 	switch (reg) {
1343 	case MII_BMCR:
1344 		return KS_P1MBCR;
1345 	case MII_BMSR:
1346 		return KS_P1MBSR;
1347 	case MII_PHYSID1:
1348 		return KS_PHY1ILR;
1349 	case MII_PHYSID2:
1350 		return KS_PHY1IHR;
1351 	case MII_ADVERTISE:
1352 		return KS_P1ANAR;
1353 	case MII_LPA:
1354 		return KS_P1ANLPR;
1355 	}
1356 
1357 	return 0x0;
1358 }
1359 
1360 /**
1361  * ks_phy_read - MII interface PHY register read.
1362  * @netdev: The network device the PHY is on.
1363  * @phy_addr: Address of PHY (ignored as we only have one)
1364  * @reg: The register to read.
1365  *
1366  * This call reads data from the PHY register specified in @reg. Since the
1367  * device does not support all the MII registers, the non-existent values
1368  * are always returned as zero.
1369  *
1370  * We return zero for unsupported registers as the MII code does not check
1371  * the value returned for any error status, and simply returns it to the
1372  * caller. The mii-tool that the driver was tested with takes any -ve error
1373  * as real PHY capabilities, thus displaying incorrect data to the user.
1374  */
1375 static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1376 {
1377 	struct ks_net *ks = netdev_priv(netdev);
1378 	int ksreg;
1379 	int result;
1380 
1381 	ksreg = ks_phy_reg(reg);
1382 	if (!ksreg)
1383 		return 0x0;	/* no error return allowed, so use zero */
1384 
1385 	mutex_lock(&ks->lock);
1386 	result = ks_rdreg16(ks, ksreg);
1387 	mutex_unlock(&ks->lock);
1388 
1389 	return result;
1390 }
1391 
1392 static void ks_phy_write(struct net_device *netdev,
1393 			     int phy, int reg, int value)
1394 {
1395 	struct ks_net *ks = netdev_priv(netdev);
1396 	int ksreg;
1397 
1398 	ksreg = ks_phy_reg(reg);
1399 	if (ksreg) {
1400 		mutex_lock(&ks->lock);
1401 		ks_wrreg16(ks, ksreg, value);
1402 		mutex_unlock(&ks->lock);
1403 	}
1404 }
1405 
1406 /**
1407  * ks_read_selftest - read the selftest memory info.
1408  * @ks: The device state
1409  *
1410  * Read and check the TX/RX memory selftest information.
1411  */
1412 static int ks_read_selftest(struct ks_net *ks)
1413 {
1414 	unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1415 	int ret = 0;
1416 	unsigned rd;
1417 
1418 	rd = ks_rdreg16(ks, KS_MBIR);
1419 
1420 	if ((rd & both_done) != both_done) {
1421 		netdev_warn(ks->netdev, "Memory selftest not finished\n");
1422 		return 0;
1423 	}
1424 
1425 	if (rd & MBIR_TXMBFA) {
1426 		netdev_err(ks->netdev, "TX memory selftest fails\n");
1427 		ret |= 1;
1428 	}
1429 
1430 	if (rd & MBIR_RXMBFA) {
1431 		netdev_err(ks->netdev, "RX memory selftest fails\n");
1432 		ret |= 2;
1433 	}
1434 
1435 	if (!ret)
		netdev_info(ks->netdev, "the selftest passed\n");
1436 	return ret;
1437 }
1438 
1439 static void ks_setup(struct ks_net *ks)
1440 {
1441 	u16	w;
1442 
1443 	/**
1444 	 * Configure QMU Transmit
1445 	 */
1446 
1447 	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
1448 	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
1449 
1450 	/* Setup Receive Frame Data Pointer Auto-Increment */
1451 	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1452 
1453 	/* Setup Receive Frame Threshold - 1 frame (RXFCTR) */
1454 	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
1455 
1456 	/* Setup RxQ Command Control (RXQCR) */
1457 	ks->rc_rxqcr = RXQCR_CMD_CNTL;
1458 	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
1459 
1460 	/*
1461 	 * Set the forced mode to half duplex (the default is full duplex),
1462 	 * because if auto-negotiation fails most switches fall back to
1463 	 * half duplex.
1464 	 */
1465 
1466 	w = ks_rdreg16(ks, KS_P1MBCR);
1467 	w &= ~P1MBCR_FORCE_FDX;
1468 	ks_wrreg16(ks, KS_P1MBCR, w);
1469 
1470 	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
1471 	ks_wrreg16(ks, KS_TXCR, w);
1472 
1473 	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
1474 
1475 	if (ks->promiscuous)         /* bPromiscuous */
1476 		w |= (RXCR1_RXAE | RXCR1_RXINVF);
1477 	else if (ks->all_mcast) /* Multicast address passed mode */
1478 		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1479 	else                                   /* Normal mode */
1480 		w |= RXCR1_RXPAFMA;
1481 
1482 	ks_wrreg16(ks, KS_RXCR1, w);
1483 }  /*ks_setup */
1484 
1485 
1486 static void ks_setup_int(struct ks_net *ks)
1487 {
1488 	ks->rc_ier = 0x00;
1489 	/* Clear the interrupts status of the hardware. */
1490 	ks_wrreg16(ks, KS_ISR, 0xffff);
1491 
1492 	/* Enables the interrupts of the hardware. */
1493 	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
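	/* rc_ier is only written to KS_IER by ks_enable_int() when the interface is brought up */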
1494 }  /* ks_setup_int */
1495 
1496 static int ks_hw_init(struct ks_net *ks)
1497 {
1498 #define	MHEADER_SIZE	(sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1499 	ks->promiscuous = 0;
1500 	ks->all_mcast = 0;
1501 	ks->mcast_lst_size = 0;
1502 
1503 	ks->frame_head_info = (struct type_frame_head *) \
1504 		kmalloc(MHEADER_SIZE, GFP_KERNEL);
1505 	if (!ks->frame_head_info) {
1506 		pr_err("Error: Failed to allocate frame memory\n");
1507 		return false;
1508 	}
1509 
1510 	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1511 	return true;
1512 }
1513 
1514 
1515 static int __devinit ks8851_probe(struct platform_device *pdev)
1516 {
1517 	int err = -ENOMEM;
1518 	struct resource *io_d, *io_c;
1519 	struct net_device *netdev;
1520 	struct ks_net *ks;
1521 	u16 id, data;
1522 
1523 	io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1524 	io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1525 
1526 	if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
1527 		goto err_mem_region;
1528 
1529 	if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
1530 		goto err_mem_region1;
1531 
1532 	netdev = alloc_etherdev(sizeof(struct ks_net));
1533 	if (!netdev)
1534 		goto err_alloc_etherdev;
1535 
1536 	SET_NETDEV_DEV(netdev, &pdev->dev);
1537 
1538 	ks = netdev_priv(netdev);
1539 	ks->netdev = netdev;
1540 	ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
1541 
1542 	if (!ks->hw_addr)
1543 		goto err_ioremap;
1544 
1545 	ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
1546 	if (!ks->hw_addr_cmd)
1547 		goto err_ioremap1;
1548 
1549 	ks->irq = platform_get_irq(pdev, 0);
1550 
1551 	if (ks->irq < 0) {
1552 		err = ks->irq;
1553 		goto err_get_irq;
1554 	}
1555 
1556 	ks->pdev = pdev;
1557 
1558 	mutex_init(&ks->lock);
1559 	spin_lock_init(&ks->statelock);
1560 
1561 	netdev->netdev_ops = &ks_netdev_ops;
1562 	netdev->ethtool_ops = &ks_ethtool_ops;
1563 
1564 	/* setup mii state */
1565 	ks->mii.dev             = netdev;
1566 	ks->mii.phy_id          = 1;
1567 	ks->mii.phy_id_mask     = 1;
1568 	ks->mii.reg_num_mask    = 0xf;
1569 	ks->mii.mdio_read       = ks_phy_read;
1570 	ks->mii.mdio_write      = ks_phy_write;
1571 
1572 	netdev_info(netdev, "message enable is %d\n", msg_enable);
1573 	/* set the default message enable */
1574 	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1575 						     NETIF_MSG_PROBE |
1576 						     NETIF_MSG_LINK));
1577 	ks_read_config(ks);
1578 
1579 	/* simple check for a valid chip being connected to the bus */
1580 	if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1581 		netdev_err(netdev, "failed to read device ID\n");
1582 		err = -ENODEV;
1583 		goto err_register;
1584 	}
1585 
1586 	if (ks_read_selftest(ks)) {
1587 		netdev_err(netdev, "chip memory selftest failed\n");
1588 		err = -ENODEV;
1589 		goto err_register;
1590 	}
1591 
1592 	err = register_netdev(netdev);
1593 	if (err)
1594 		goto err_register;
1595 
1596 	platform_set_drvdata(pdev, netdev);
1597 
1598 	ks_soft_reset(ks, GRR_GSR);
1599 	ks_hw_init(ks);
1600 	ks_disable_qmu(ks);
1601 	ks_setup(ks);
1602 	ks_setup_int(ks);
1603 	memcpy(netdev->dev_addr, ks->mac_addr, 6);
1604 
1605 	data = ks_rdreg16(ks, KS_OBCR);
1606 	ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1607 
1608 	/**
1609 	 * If you want to use the default MAC addr,
1610 	 * comment out the 2 functions below.
1611 	 */
1612 
1613 	random_ether_addr(netdev->dev_addr);
1614 	ks_set_mac(ks, netdev->dev_addr);
1615 
1616 	id = ks_rdreg16(ks, KS_CIDER);
1617 
1618 	netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1619 		    (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1620 	return 0;
1621 
1622 err_register:
1623 err_get_irq:
1624 	iounmap(ks->hw_addr_cmd);
1625 err_ioremap1:
1626 	iounmap(ks->hw_addr);
1627 err_ioremap:
1628 	free_netdev(netdev);
1629 err_alloc_etherdev:
1630 	release_mem_region(io_c->start, resource_size(io_c));
1631 err_mem_region1:
1632 	release_mem_region(io_d->start, resource_size(io_d));
1633 err_mem_region:
1634 	return err;
1635 }
1636 
1637 static int __devexit ks8851_remove(struct platform_device *pdev)
1638 {
1639 	struct net_device *netdev = platform_get_drvdata(pdev);
1640 	struct ks_net *ks = netdev_priv(netdev);
1641 	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1642 
1643 	unregister_netdev(netdev);
1644 	kfree(ks->frame_head_info);
1645 	iounmap(ks->hw_addr);
	iounmap(ks->hw_addr_cmd);
1646 	free_netdev(netdev);
1647 	release_mem_region(iomem->start, resource_size(iomem));
1648 	platform_set_drvdata(pdev, NULL);
1649 	return 0;
1650 
1651 }
1652 
1653 static struct platform_driver ks8851_platform_driver = {
1654 	.driver = {
1655 		.name = DRV_NAME,
1656 		.owner = THIS_MODULE,
1657 	},
1658 	.probe = ks8851_probe,
1659 	.remove = __devexit_p(ks8851_remove),
1660 };
1661 
1662 static int __init ks8851_init(void)
1663 {
1664 	return platform_driver_register(&ks8851_platform_driver);
1665 }
1666 
1667 static void __exit ks8851_exit(void)
1668 {
1669 	platform_driver_unregister(&ks8851_platform_driver);
1670 }
1671 
1672 module_init(ks8851_init);
1673 module_exit(ks8851_exit);
1674 
1675 MODULE_DESCRIPTION("KS8851 MLL Network driver");
1676 MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
1677 MODULE_LICENSE("GPL");
1678 module_param_named(message, msg_enable, int, 0);
1679 MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1680 
1681