1 /* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
2 
3 /*
4 
5   he.c
6 
7   ForeRunnerHE ATM Adapter driver for ATM on Linux
8   Copyright (C) 1999-2001  Naval Research Laboratory
9 
10   This library is free software; you can redistribute it and/or
11   modify it under the terms of the GNU Lesser General Public
12   License as published by the Free Software Foundation; either
13   version 2.1 of the License, or (at your option) any later version.
14 
15   This library is distributed in the hope that it will be useful,
16   but WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18   Lesser General Public License for more details.
19 
20   You should have received a copy of the GNU Lesser General Public
21   License along with this library; if not, write to the Free Software
22   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23 
24 */
25 
26 /*
27 
28   he.c
29 
30   ForeRunnerHE ATM Adapter driver for ATM on Linux
31   Copyright (C) 1999-2001  Naval Research Laboratory
32 
33   Permission to use, copy, modify and distribute this software and its
34   documentation is hereby granted, provided that both the copyright
35   notice and this permission notice appear in all copies of the software,
36   derivative works or modified versions, and any portions thereof, and
37   that both notices appear in supporting documentation.
38 
39   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
40   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
41   RESULTING FROM THE USE OF THIS SOFTWARE.
42 
43   This driver was written using the "Programmer's Reference Manual for
44   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
45 
46   AUTHORS:
47 	chas williams <chas@cmf.nrl.navy.mil>
48 	eric kinzie <ekinzie@cmf.nrl.navy.mil>
49 
50   NOTES:
51 	4096 supported 'connections'
52 	group 0 is used for all traffic
53 	interrupt queue 0 is used for all interrupts
54 	aal0 support (based on work from ulrich.u.muller@nokia.com)
55 
56  */
57 
58 #include <linux/config.h>
59 #include <linux/module.h>
60 #include <linux/version.h>
61 #include <linux/kernel.h>
62 #include <linux/skbuff.h>
63 #include <linux/pci.h>
64 #include <linux/errno.h>
65 #include <linux/types.h>
66 #include <linux/string.h>
67 #include <linux/delay.h>
68 #include <linux/init.h>
69 #include <linux/mm.h>
70 #include <linux/sched.h>
71 #include <linux/timer.h>
72 #include <linux/interrupt.h>
73 #include <asm/io.h>
74 #include <asm/byteorder.h>
75 #include <asm/uaccess.h>
76 
77 #include <linux/atmdev.h>
78 #include <linux/atm.h>
79 #include <linux/sonet.h>
80 
81 #define USE_TASKLET
82 #undef USE_SCATTERGATHER
83 #undef USE_CHECKSUM_HW			/* still confused about this */
84 #define USE_RBPS
85 #undef USE_RBPS_POOL			/* if memory is tight try this */
86 #undef USE_RBPL_POOL			/* if memory is tight try this */
87 #define USE_TPD_POOL
88 /* #undef CONFIG_ATM_HE_USE_SUNI */
89 
90 /* compatibility */
91 
92 #ifndef IRQ_HANDLED
93 typedef void irqreturn_t;
94 #define IRQ_NONE
95 #define IRQ_HANDLED
96 #define IRQ_RETVAL(x)
97 #endif
98 
99 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,9)
100 #define __devexit_p(func)		func
101 #endif
102 
103 #ifndef MODULE_LICENSE
104 #define MODULE_LICENSE(x)
105 #endif
106 
107 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
108 #define pci_set_drvdata(pci_dev, data)	(pci_dev)->driver_data = (data)
109 #define pci_get_drvdata(pci_dev)	(pci_dev)->driver_data
110 #endif
111 
112 #include "he.h"
113 
114 #include "suni.h"
115 
116 #include <linux/atm_he.h>
117 
118 #define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
119 
120 #undef DEBUG
121 #ifdef DEBUG
122 #define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
123 #else
124 #define HPRINTK(fmt,args...)	do { } while (0)
125 #endif /* DEBUG */
126 
127 
128 /* version definition */
129 
130 static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";
131 
132 /* declarations */
133 
134 static int he_open(struct atm_vcc *vcc, short vpi, int vci);
135 static void he_close(struct atm_vcc *vcc);
136 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
137 static int he_sg_send(struct atm_vcc *vcc, unsigned long start, unsigned long size);
138 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg);
139 static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
140 static void he_tasklet(unsigned long data);
141 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
142 static int he_start(struct atm_dev *dev);
143 static void he_stop(struct he_dev *dev);
144 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
145 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
146 
147 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
148 
149 /* globals */
150 
151 struct he_dev *he_devs = NULL;
152 static short disable64 = -1;
153 static short nvpibits = -1;
154 static short nvcibits = -1;
155 static short rx_skb_reserve = 16;
156 static short irq_coalesce = 1;
157 static short sdh = 1;
158 
159 static struct atmdev_ops he_ops =
160 {
161 	.open =		he_open,
162 	.close =	he_close,
163 	.ioctl =	he_ioctl,
164 	.send =		he_send,
165 	.sg_send =	he_sg_send,
166 	.phy_put =	he_phy_put,
167 	.phy_get =	he_phy_get,
168 	.proc_read =	he_proc_read,
169 	.owner =	THIS_MODULE
170 };
171 
172 #define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
173 #define he_readl(dev, reg)		readl((dev)->membase + (reg))
174 
175 /* section 2.12 connection memory access */
176 
177 static __inline__ void
178 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
179 								unsigned flags)
180 {
181 	he_writel(he_dev, val, CON_DAT);
182 	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
183 	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
184 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
185 }
186 
187 #define he_writel_rcm(dev, val, reg) 				\
188 			he_writel_internal(dev, val, reg, CON_CTL_RCM)
189 
190 #define he_writel_tcm(dev, val, reg) 				\
191 			he_writel_internal(dev, val, reg, CON_CTL_TCM)
192 
193 #define he_writel_mbox(dev, val, reg) 				\
194 			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
195 
196 static unsigned
197 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
198 {
199 	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
200 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
201 	return he_readl(he_dev, CON_DAT);
202 }
203 
204 #define he_readl_rcm(dev, reg) \
205 			he_readl_internal(dev, reg, CON_CTL_RCM)
206 
207 #define he_readl_tcm(dev, reg) \
208 			he_readl_internal(dev, reg, CON_CTL_TCM)
209 
210 #define he_readl_mbox(dev, reg) \
211 			he_readl_internal(dev, reg, CON_CTL_MBOX)
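/* illustration (not from the manual): he_writel_rcm(dev, 0, 0x0) expands
 * to he_writel_internal(dev, 0, 0x0, CON_CTL_RCM): the data word is
 * staged in CON_DAT, the write is issued through CON_CTL with the rcm
 * flag and the target address, and the host spins on CON_CTL_BUSY until
 * the connection memory cycle completes
 */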
212 
213 
214 /* figure 2.2 connection id */
215 
216 #define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
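/* worked example (vcibits assumed for illustration): with vcibits = 10,
 * vpi = 1 and vci = 32 give cid = ((1 << 10) | 32) & 0x1fff = 0x420
 */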
217 
218 /* 2.5.1 per connection transmit state registers */
219 
220 #define he_writel_tsr0(dev, val, cid) \
221 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
222 #define he_readl_tsr0(dev, cid) \
223 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
224 
225 #define he_writel_tsr1(dev, val, cid) \
226 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
227 
228 #define he_writel_tsr2(dev, val, cid) \
229 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
230 
231 #define he_writel_tsr3(dev, val, cid) \
232 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
233 
234 #define he_writel_tsr4(dev, val, cid) \
235 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
236 
237 	/* from page 2-20
238 	 *
239 	 * NOTE While the transmit connection is active, bits 23 through 0
240 	 *      of this register must not be written by the host.  Byte
241 	 *      enables should be used during normal operation when writing
242 	 *      the most significant byte.
243 	 */
244 
245 #define he_writel_tsr4_upper(dev, val, cid) \
246 		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
247 							CON_CTL_TCM \
248 							| CON_BYTE_DISABLE_2 \
249 							| CON_BYTE_DISABLE_1 \
250 							| CON_BYTE_DISABLE_0)
251 
252 #define he_readl_tsr4(dev, cid) \
253 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
254 
255 #define he_writel_tsr5(dev, val, cid) \
256 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
257 
258 #define he_writel_tsr6(dev, val, cid) \
259 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
260 
261 #define he_writel_tsr7(dev, val, cid) \
262 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
263 
264 
265 #define he_writel_tsr8(dev, val, cid) \
266 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
267 
268 #define he_writel_tsr9(dev, val, cid) \
269 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
270 
271 #define he_writel_tsr10(dev, val, cid) \
272 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
273 
274 #define he_writel_tsr11(dev, val, cid) \
275 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
276 
277 
278 #define he_writel_tsr12(dev, val, cid) \
279 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
280 
281 #define he_writel_tsr13(dev, val, cid) \
282 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
283 
284 
285 #define he_writel_tsr14(dev, val, cid) \
286 		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
287 
288 #define he_writel_tsr14_upper(dev, val, cid) \
289 		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
290 							CON_CTL_TCM \
291 							| CON_BYTE_DISABLE_2 \
292 							| CON_BYTE_DISABLE_1 \
293 							| CON_BYTE_DISABLE_0)
294 
295 /* 2.7.1 per connection receive state registers */
296 
297 #define he_writel_rsr0(dev, val, cid) \
298 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
299 #define he_readl_rsr0(dev, cid) \
300 		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
301 
302 #define he_writel_rsr1(dev, val, cid) \
303 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
304 
305 #define he_writel_rsr2(dev, val, cid) \
306 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
307 
308 #define he_writel_rsr3(dev, val, cid) \
309 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
310 
311 #define he_writel_rsr4(dev, val, cid) \
312 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
313 
314 #define he_writel_rsr5(dev, val, cid) \
315 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
316 
317 #define he_writel_rsr6(dev, val, cid) \
318 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
319 
320 #define he_writel_rsr7(dev, val, cid) \
321 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
322 
323 static __inline__ struct atm_vcc*
324 __find_vcc(struct he_dev *he_dev, unsigned cid)
325 {
326 	struct atm_vcc *vcc;
327 	struct sock *s;
328 	short vpi;
329 	int vci;
330 
331 	vpi = cid >> he_dev->vcibits;
332 	vci = cid & ((1 << he_dev->vcibits) - 1);
333 
334 	for (s = vcc_sklist; s; s = s->next) {
335 		vcc = s->protinfo.af_atm;
336 		if (vcc->vci == vci && vcc->vpi == vpi &&
337 		    vcc->dev == he_dev->atm_dev &&
338 		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
339 			return vcc;
340 		}
341 	}
342 
343 	return NULL;
344 }
345 
346 static int __devinit
347 he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
348 {
349 	struct atm_dev *atm_dev = NULL;
350 	struct he_dev *he_dev = NULL;
351 	int err = 0;
352 
353 	printk(KERN_INFO "he: %s\n", version);
354 
355 	if (pci_enable_device(pci_dev))
356 		return -EIO;
357 	if (pci_set_dma_mask(pci_dev, HE_DMA_MASK) != 0) {
358 		printk(KERN_WARNING "he: no suitable dma available\n");
359 		err = -EIO;
360 		goto init_one_failure;
361 	}
362 
363 	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, 0);
364 	if (!atm_dev) {
365 		err = -ENODEV;
366 		goto init_one_failure;
367 	}
368 	pci_set_drvdata(pci_dev, atm_dev);
369 
370 	he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
371 							GFP_KERNEL);
372 	if (!he_dev) {
373 		err = -ENOMEM;
374 		goto init_one_failure;
375 	}
376 	memset(he_dev, 0, sizeof(struct he_dev));
377 
378 	he_dev->pci_dev = pci_dev;
379 	he_dev->atm_dev = atm_dev;
380 	he_dev->atm_dev->dev_data = he_dev;
381 	atm_dev->dev_data = he_dev;
382 	he_dev->number = atm_dev->number;
383 	if (he_start(atm_dev)) {
384 		he_stop(he_dev);
385 		err = -ENODEV;
386 		goto init_one_failure;
387 	}
388 	he_dev->next = NULL;
389 	if (he_devs)
390 		he_dev->next = he_devs;
391 	he_devs = he_dev;
392 	return 0;
393 
394 init_one_failure:
395 	if (atm_dev)
396 		atm_dev_deregister(atm_dev);
397 	if (he_dev)
398 		kfree(he_dev);
399 	pci_disable_device(pci_dev);
400 	return err;
401 }
402 
403 static void __devexit
404 he_remove_one (struct pci_dev *pci_dev)
405 {
406 	struct atm_dev *atm_dev;
407 	struct he_dev *he_dev;
408 
409 	atm_dev = pci_get_drvdata(pci_dev);
410 	he_dev = HE_DEV(atm_dev);
411 
412 	/* need to remove from he_devs */
413 
414 	he_stop(he_dev);
415 	atm_dev_deregister(atm_dev);
416 	kfree(he_dev);
417 
418 	pci_set_drvdata(pci_dev, NULL);
419 	pci_disable_device(pci_dev);
420 }
421 
422 
423 static unsigned
424 rate_to_atmf(unsigned rate)		/* cps to atm forum format */
425 {
426 #define NONZERO (1 << 14)
427 
428 	unsigned exp = 0;
429 
430 	if (rate == 0)
431 		return 0;
432 
433 	rate <<= 9;
434 	while (rate > 0x3ff) {
435 		++exp;
436 		rate >>= 1;
437 	}
438 
439 	return (NONZERO | (exp << 9) | (rate & 0x1ff));
440 }
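/* worked example: rate_to_atmf(1000) shifts 1000 << 9 = 512000, then
 * normalizes it below 0x3ff with nine right shifts, giving exp = 9 and
 * rate & 0x1ff = 488; decoding 2^9 * (1 + 488/512) recovers 1000 cps
 */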
441 
442 static void __init
443 he_init_rx_lbfp0(struct he_dev *he_dev)
444 {
445 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
446 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
447 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
448 	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
449 
450 	lbufd_index = 0;
451 	lbm_offset = he_readl(he_dev, RCMLBM_BA);
452 
453 	he_writel(he_dev, lbufd_index, RLBF0_H);
454 
455 	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
456 		lbufd_index += 2;
457 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
458 
459 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
460 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
461 
462 		if (++lbuf_count == lbufs_per_row) {
463 			lbuf_count = 0;
464 			row_offset += he_dev->bytes_per_row;
465 		}
466 		lbm_offset += 4;
467 	}
468 
469 	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
470 	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
471 }
472 
473 static void __init
474 he_init_rx_lbfp1(struct he_dev *he_dev)
475 {
476 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
477 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
478 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
479 	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
480 
481 	lbufd_index = 1;
482 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
483 
484 	he_writel(he_dev, lbufd_index, RLBF1_H);
485 
486 	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
487 		lbufd_index += 2;
488 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
489 
490 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
491 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
492 
493 		if (++lbuf_count == lbufs_per_row) {
494 			lbuf_count = 0;
495 			row_offset += he_dev->bytes_per_row;
496 		}
497 		lbm_offset += 4;
498 	}
499 
500 	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
501 	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
502 }
503 
504 static void __init
505 he_init_tx_lbfp(struct he_dev *he_dev)
506 {
507 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
508 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
509 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
510 	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
511 
512 	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
513 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
514 
515 	he_writel(he_dev, lbufd_index, TLBF_H);
516 
517 	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
518 		lbufd_index += 1;
519 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
520 
521 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
522 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
523 
524 		if (++lbuf_count == lbufs_per_row) {
525 			lbuf_count = 0;
526 			row_offset += he_dev->bytes_per_row;
527 		}
528 		lbm_offset += 2;
529 	}
530 
531 	he_writel(he_dev, lbufd_index - 1, TLBF_T);
532 }
533 
534 static int __init
535 he_init_tpdrq(struct he_dev *he_dev)
536 {
537 	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
538 		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
539 	if (he_dev->tpdrq_base == NULL) {
540 		hprintk("failed to alloc tpdrq\n");
541 		return -ENOMEM;
542 	}
543 	memset(he_dev->tpdrq_base, 0,
544 				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
545 
546 	he_dev->tpdrq_tail = he_dev->tpdrq_base;
547 	he_dev->tpdrq_head = he_dev->tpdrq_base;
548 
549 	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
550 	he_writel(he_dev, 0, TPDRQ_T);
551 	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
552 
553 	return 0;
554 }
555 
556 static void __init
557 he_init_cs_block(struct he_dev *he_dev)
558 {
559 	unsigned clock, rate, delta;
560 	int reg;
561 
562 	/* 5.1.7 cs block initialization */
563 
564 	for (reg = 0; reg < 0x20; ++reg)
565 		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
566 
567 	/* rate grid timer reload values */
568 
569 	clock = he_is622(he_dev) ? 66667000 : 50000000;
570 	rate = he_dev->atm_dev->link_rate;
571 	delta = rate / 16 / 2;
572 
573 	for (reg = 0; reg < 0x10; ++reg) {
574 		/* 2.4 internal transmit function
575 		 *
576 	 	 * we initialize the first row in the rate grid.
577 		 * values are period (in clock cycles) of timer
578 		 */
579 		unsigned period = clock / rate;
580 
581 		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
582 		rate -= delta;
583 	}
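	/* e.g., a 155 card has clock = 50000000 and link_rate = ATM_OC3_PCR
	 * (353207 cps), so the first reload value is 50000000 / 353207 = 141
	 * clock cycles per cell slot
	 */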
584 
585 	if (he_is622(he_dev)) {
586 		/* table 5.2 (4 cells per lbuf) */
587 		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
588 		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
589 		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
590 		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
591 		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
592 
593 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
594 		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
595 		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
596 		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
597 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
598 		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
599 		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
600 
601 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
602 
603 		/* table 5.8 */
604 		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
605 		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
606 		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
607 		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
608 		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
609 		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
610 
611 		/* table 5.9 */
612 		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
613 		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
614 	} else {
615 		/* table 5.1 (4 cells per lbuf) */
616 		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
617 		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
618 		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
619 		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
620 		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
621 
622 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
623 		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
624 		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
625 		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
626 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
627 		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
628 		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
629 
630 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
631 
632 		/* table 5.8 */
633 		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
634 		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
635 		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
636 		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
637 		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
638 		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
639 
640 		/* table 5.9 */
641 		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
642 		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
643 	}
644 
645 	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
646 
647 	for (reg = 0; reg < 0x8; ++reg)
648 		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
649 
650 }
651 
652 static int __init
653 he_init_cs_block_rcm(struct he_dev *he_dev)
654 {
655 	unsigned (*rategrid)[16][16];
656 	unsigned rate, delta;
657 	int i, j, reg;
658 
659 	unsigned rate_atmf, exp, man;
660 	unsigned long long rate_cps;
661 	int mult, buf, buf_limit = 4;
662 
663 	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
664 	if (!rategrid)
665 		return -ENOMEM;
666 
667 	/* initialize rate grid group table */
668 
669 	for (reg = 0x0; reg < 0xff; ++reg)
670 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
671 
672 	/* initialize rate controller groups */
673 
674 	for (reg = 0x100; reg < 0x1ff; ++reg)
675 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
676 
677 	/* initialize tNrm lookup table */
678 
679 	/* the manual makes reference to a routine in a sample driver
680 	   for proper configuration; fortunately, we only need this
681 	   in order to support abr connections */
682 
683 	/* initialize rate to group table */
684 
685 	rate = he_dev->atm_dev->link_rate;
686 	delta = rate / 32;
687 
688 	/*
689 	 * 2.4 transmit internal functions
690 	 *
691 	 * we construct a copy of the rate grid used by the scheduler
692 	 * in order to construct the rate to group table below
693 	 */
694 
695 	for (j = 0; j < 16; j++) {
696 		(*rategrid)[0][j] = rate;
697 		rate -= delta;
698 	}
699 
700 	for (i = 1; i < 16; i++)
701 		for (j = 0; j < 16; j++)
702 			if (i > 14)
703 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
704 			else
705 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
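	/* the resulting grid: row 0 runs from link_rate down to 17/32 of
	 * link_rate (rate - 15 * delta), rows 1 through 14 each halve the
	 * row above, and row 15 quarters row 14
	 */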
706 
707 	/*
708 	 * 2.4 transmit internal function
709 	 *
710 	 * this table maps the upper 5 bits of exponent and mantissa
711 	 * of the atm forum representation of the rate into an index
712 	 * on rate grid
713 	 */
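	/* worked example: rate_atmf = 0x12f gives man = (0x12f & 0x1f) << 4 = 240
	 * and exp = 0x12f >> 5 = 9, so rate_cps = (1 << 9) * (240 + 512) >> 9
	 * = 752 cells per second
	 */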
714 
715 	rate_atmf = 0;
716 	while (rate_atmf < 0x400) {
717 		man = (rate_atmf & 0x1f) << 4;
718 		exp = rate_atmf >> 5;
719 
720 		/*
721 			instead of '/ 512', use '>> 9' to prevent a call
722 			to __udivdi3 on x86 platforms
723 		*/
724 		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
725 
726 		if (rate_cps < 10)
727 			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
728 
729 		for (i = 255; i > 0; i--)
730 			if ((*rategrid)[i/16][i%16] >= rate_cps)
731 				break;	 /* pick nearest rate instead? */
732 
733 		/*
734 		 * each table entry is 16 bits: (rate grid index (8 bits)
735 		 * and a buffer limit (8 bits)
736 		 * there are two table entries in each 32-bit register
737 		 */
738 
739 #ifdef notdef
740 		buf = rate_cps * he_dev->tx_numbuffs /
741 				(he_dev->atm_dev->link_rate * 2);
742 #else
743 		/* this is pretty ugly, but it avoids __udivdi3 and is mostly correct */
744 		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
745 		if (rate_cps > (272 * mult))
746 			buf = 4;
747 		else if (rate_cps > (204 * mult))
748 			buf = 3;
749 		else if (rate_cps > (136 * mult))
750 			buf = 2;
751 		else if (rate_cps > (68 * mult))
752 			buf = 1;
753 		else
754 			buf = 0;
755 #endif
756 		if (buf > buf_limit)
757 			buf = buf_limit;
758 		reg = (reg << 16) | ((i << 8) | buf);
759 
760 #define RTGTBL_OFFSET 0x400
761 
762 		if (rate_atmf & 0x1)
763 			he_writel_rcm(he_dev, reg,
764 				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
765 
766 		++rate_atmf;
767 	}
768 
769 	kfree(rategrid);
770 	return 0;
771 }
772 
773 static int __init
774 he_init_group(struct he_dev *he_dev, int group)
775 {
776 	int i;
777 
778 #ifdef USE_RBPS
779 	/* small buffer pool */
780 #ifdef USE_RBPS_POOL
781 	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
782 			CONFIG_RBPS_BUFSIZE, 8, 0, SLAB_KERNEL);
783 	if (he_dev->rbps_pool == NULL) {
784 		hprintk("unable to create rbps pages\n");
785 		return -ENOMEM;
786 	}
787 #else /* !USE_RBPS_POOL */
788 	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
789 		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
790 	if (he_dev->rbps_pages == NULL) {
791 		hprintk("unable to create rbps page pool\n");
792 		return -ENOMEM;
793 	}
794 #endif /* USE_RBPS_POOL */
795 
796 	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
797 		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
798 	if (he_dev->rbps_base == NULL) {
799 		hprintk("failed to alloc rbps\n");
800 		return -ENOMEM;
801 	}
802 	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
803 	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
804 
805 	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
806 		dma_addr_t dma_handle;
807 		void *cpuaddr;
808 
809 #ifdef USE_RBPS_POOL
810 		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
811 		if (cpuaddr == NULL)
812 			return -ENOMEM;
813 #else
814 		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
815 		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
816 #endif
817 
818 		he_dev->rbps_virt[i].virt = cpuaddr;
819 		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
820 		he_dev->rbps_base[i].phys = dma_handle;
821 
822 	}
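	/* the index stored via RBP_INDEX_OFF lets he_service_rbrq() map a
	 * buffer returned by the adapter back to its cpu address in
	 * rbps_virt[] without scanning the pool
	 */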
823 	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
824 
825 	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
826 	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
827 						G0_RBPS_T + (group * 32));
828 	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
829 						G0_RBPS_BS + (group * 32));
830 	he_writel(he_dev,
831 			RBP_THRESH(CONFIG_RBPS_THRESH) |
832 			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
833 			RBP_INT_ENB,
834 						G0_RBPS_QI + (group * 32));
835 #else /* !USE_RBPS */
836 	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
837 	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
838 	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
839 	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
840 						G0_RBPS_BS + (group * 32));
841 #endif /* USE_RBPS */
842 
843 	/* large buffer pool */
844 #ifdef USE_RBPL_POOL
845 	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
846 			CONFIG_RBPL_BUFSIZE, 8, 0, SLAB_KERNEL);
847 	if (he_dev->rbpl_pool == NULL) {
848 		hprintk("unable to create rbpl pool\n");
849 		return -ENOMEM;
850 	}
851 #else /* !USE_RBPL_POOL */
852 	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
853 		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
854 	if (he_dev->rbpl_pages == NULL) {
855 		hprintk("unable to create rbpl pages\n");
856 		return -ENOMEM;
857 	}
858 #endif /* USE_RBPL_POOL */
859 
860 	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
861 		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
862 	if (he_dev->rbpl_base == NULL) {
863 		hprintk("failed to alloc rbpl\n");
864 		return -ENOMEM;
865 	}
866 	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
867 	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
868 
869 	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
870 		dma_addr_t dma_handle;
871 		void *cpuaddr;
872 
873 #ifdef USE_RBPL_POOL
874 		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
875 		if (cpuaddr == NULL)
876 			return -ENOMEM;
877 #else
878 		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
879 		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
880 #endif
881 
882 		he_dev->rbpl_virt[i].virt = cpuaddr;
883 		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
884 		he_dev->rbpl_base[i].phys = dma_handle;
885 	}
886 	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
887 
888 	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
889 	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
890 						G0_RBPL_T + (group * 32));
891 	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
892 						G0_RBPL_BS + (group * 32));
893 	he_writel(he_dev,
894 			RBP_THRESH(CONFIG_RBPL_THRESH) |
895 			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
896 			RBP_INT_ENB,
897 						G0_RBPL_QI + (group * 32));
898 
899 	/* rx buffer ready queue */
900 
901 	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
902 		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
903 	if (he_dev->rbrq_base == NULL) {
904 		hprintk("failed to allocate rbrq\n");
905 		return -ENOMEM;
906 	}
907 	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
908 
909 	he_dev->rbrq_head = he_dev->rbrq_base;
910 	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
911 	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
912 	he_writel(he_dev,
913 		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
914 						G0_RBRQ_Q + (group * 16));
915 	if (irq_coalesce) {
916 		hprintk("coalescing interrupts\n");
917 		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
918 						G0_RBRQ_I + (group * 16));
919 	} else
920 		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
921 						G0_RBRQ_I + (group * 16));
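	/* i.e., with coalescing enabled the adapter defers the group 0
	 * interrupt until 7 rbrq entries have queued or the RBRQ_TIME(768)
	 * timer expires, instead of interrupting for every received pdu
	 */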
922 
923 	/* tx buffer ready queue */
924 
925 	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
926 		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
927 	if (he_dev->tbrq_base == NULL) {
928 		hprintk("failed to allocate tbrq\n");
929 		return -ENOMEM;
930 	}
931 	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
932 
933 	he_dev->tbrq_head = he_dev->tbrq_base;
934 
935 	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
936 	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
937 	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
938 	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
939 
940 	return 0;
941 }
942 
943 static int __init
944 he_init_irq(struct he_dev *he_dev)
945 {
946 	int i;
947 
948 	/* 2.9.3.5  tail offset for each interrupt queue is located after the
949 		    end of the interrupt queue */
950 
951 	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
952 			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
953 	if (he_dev->irq_base == NULL) {
954 		hprintk("failed to allocate irq\n");
955 		return -ENOMEM;
956 	}
957 	he_dev->irq_tailoffset = (unsigned *)
958 					&he_dev->irq_base[CONFIG_IRQ_SIZE];
959 	*he_dev->irq_tailoffset = 0;
960 	he_dev->irq_head = he_dev->irq_base;
961 	he_dev->irq_tail = he_dev->irq_base;
962 
963 	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
964 		he_dev->irq_base[i].isw = ITYPE_INVALID;
965 
966 	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
967 	he_writel(he_dev,
968 		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
969 								IRQ0_HEAD);
970 	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
971 	he_writel(he_dev, 0x0, IRQ0_DATA);
972 
973 	he_writel(he_dev, 0x0, IRQ1_BASE);
974 	he_writel(he_dev, 0x0, IRQ1_HEAD);
975 	he_writel(he_dev, 0x0, IRQ1_CNTL);
976 	he_writel(he_dev, 0x0, IRQ1_DATA);
977 
978 	he_writel(he_dev, 0x0, IRQ2_BASE);
979 	he_writel(he_dev, 0x0, IRQ2_HEAD);
980 	he_writel(he_dev, 0x0, IRQ2_CNTL);
981 	he_writel(he_dev, 0x0, IRQ2_DATA);
982 
983 	he_writel(he_dev, 0x0, IRQ3_BASE);
984 	he_writel(he_dev, 0x0, IRQ3_HEAD);
985 	he_writel(he_dev, 0x0, IRQ3_CNTL);
986 	he_writel(he_dev, 0x0, IRQ3_DATA);
987 
988 	/* 2.9.3.2 interrupt queue mapping registers */
989 
990 	he_writel(he_dev, 0x0, GRP_10_MAP);
991 	he_writel(he_dev, 0x0, GRP_32_MAP);
992 	he_writel(he_dev, 0x0, GRP_54_MAP);
993 	he_writel(he_dev, 0x0, GRP_76_MAP);
994 
995 	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) {
996 		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
997 		return -EINVAL;
998 	}
999 
1000 	he_dev->irq = he_dev->pci_dev->irq;
1001 
1002 	return 0;
1003 }
1004 
1005 static int __init
1006 he_start(struct atm_dev *dev)
1007 {
1008 	struct he_dev *he_dev;
1009 	struct pci_dev *pci_dev;
1010 
1011 	u16 command;
1012 	u32 gen_cntl_0, host_cntl, lb_swap;
1013 	u8 cache_size, timer;
1014 
1015 	unsigned err;
1016 	unsigned int status, reg;
1017 	int i, group;
1018 
1019 	he_dev = HE_DEV(dev);
1020 	pci_dev = he_dev->pci_dev;
1021 
1022 	he_dev->membase = pci_dev->resource[0].start;
1023 	HPRINTK("membase = 0x%lx  irq = %d.\n", he_dev->membase, pci_dev->irq);
1024 
1025 	/*
1026 	 * pci bus controller initialization
1027 	 */
1028 
1029 	/* 4.3 pci bus controller-specific initialization */
1030 	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1031 		hprintk("can't read GEN_CNTL_0\n");
1032 		return -EINVAL;
1033 	}
1034 	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1035 	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1036 		hprintk("can't write GEN_CNTL_0.\n");
1037 		return -EINVAL;
1038 	}
1039 
1040 	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1041 		hprintk("can't read PCI_COMMAND.\n");
1042 		return -EINVAL;
1043 	}
1044 
1045 	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1046 	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1047 		hprintk("can't enable memory.\n");
1048 		return -EINVAL;
1049 	}
1050 
1051 	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1052 		hprintk("can't read cache line size?\n");
1053 		return -EINVAL;
1054 	}
1055 
1056 	if (cache_size < 16) {
1057 		cache_size = 16;
1058 		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1059 			hprintk("can't set cache line size to %d\n", cache_size);
1060 	}
1061 
1062 	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1063 		hprintk("can't read latency timer?\n");
1064 		return -EINVAL;
1065 	}
1066 
1067 	/* from table 3.9
1068 	 *
1069 	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1070 	 *
1071 	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1072 	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1073 	 *
1074 	 */
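	/* hence, for the worst case: LAT_TIMER = 1 + 16 + 192 = 209 */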
1075 #define LAT_TIMER 209
1076 	if (timer < LAT_TIMER) {
1077 		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1078 		timer = LAT_TIMER;
1079 		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1080 			hprintk("can't set latency timer to %d\n", timer);
1081 	}
1082 
1083 	if (!(he_dev->membase = (unsigned long) ioremap(he_dev->membase, HE_REGMAP_SIZE))) {
1084 		hprintk("can't set up page mapping\n");
1085 		return -EINVAL;
1086 	}
1087 
1088 	/* 4.4 card reset */
1089 	he_writel(he_dev, 0x0, RESET_CNTL);
1090 	he_writel(he_dev, 0xff, RESET_CNTL);
1091 
1092 	udelay(16*1000);	/* 16 ms */
1093 	status = he_readl(he_dev, RESET_CNTL);
1094 	if ((status & BOARD_RST_STATUS) == 0) {
1095 		hprintk("reset failed\n");
1096 		return -EINVAL;
1097 	}
1098 
1099 	/* 4.5 set bus width */
1100 	host_cntl = he_readl(he_dev, HOST_CNTL);
1101 	if (host_cntl & PCI_BUS_SIZE64)
1102 		gen_cntl_0 |= ENBL_64;
1103 	else
1104 		gen_cntl_0 &= ~ENBL_64;
1105 
1106 	if (disable64 == 1) {
1107 		hprintk("disabling 64-bit pci bus transfers\n");
1108 		gen_cntl_0 &= ~ENBL_64;
1109 	}
1110 
1111 	if (gen_cntl_0 & ENBL_64)
1112 		hprintk("64-bit transfers enabled\n");
1113 
1114 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1115 
1116 	/* 4.7 read prom contents */
1117 	for (i = 0; i < PROD_ID_LEN; ++i)
1118 		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1119 
1120 	he_dev->media = read_prom_byte(he_dev, MEDIA);
1121 
1122 	for (i = 0; i < 6; ++i)
1123 		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1124 
1125 	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1126 				he_dev->prod_id,
1127 					he_dev->media & 0x40 ? "SM" : "MM",
1128 						dev->esi[0],
1129 						dev->esi[1],
1130 						dev->esi[2],
1131 						dev->esi[3],
1132 						dev->esi[4],
1133 						dev->esi[5]);
1134 	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1135 						ATM_OC12_PCR : ATM_OC3_PCR;
1136 
1137 	/* 4.6 set host endianness */
1138 	lb_swap = he_readl(he_dev, LB_SWAP);
1139 	if (he_is622(he_dev))
1140 		lb_swap &= ~XFER_SIZE;		/* 4 cells */
1141 	else
1142 		lb_swap |= XFER_SIZE;		/* 8 cells */
1143 #ifdef __BIG_ENDIAN
1144 	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1145 #else
1146 	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1147 			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1148 #endif /* __BIG_ENDIAN */
1149 	he_writel(he_dev, lb_swap, LB_SWAP);
1150 
1151 	/* 4.8 sdram controller initialization */
1152 	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1153 
1154 	/* 4.9 initialize rnum value */
1155 	lb_swap |= SWAP_RNUM_MAX(0xf);
1156 	he_writel(he_dev, lb_swap, LB_SWAP);
1157 
1158 	/* 4.10 initialize the interrupt queues */
1159 	if ((err = he_init_irq(he_dev)) != 0)
1160 		return err;
1161 
1162 #ifdef USE_TASKLET
1163 	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
1164 #endif
1165 	spin_lock_init(&he_dev->global_lock);
1166 
1167 	/* 4.11 enable pci bus controller state machines */
1168 	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1169 				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1170 	he_writel(he_dev, host_cntl, HOST_CNTL);
1171 
1172 	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1173 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1174 
1175 	/*
1176 	 * atm network controller initialization
1177 	 */
1178 
1179 	/* 5.1.1 generic configuration state */
1180 
1181 	/*
1182 	 *		local (cell) buffer memory map
1183 	 *
1184 	 *             HE155                          HE622
1185 	 *
1186 	 *        0 ____________1023 bytes  0 _______________________2047 bytes
1187 	 *         |            |            |                   |   |
1188 	 *         |  utility   |            |        rx0        |   |
1189 	 *        5|____________|         255|___________________| u |
1190 	 *        6|            |         256|                   | t |
1191 	 *         |            |            |                   | i |
1192 	 *         |    rx0     |     row    |        tx         | l |
1193 	 *         |            |            |                   | i |
1194 	 *         |            |         767|___________________| t |
1195 	 *      517|____________|         768|                   | y |
1196 	 * row  518|            |            |        rx1        |   |
1197 	 *         |            |        1023|___________________|___|
1198 	 *         |            |
1199 	 *         |    tx      |
1200 	 *         |            |
1201 	 *         |            |
1202 	 *     1535|____________|
1203 	 *     1536|            |
1204 	 *         |    rx1     |
1205 	 *     2047|____________|
1206 	 *
1207 	 */
1208 
1209 	/* total 4096 connections */
1210 	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1211 	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1212 
1213 	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1214 		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1215 		return -ENODEV;
1216 	}
1217 
1218 	if (nvpibits != -1) {
1219 		he_dev->vpibits = nvpibits;
1220 		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1221 	}
1222 
1223 	if (nvcibits != -1) {
1224 		he_dev->vcibits = nvcibits;
1225 		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1226 	}
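	/* e.g., assuming HE_MAXCIDBITS is 12 (4096 connections), loading the
	 * module with nvpibits=2 gives vpibits = 2 and vcibits = 10
	 */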
1227 
1228 
1229 	if (he_is622(he_dev)) {
1230 		he_dev->cells_per_row = 40;
1231 		he_dev->bytes_per_row = 2048;
1232 		he_dev->r0_numrows = 256;
1233 		he_dev->tx_numrows = 512;
1234 		he_dev->r1_numrows = 256;
1235 		he_dev->r0_startrow = 0;
1236 		he_dev->tx_startrow = 256;
1237 		he_dev->r1_startrow = 768;
1238 	} else {
1239 		he_dev->cells_per_row = 20;
1240 		he_dev->bytes_per_row = 1024;
1241 		he_dev->r0_numrows = 512;
1242 		he_dev->tx_numrows = 1018;
1243 		he_dev->r1_numrows = 512;
1244 		he_dev->r0_startrow = 6;
1245 		he_dev->tx_startrow = 518;
1246 		he_dev->r1_startrow = 1536;
1247 	}
1248 
1249 	he_dev->cells_per_lbuf = 4;
1250 	he_dev->buffer_limit = 4;
1251 	he_dev->r0_numbuffs = he_dev->r0_numrows *
1252 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1253 	if (he_dev->r0_numbuffs > 2560)
1254 		he_dev->r0_numbuffs = 2560;
1255 
1256 	he_dev->r1_numbuffs = he_dev->r1_numrows *
1257 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1258 	if (he_dev->r1_numbuffs > 2560)
1259 		he_dev->r1_numbuffs = 2560;
1260 
1261 	he_dev->tx_numbuffs = he_dev->tx_numrows *
1262 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1263 	if (he_dev->tx_numbuffs > 5120)
1264 		he_dev->tx_numbuffs = 5120;
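	/* checking the arithmetic above for a 155 card: r0/r1 give
	 * 512 rows * 20 cells / 4 cells per lbuf = 2560 buffers (exactly the
	 * cap), and tx gives 1018 * 20 / 4 = 5090, just under the 5120 cap
	 */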
1265 
1266 	/* 5.1.2 configure hardware dependent registers */
1267 
1268 	he_writel(he_dev,
1269 		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1270 		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1271 		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1272 		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1273 								LBARB);
1274 
1275 	he_writel(he_dev, BANK_ON |
1276 		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1277 								SDRAMCON);
1278 
1279 	he_writel(he_dev,
1280 		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1281 						RM_RW_WAIT(1), RCMCONFIG);
1282 	he_writel(he_dev,
1283 		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1284 						TM_RW_WAIT(1), TCMCONFIG);
1285 
1286 	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1287 
1288 	he_writel(he_dev,
1289 		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1290 		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1291 		RX_VALVP(he_dev->vpibits) |
1292 		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);
1293 
1294 	he_writel(he_dev, DRF_THRESH(0x20) |
1295 		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1296 		TX_VCI_MASK(he_dev->vcibits) |
1297 		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);
1298 
1299 	he_writel(he_dev, 0x0, TXAAL5_PROTO);
1300 
1301 	he_writel(he_dev, PHY_INT_ENB |
1302 		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1303 								RH_CONFIG);
1304 
1305 	/* 5.1.3 initialize connection memory */
1306 
1307 	for (i = 0; i < TCM_MEM_SIZE; ++i)
1308 		he_writel_tcm(he_dev, 0, i);
1309 
1310 	for (i = 0; i < RCM_MEM_SIZE; ++i)
1311 		he_writel_rcm(he_dev, 0, i);
1312 
1313 	/*
1314 	 *	transmit connection memory map
1315 	 *
1316 	 *                  tx memory
1317 	 *          0x0 ___________________
1318 	 *             |                   |
1319 	 *             |                   |
1320 	 *             |       TSRa        |
1321 	 *             |                   |
1322 	 *             |                   |
1323 	 *       0x8000|___________________|
1324 	 *             |                   |
1325 	 *             |       TSRb        |
1326 	 *       0xc000|___________________|
1327 	 *             |                   |
1328 	 *             |       TSRc        |
1329 	 *       0xe000|___________________|
1330 	 *             |       TSRd        |
1331 	 *       0xf000|___________________|
1332 	 *             |       tmABR       |
1333 	 *      0x10000|___________________|
1334 	 *             |                   |
1335 	 *             |       tmTPD       |
1336 	 *             |___________________|
1337 	 *             |                   |
1338 	 *                      ....
1339 	 *      0x1ffff|___________________|
1340 	 *
1341 	 *
1342 	 */
1343 
1344 	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1345 	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1346 	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1347 	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1348 	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1349 
1350 
1351 	/*
1352 	 *	receive connection memory map
1353 	 *
1354 	 *          0x0 ___________________
1355 	 *             |                   |
1356 	 *             |                   |
1357 	 *             |       RSRa        |
1358 	 *             |                   |
1359 	 *             |                   |
1360 	 *       0x8000|___________________|
1361 	 *             |                   |
1362 	 *             |             rx0/1 |
1363 	 *             |       LBM         |   link lists of local
1364 	 *             |             tx    |   buffer memory
1365 	 *             |                   |
1366 	 *       0xd000|___________________|
1367 	 *             |                   |
1368 	 *             |      rmABR        |
1369 	 *       0xe000|___________________|
1370 	 *             |                   |
1371 	 *             |       RSRb        |
1372 	 *             |___________________|
1373 	 *             |                   |
1374 	 *                      ....
1375 	 *       0xffff|___________________|
1376 	 */
1377 
1378 	he_writel(he_dev, 0x08000, RCMLBM_BA);
1379 	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1380 	he_writel(he_dev, 0x0d800, RCMABR_BA);
1381 
1382 	/* 5.1.4 initialize local buffer free pools linked lists */
1383 
1384 	he_init_rx_lbfp0(he_dev);
1385 	he_init_rx_lbfp1(he_dev);
1386 
1387 	he_writel(he_dev, 0x0, RLBC_H);
1388 	he_writel(he_dev, 0x0, RLBC_T);
1389 	he_writel(he_dev, 0x0, RLBC_H2);
1390 
1391 	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
1392 	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */
1393 
1394 	he_init_tx_lbfp(he_dev);
1395 
1396 	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1397 
1398 	/* 5.1.5 initialize intermediate receive queues */
1399 
1400 	if (he_is622(he_dev)) {
1401 		he_writel(he_dev, 0x000f, G0_INMQ_S);
1402 		he_writel(he_dev, 0x200f, G0_INMQ_L);
1403 
1404 		he_writel(he_dev, 0x001f, G1_INMQ_S);
1405 		he_writel(he_dev, 0x201f, G1_INMQ_L);
1406 
1407 		he_writel(he_dev, 0x002f, G2_INMQ_S);
1408 		he_writel(he_dev, 0x202f, G2_INMQ_L);
1409 
1410 		he_writel(he_dev, 0x003f, G3_INMQ_S);
1411 		he_writel(he_dev, 0x203f, G3_INMQ_L);
1412 
1413 		he_writel(he_dev, 0x004f, G4_INMQ_S);
1414 		he_writel(he_dev, 0x204f, G4_INMQ_L);
1415 
1416 		he_writel(he_dev, 0x005f, G5_INMQ_S);
1417 		he_writel(he_dev, 0x205f, G5_INMQ_L);
1418 
1419 		he_writel(he_dev, 0x006f, G6_INMQ_S);
1420 		he_writel(he_dev, 0x206f, G6_INMQ_L);
1421 
1422 		he_writel(he_dev, 0x007f, G7_INMQ_S);
1423 		he_writel(he_dev, 0x207f, G7_INMQ_L);
1424 	} else {
1425 		he_writel(he_dev, 0x0000, G0_INMQ_S);
1426 		he_writel(he_dev, 0x0008, G0_INMQ_L);
1427 
1428 		he_writel(he_dev, 0x0001, G1_INMQ_S);
1429 		he_writel(he_dev, 0x0009, G1_INMQ_L);
1430 
1431 		he_writel(he_dev, 0x0002, G2_INMQ_S);
1432 		he_writel(he_dev, 0x000a, G2_INMQ_L);
1433 
1434 		he_writel(he_dev, 0x0003, G3_INMQ_S);
1435 		he_writel(he_dev, 0x000b, G3_INMQ_L);
1436 
1437 		he_writel(he_dev, 0x0004, G4_INMQ_S);
1438 		he_writel(he_dev, 0x000c, G4_INMQ_L);
1439 
1440 		he_writel(he_dev, 0x0005, G5_INMQ_S);
1441 		he_writel(he_dev, 0x000d, G5_INMQ_L);
1442 
1443 		he_writel(he_dev, 0x0006, G6_INMQ_S);
1444 		he_writel(he_dev, 0x000e, G6_INMQ_L);
1445 
1446 		he_writel(he_dev, 0x0007, G7_INMQ_S);
1447 		he_writel(he_dev, 0x000f, G7_INMQ_L);
1448 	}
1449 
1450 	/* 5.1.6 application tunable parameters */
1451 
1452 	he_writel(he_dev, 0x0, MCC);
1453 	he_writel(he_dev, 0x0, OEC);
1454 	he_writel(he_dev, 0x0, DCC);
1455 	he_writel(he_dev, 0x0, CEC);
1456 
1457 	/* 5.1.7 cs block initialization */
1458 
1459 	he_init_cs_block(he_dev);
1460 
1461 	/* 5.1.8 cs block connection memory initialization */
1462 
1463 	if (he_init_cs_block_rcm(he_dev) < 0)
1464 		return -ENOMEM;
1465 
1466 	/* 5.1.10 initialize host structures */
1467 
1468 	he_init_tpdrq(he_dev);
1469 
1470 #ifdef USE_TPD_POOL
1471 	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1472 		sizeof(struct he_tpd), TPD_ALIGNMENT, 0, SLAB_KERNEL);
1473 	if (he_dev->tpd_pool == NULL) {
1474 		hprintk("unable to create tpd pci_pool\n");
1475 		return -ENOMEM;
1476 	}
1477 
1478 	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1479 #else
1480 	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
1481 			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
1482 	if (!he_dev->tpd_base)
1483 		return -ENOMEM;
1484 
1485 	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1486 		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
1487 		he_dev->tpd_base[i].inuse = 0;
1488 	}
1489 
1490 	he_dev->tpd_head = he_dev->tpd_base;
1491 	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
1492 #endif
1493 
1494 	if (he_init_group(he_dev, 0) != 0)
1495 		return -ENOMEM;
1496 
1497 	for (group = 1; group < HE_NUM_GROUPS; ++group) {
1498 		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1499 		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1500 		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1501 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1502 						G0_RBPS_BS + (group * 32));
1503 
1504 		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1505 		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1506 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1507 						G0_RBPL_QI + (group * 32));
1508 		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1509 
1510 		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1511 		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1512 		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1513 						G0_RBRQ_Q + (group * 16));
1514 		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1515 
1516 		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1517 		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1518 		he_writel(he_dev, TBRQ_THRESH(0x1),
1519 						G0_TBRQ_THRESH + (group * 16));
1520 		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1521 	}
1522 
1523 	/* host status page */
1524 
1525 	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1526 				sizeof(struct he_hsp), &he_dev->hsp_phys);
1527 	if (he_dev->hsp == NULL) {
1528 		hprintk("failed to allocate host status page\n");
1529 		return -ENOMEM;
1530 	}
1531 	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1532 	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1533 
1534 	/* initialize framer */
1535 
1536 #ifdef CONFIG_ATM_HE_USE_SUNI
1537 	suni_init(he_dev->atm_dev);
1538 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1539 		he_dev->atm_dev->phy->start(he_dev->atm_dev);
1540 #endif /* CONFIG_ATM_HE_USE_SUNI */
1541 
1542 	if (sdh) {
1543 		/* this really should be in suni.c but for now... */
1544 
1545 		int val;
1546 
1547 		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1548 		val = (val & ~SUNI_TPOP_APM_S) | ( 0x2 << SUNI_TPOP_APM_S_SHIFT);
1549 		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1550 	}
1551 
1552 	/* 5.1.12 enable transmit and receive */
1553 
1554 	reg = he_readl_mbox(he_dev, CS_ERCTL0);
1555 	reg |= TX_ENABLE|ER_ENABLE;
1556 	he_writel_mbox(he_dev, reg, CS_ERCTL0);
1557 
1558 	reg = he_readl(he_dev, RC_CONFIG);
1559 	reg |= RX_ENABLE;
1560 	he_writel(he_dev, reg, RC_CONFIG);
1561 
1562 	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1563 		he_dev->cs_stper[i].inuse = 0;
1564 		he_dev->cs_stper[i].pcr = -1;
1565 	}
1566 	he_dev->total_bw = 0;
1567 
1568 
1569 	/* atm linux initialization */
1570 
1571 	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1572 	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1573 
1574 	he_dev->irq_peak = 0;
1575 	he_dev->rbrq_peak = 0;
1576 	he_dev->rbpl_peak = 0;
1577 	he_dev->tbrq_peak = 0;
1578 
1579 	HPRINTK("hell bent for leather!\n");
1580 
1581 	return 0;
1582 }
1583 
1584 static void
1585 he_stop(struct he_dev *he_dev)
1586 {
1587 	u16 command;
1588 	u32 gen_cntl_0, reg;
1589 	struct pci_dev *pci_dev;
1590 
1591 	pci_dev = he_dev->pci_dev;
1592 
1593 	/* disable interrupts */
1594 
1595 	if (he_dev->membase) {
1596 		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1597 		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1598 		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1599 
1600 #ifdef USE_TASKLET
1601 		tasklet_disable(&he_dev->tasklet);
1602 #endif
1603 
1604 		/* disable recv and transmit */
1605 
1606 		reg = he_readl_mbox(he_dev, CS_ERCTL0);
1607 		reg &= ~(TX_ENABLE|ER_ENABLE);
1608 		he_writel_mbox(he_dev, reg, CS_ERCTL0);
1609 
1610 		reg = he_readl(he_dev, RC_CONFIG);
1611 		reg &= ~(RX_ENABLE);
1612 		he_writel(he_dev, reg, RC_CONFIG);
1613 	}
1614 
1615 #ifdef CONFIG_ATM_HE_USE_SUNI
1616 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1617 		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1618 #endif /* CONFIG_ATM_HE_USE_SUNI */
1619 
1620 	if (he_dev->irq)
1621 		free_irq(he_dev->irq, he_dev);
1622 
1623 	if (he_dev->irq_base)
1624 		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1625 			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1626 
1627 	if (he_dev->hsp)
1628 		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1629 						he_dev->hsp, he_dev->hsp_phys);
1630 
1631 	if (he_dev->rbpl_base) {
1632 #ifdef USE_RBPL_POOL
		int i;	/* i is only used inside the pool ifdefs */

1633 		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
1634 			void *cpuaddr = he_dev->rbpl_virt[i].virt;
1635 			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
1636 
1637 			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
1638 		}
1639 #else
1640 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1641 			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
1642 #endif
1643 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1644 			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1645 	}
1646 
1647 #ifdef USE_RBPL_POOL
1648 	if (he_dev->rbpl_pool)
1649 		pci_pool_destroy(he_dev->rbpl_pool);
1650 #endif
1651 
1652 #ifdef USE_RBPS
1653 	if (he_dev->rbps_base) {
1654 #ifdef USE_RBPS_POOL
		int i;	/* i is only used inside the pool ifdefs */

1655 		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
1656 			void *cpuaddr = he_dev->rbps_virt[i].virt;
1657 			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
1658 
1659 			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
1660 		}
1661 #else
1662 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1663 			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
1664 #endif
1665 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1666 			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
1667 	}
1668 
1669 #ifdef USE_RBPS_POOL
1670 	if (he_dev->rbps_pool)
1671 		pci_pool_destroy(he_dev->rbps_pool);
1672 #endif
1673 
1674 #endif /* USE_RBPS */
1675 
1676 	if (he_dev->rbrq_base)
1677 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1678 							he_dev->rbrq_base, he_dev->rbrq_phys);
1679 
1680 	if (he_dev->tbrq_base)
1681 		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1682 							he_dev->tbrq_base, he_dev->tbrq_phys);
1683 
1684 	if (he_dev->tpdrq_base)
1685 		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
1686 							he_dev->tpdrq_base, he_dev->tpdrq_phys);
1687 
1688 #ifdef USE_TPD_POOL
1689 	if (he_dev->tpd_pool)
1690 		pci_pool_destroy(he_dev->tpd_pool);
1691 #else
1692 	if (he_dev->tpd_base)
1693 		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
1694 							he_dev->tpd_base, he_dev->tpd_base_phys);
1695 #endif
1696 
1697 	if (he_dev->pci_dev) {
1698 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1699 		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1700 		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1701 	}
1702 
1703 	if (he_dev->membase)
1704 		iounmap((void *) he_dev->membase);
1705 }
1706 
1707 static struct he_tpd *
1708 __alloc_tpd(struct he_dev *he_dev)
1709 {
1710 #ifdef USE_TPD_POOL
1711 	struct he_tpd *tpd;
1712 	dma_addr_t dma_handle;
1713 
1714 	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
1715 	if (tpd == NULL)
1716 		return NULL;
1717 
1718 	tpd->status = TPD_ADDR(dma_handle);
1719 	tpd->reserved = 0;
1720 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1721 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1722 	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1723 
1724 	return tpd;
1725 #else
1726 	int i;
1727 
1728 	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1729 		++he_dev->tpd_head;
1730 		if (he_dev->tpd_head > he_dev->tpd_end) {
1731 			he_dev->tpd_head = he_dev->tpd_base;
1732 		}
1733 
1734 		if (!he_dev->tpd_head->inuse) {
1735 			he_dev->tpd_head->inuse = 1;
1736 			he_dev->tpd_head->status &= TPD_MASK;
1737 			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
1738 			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
1739 			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
1740 			return he_dev->tpd_head;
1741 		}
1742 	}
1743 	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
1744 	return NULL;
1745 #endif
1746 }
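
/*
 * a sketch (never compiled) of the normal tpd life cycle, condensed
 * from he_send()/he_service_tbrq() below: allocate, describe a single
 * dma buffer, hand the tpd to the adapter, and let the tbrq completion
 * path unmap and free it.  the parameters are placeholders.
 */
#ifdef notdef
static void
__tpd_lifecycle_sketch(struct he_dev *he_dev, struct atm_vcc *vcc,
				struct sk_buff *skb, unsigned cid)
{
	struct he_tpd *tpd = __alloc_tpd(he_dev);

	if (tpd == NULL)
		return;				/* out of tpds -- caller must drop */

	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data,
					skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;	/* last (only) buffer of this pdu */
	tpd->status |= TPD_CELLTYPE(TPD_USERCELL) | TPD_INT;
	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();					/* descriptor complete before enqueue */

	__enqueue_tpd(he_dev, tpd, cid);	/* he_service_tbrq() frees it later */
}
#endif /* notdef */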
1747 
1748 #define AAL5_LEN(buf,len) 						\
1749 			((((unsigned char *)(buf))[(len)-6] << 8) |	\
1750 				(((unsigned char *)(buf))[(len)-5]))
1751 
1752 /* 2.10.1.2 receive
1753  *
1754  * aal5 packets can optionally return the tcp checksum in the lower
1755  * 16 bits of the crc (RSR0_TCP_CKSUM)
1756  */
1757 
1758 #define TCP_CKSUM(buf,len) 						\
1759 			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1760 				(((unsigned char *)(buf))[(len)-1]))
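
/*
 * why these fixed offsets work: the adapter returns the entire aal5
 * cpcs-pdu, whose last eight bytes are the trailer
 *
 *	... payload/pad ... | UU | CPI | length (2) | crc-32 (4) |
 *	                      -8   -7    -6    -5     -4 ... -1
 *
 * so the 16-bit length field of a pdu_len byte pdu sits at bytes
 * [pdu_len-6]/[pdu_len-5], and with RSR0_TCP_CKSUM enabled the adapter
 * overwrites the low 16 bits of the (already verified) crc, i.e. bytes
 * [pdu_len-2]/[pdu_len-1].  worked example: for a 96-byte pdu,
 * AAL5_LEN(buf, 96) reads (buf[90] << 8) | buf[91].
 */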
1761 
1762 static int
1763 he_service_rbrq(struct he_dev *he_dev, int group)
1764 {
1765 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1766 				((unsigned long)he_dev->rbrq_base |
1767 					he_dev->hsp->group[group].rbrq_tail);
1768 	struct he_rbp *rbp = NULL;
1769 	unsigned cid, lastcid = -1;
1770 	unsigned buf_len = 0;
1771 	struct sk_buff *skb;
1772 	struct atm_vcc *vcc = NULL;
1773 	struct he_vcc *he_vcc;
1774 	struct he_iovec *iov;
1775 	int pdus_assembled = 0;
1776 	int updated = 0;
1777 
1778 	read_lock(&vcc_sklist_lock);
1779 	while (he_dev->rbrq_head != rbrq_tail) {
1780 		++updated;
1781 
1782 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1783 			he_dev->rbrq_head, group,
1784 			RBRQ_ADDR(he_dev->rbrq_head),
1785 			RBRQ_BUFLEN(he_dev->rbrq_head),
1786 			RBRQ_CID(he_dev->rbrq_head),
1787 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1788 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1789 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1790 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1791 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1792 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1793 
1794 #ifdef USE_RBPS
1795 		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1796 			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1797 		else
1798 #endif
1799 			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1800 
1801 		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1802 		cid = RBRQ_CID(he_dev->rbrq_head);
1803 
1804 		if (cid != lastcid)
1805 			vcc = __find_vcc(he_dev, cid);
1806 		lastcid = cid;
1807 
1808 		if (vcc == NULL) {
1809 			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
1810 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1811 				rbp->status &= ~RBP_LOANED;
1812 
1813 			goto next_rbrq_entry;
1814 		}
1815 
1816 		he_vcc = HE_VCC(vcc);
1817 		if (he_vcc == NULL) {
1818 			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
1819 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1820 				rbp->status &= ~RBP_LOANED;
1821 			goto next_rbrq_entry;
1822 		}
1823 
1824 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1825 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1826 			atomic_inc(&vcc->stats->rx_drop);
1827 			goto return_host_buffers;
1828 		}
1829 
1830 		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1831 		he_vcc->iov_tail->iov_len = buf_len;
1832 		he_vcc->pdu_len += buf_len;
1833 		++he_vcc->iov_tail;
1834 
1835 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1836 			lastcid = -1;
1837 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1838 			wake_up(&he_vcc->rx_waitq);
1839 			goto return_host_buffers;
1840 		}
1841 
1842 #ifdef notdef
1843 		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1844 			hprintk("iovec full!  cid 0x%x\n", cid);
1845 			goto return_host_buffers;
1846 		}
1847 #endif
1848 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1849 			goto next_rbrq_entry;
1850 
1851 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1852 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1853 			HPRINTK("%s%s (%d.%d)\n",
1854 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1855 							? "CRC_ERR " : "",
1856 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1857 							? "LEN_ERR" : "",
1858 							vcc->vpi, vcc->vci);
1859 			atomic_inc(&vcc->stats->rx_err);
1860 			goto return_host_buffers;
1861 		}
1862 
1863 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1864 							GFP_ATOMIC);
1865 		if (!skb) {
1866 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1867 			goto return_host_buffers;
1868 		}
1869 
1870 		if (rx_skb_reserve > 0)
1871 			skb_reserve(skb, rx_skb_reserve);
1872 
1873 		do_gettimeofday(&skb->stamp);
1874 
1875 		for (iov = he_vcc->iov_head;
1876 				iov < he_vcc->iov_tail; ++iov) {
1877 #ifdef USE_RBPS
1878 			if (iov->iov_base & RBP_SMALLBUF)
1879 				memcpy(skb_put(skb, iov->iov_len),
1880 					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1881 			else
1882 #endif
1883 				memcpy(skb_put(skb, iov->iov_len),
1884 					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1885 		}
1886 
1887 		switch (vcc->qos.aal) {
1888 			case ATM_AAL0:
1889 				/* 2.10.1.5 raw cell receive */
1890 				skb->len = ATM_AAL0_SDU;
1891 				skb->tail = skb->data + skb->len;
1892 				break;
1893 			case ATM_AAL5:
1894 				/* 2.10.1.2 aal5 receive */
1895 
1896 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1897 				skb->tail = skb->data + skb->len;
1898 #ifdef USE_CHECKSUM_HW
1899 				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1900 					skb->ip_summed = CHECKSUM_HW;
1901 					skb->csum = TCP_CKSUM(skb->data,
1902 							he_vcc->pdu_len);
1903 				}
1904 #endif
1905 				break;
1906 		}
1907 
1908 #ifdef should_never_happen
1909 		if (skb->len > vcc->qos.rxtp.max_sdu)
1910 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1911 #endif
1912 
1913 #ifdef notdef
1914 		ATM_SKB(skb)->vcc = vcc;
1915 #endif
1916 		vcc->push(vcc, skb);
1917 
1918 		atomic_inc(&vcc->stats->rx);
1919 
1920 return_host_buffers:
1921 		++pdus_assembled;
1922 
1923 		for (iov = he_vcc->iov_head;
1924 				iov < he_vcc->iov_tail; ++iov) {
1925 #ifdef USE_RBPS
1926 			if (iov->iov_base & RBP_SMALLBUF)
1927 				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1928 			else
1929 #endif
1930 				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1931 
1932 			rbp->status &= ~RBP_LOANED;
1933 		}
1934 
1935 		he_vcc->iov_tail = he_vcc->iov_head;
1936 		he_vcc->pdu_len = 0;
1937 
1938 next_rbrq_entry:
1939 		he_dev->rbrq_head = (struct he_rbrq *)
1940 				((unsigned long) he_dev->rbrq_base |
1941 					RBRQ_MASK(++he_dev->rbrq_head));
1942 
1943 	}
1944 	read_unlock(&vcc_sklist_lock);
1945 
1946 	if (updated) {
1947 		if (updated > he_dev->rbrq_peak)
1948 			he_dev->rbrq_peak = updated;
1949 
1950 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1951 						G0_RBRQ_H + (group * 16));
1952 	}
1953 
1954 	return pdus_assembled;
1955 }
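
/*
 * a note on the queue-walking idiom above (also used by the tbrq and
 * tpdrq code below): each ring is a power-of-two number of fixed-size
 * entries allocated on a size-aligned boundary, so the *_MASK() macros
 * can reduce an incremented entry pointer to a byte offset within the
 * ring and OR it back onto the base address.  worked example, assuming
 * a 16-entry ring of 8-byte entries based at 0x1000: head 0x1078 is
 * entry 15, ++head gives 0x1080, masking with (16*8 - 1) leaves 0x000,
 * and 0x1000 | 0x000 wraps cleanly back to entry 0.
 */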
1956 
1957 static void
1958 he_service_tbrq(struct he_dev *he_dev, int group)
1959 {
1960 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1961 				((unsigned long)he_dev->tbrq_base |
1962 					he_dev->hsp->group[group].tbrq_tail);
1963 	struct he_tpd *tpd;
1964 	int slot, updated = 0;
1965 #ifdef USE_TPD_POOL
1966 	struct list_head *p;
1967 #endif
1968 
1969 	/* 2.1.6 transmit buffer return queue */
1970 
1971 	while (he_dev->tbrq_head != tbrq_tail) {
1972 		++updated;
1973 
1974 		HPRINTK("tbrq%d 0x%x%s%s\n",
1975 			group,
1976 			TBRQ_TPD(he_dev->tbrq_head),
1977 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1978 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1979 #ifdef USE_TPD_POOL
1980 		tpd = NULL;
1981 		p = &he_dev->outstanding_tpds;
1982 		while ((p = p->next) != &he_dev->outstanding_tpds) {
1983 			struct he_tpd *__tpd = list_entry(p, struct he_tpd, entry);
1984 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1985 				tpd = __tpd;
1986 				list_del(&__tpd->entry);
1987 				break;
1988 			}
1989 		}
1990 
1991 		if (tpd == NULL) {
1992 			hprintk("unable to locate tpd for dma buffer %x\n",
1993 						TBRQ_TPD(he_dev->tbrq_head));
1994 			goto next_tbrq_entry;
1995 		}
1996 #else
1997 		tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
1998 #endif
1999 
2000 		if (TBRQ_EOS(he_dev->tbrq_head)) {
2001 			if (tpd->vcc) {	/* don't touch tpd->vcc until checked */
2002 				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2003 					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2004 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2005 			}
2006 			goto next_tbrq_entry;
2007 		}
2008 
2009 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2010 			if (tpd->iovec[slot].addr)
2011 				pci_unmap_single(he_dev->pci_dev,
2012 					tpd->iovec[slot].addr,
2013 					tpd->iovec[slot].len & TPD_LEN_MASK,
2014 							PCI_DMA_TODEVICE);
2015 			if (tpd->iovec[slot].len & TPD_LST)
2016 				break;
2017 
2018 		}
2019 
2020 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2021 			if (tpd->vcc && tpd->vcc->pop)
2022 				tpd->vcc->pop(tpd->vcc, tpd->skb);
2023 			else
2024 				dev_kfree_skb_any(tpd->skb);
2025 		}
2026 
2027 next_tbrq_entry:
2028 #ifdef USE_TPD_POOL
2029 		if (tpd)
2030 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2031 #else
2032 		tpd->inuse = 0;
2033 #endif
2034 		he_dev->tbrq_head = (struct he_tbrq *)
2035 				((unsigned long) he_dev->tbrq_base |
2036 					TBRQ_MASK(++he_dev->tbrq_head));
2037 	}
2038 
2039 	if (updated) {
2040 		if (updated > he_dev->tbrq_peak)
2041 			he_dev->tbrq_peak = updated;
2042 
2043 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2044 						G0_TBRQ_H + (group * 16));
2045 	}
2046 }
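
/*
 * note on the tpd lookup above: with USE_TPD_POOL the tpds live at
 * arbitrary dma addresses, so every enqueued tpd is kept on the
 * outstanding_tpds list and matched against the address reported in
 * the tbrq entry by a linear walk; without the pool the index can be
 * computed directly from the dma address (TPD_INDEX), trading that
 * flexibility for a fixed-size table.
 */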
2047 
2048 
2049 static void
2050 he_service_rbpl(struct he_dev *he_dev, int group)
2051 {
2052 	struct he_rbp *newtail;
2053 	struct he_rbp *rbpl_head;
2054 	int moved = 0;
2055 
2056 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2057 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2058 
2059 	for (;;) {
2060 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2061 						RBPL_MASK(he_dev->rbpl_tail+1));
2062 
2063 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2064 		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2065 			break;
2066 
2067 		newtail->status |= RBP_LOANED;
2068 		he_dev->rbpl_tail = newtail;
2069 		++moved;
2070 	}
2071 
2072 	if (moved)
2073 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2074 }
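
/*
 * the RBP_LOANED handshake in brief: the replenish routines mark an
 * entry as loaned when its buffer is handed to the adapter, and
 * he_service_rbrq() clears the bit after copying the received data
 * out.  replenishment stops at the first entry still on loan, or one
 * entry short of the hardware head (table 3.42), whichever comes
 * first, so the same host buffer is never loaned to the adapter twice.
 */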
2075 
2076 #ifdef USE_RBPS
2077 static void
2078 he_service_rbps(struct he_dev *he_dev, int group)
2079 {
2080 	struct he_rbp *newtail;
2081 	struct he_rbp *rbps_head;
2082 	int moved = 0;
2083 
2084 	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2085 					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2086 
2087 	for (;;) {
2088 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2089 						RBPS_MASK(he_dev->rbps_tail+1));
2090 
2091 		/* table 3.42 -- rbps_tail should never be set to rbps_head */
2092 		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2093 			break;
2094 
2095 		newtail->status |= RBP_LOANED;
2096 		he_dev->rbps_tail = newtail;
2097 		++moved;
2098 	}
2099 
2100 	if (moved)
2101 		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2102 }
2103 #endif /* USE_RBPS */
2104 
2105 static void
2106 he_tasklet(unsigned long data)
2107 {
2108 	unsigned long flags;
2109 	struct he_dev *he_dev = (struct he_dev *) data;
2110 	int group, type;
2111 	int updated = 0;
2112 
2113 	HPRINTK("tasklet (0x%lx)\n", data);
2114 #ifdef USE_TASKLET
2115 	spin_lock_irqsave(&he_dev->global_lock, flags);
2116 #endif
2117 
2118 	while (he_dev->irq_head != he_dev->irq_tail) {
2119 		++updated;
2120 
2121 		type = ITYPE_TYPE(he_dev->irq_head->isw);
2122 		group = ITYPE_GROUP(he_dev->irq_head->isw);
2123 
2124 		switch (type) {
2125 			case ITYPE_RBRQ_THRESH:
2126 				HPRINTK("rbrq%d threshold\n", group);
2127 				/* fall through */
2128 			case ITYPE_RBRQ_TIMER:
2129 				if (he_service_rbrq(he_dev, group)) {
2130 					he_service_rbpl(he_dev, group);
2131 #ifdef USE_RBPS
2132 					he_service_rbps(he_dev, group);
2133 #endif /* USE_RBPS */
2134 				}
2135 				break;
2136 			case ITYPE_TBRQ_THRESH:
2137 				HPRINTK("tbrq%d threshold\n", group);
2138 				/* fall through */
2139 			case ITYPE_TPD_COMPLETE:
2140 				he_service_tbrq(he_dev, group);
2141 				break;
2142 			case ITYPE_RBPL_THRESH:
2143 				he_service_rbpl(he_dev, group);
2144 				break;
2145 			case ITYPE_RBPS_THRESH:
2146 #ifdef USE_RBPS
2147 				he_service_rbps(he_dev, group);
2148 #endif /* USE_RBPS */
2149 				break;
2150 			case ITYPE_PHY:
2151 				HPRINTK("phy interrupt\n");
2152 #ifdef CONFIG_ATM_HE_USE_SUNI
2153 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2154 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2155 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2156 				spin_lock_irqsave(&he_dev->global_lock, flags);
2157 #endif
2158 				break;
2159 			case ITYPE_OTHER:
2160 				switch (type|group) {
2161 					case ITYPE_PARITY:
2162 						hprintk("parity error\n");
2163 						break;
2164 					case ITYPE_ABORT:
2165 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2166 						break;
2167 				}
2168 				break;
2169 			case ITYPE_TYPE(ITYPE_INVALID):
2170 				/* see 8.1.1 -- check all queues */
2171 
2172 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2173 
2174 				he_service_rbrq(he_dev, 0);
2175 				he_service_rbpl(he_dev, 0);
2176 #ifdef USE_RBPS
2177 				he_service_rbps(he_dev, 0);
2178 #endif /* USE_RBPS */
2179 				he_service_tbrq(he_dev, 0);
2180 				break;
2181 			default:
2182 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2183 		}
2184 
2185 		he_dev->irq_head->isw = ITYPE_INVALID;
2186 
2187 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2188 	}
2189 
2190 	if (updated) {
2191 		if (updated > he_dev->irq_peak)
2192 			he_dev->irq_peak = updated;
2193 
2194 		he_writel(he_dev,
2195 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2196 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2197 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2198 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2199 	}
2200 #ifdef USE_TASKLET
2201 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2202 #endif
2203 }
2204 
2205 static irqreturn_t
2206 he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
2207 {
2208 	unsigned long flags;
2209 	struct he_dev *he_dev = (struct he_dev * )dev_id;
2210 	int handled = 0;
2211 
2212 	if (he_dev == NULL)
2213 		return IRQ_NONE;
2214 
2215 	spin_lock_irqsave(&he_dev->global_lock, flags);
2216 
2217 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2218 						(*he_dev->irq_tailoffset << 2));
2219 
2220 	if (he_dev->irq_tail == he_dev->irq_head) {
2221 		HPRINTK("tailoffset not updated?\n");
2222 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2223 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2224 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2225 	}
2226 
2227 #ifdef DEBUG
2228 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2229 		hprintk("spurious (or shared) interrupt?\n");
2230 #endif
2231 
2232 	if (he_dev->irq_head != he_dev->irq_tail) {
2233 		handled = 1;
2234 #ifdef USE_TASKLET
2235 		tasklet_schedule(&he_dev->tasklet);
2236 #else
2237 		he_tasklet((unsigned long) he_dev);
2238 #endif
2239 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2240 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2241 	}
2242 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2243 	return IRQ_RETVAL(handled);
2244 
2245 }
2246 
2247 static __inline__ void
2248 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2249 {
2250 	struct he_tpdrq *new_tail;
2251 
2252 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2253 					tpd, cid, he_dev->tpdrq_tail);
2254 
2255 	/* new_tail = he_dev->tpdrq_tail; */
2256 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2257 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2258 
2259 	/*
2260 	 * check to see if we are about to set the tail == head
2261 	 * if true, update the head pointer from the adapter
2262 	 * to see if this is really the case (reading the queue
2263 	 * head for every enqueue would be unnecessarily slow)
2264 	 */
2265 
2266 	if (new_tail == he_dev->tpdrq_head) {
2267 		he_dev->tpdrq_head = (struct he_tpdrq *)
2268 			(((unsigned long)he_dev->tpdrq_base) |
2269 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2270 
2271 		if (new_tail == he_dev->tpdrq_head) {
2272 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2273 			/*
2274 			 * FIXME
2275 			 * push tpd onto a transmit backlog queue
2276 			 * after service_tbrq, service the backlog
2277 			 * for now, we just drop the pdu
2278 			 */
2279 			if (tpd->skb) {
2280 				if (tpd->vcc->pop)
2281 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2282 				else
2283 					dev_kfree_skb_any(tpd->skb);
2284 				atomic_inc(&tpd->vcc->stats->tx_err);
2285 			}
2286 #ifdef USE_TPD_POOL
2287 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2288 #else
2289 			tpd->inuse = 0;
2290 #endif
2291 			return;
2292 		}
2293 	}
2294 
2295 	/* 2.1.5 transmit packet descriptor ready queue */
2296 #ifdef USE_TPD_POOL
2297 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2298 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2299 #else
2300 	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2301 				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2302 #endif
2303 	he_dev->tpdrq_tail->cid = cid;
2304 	wmb();
2305 
2306 	he_dev->tpdrq_tail = new_tail;
2307 
2308 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2309 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2310 }
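
/*
 * the lazy head refresh above in miniature (a sketch, never compiled;
 * RING_SIZE is a hypothetical power-of-two constant): the producer
 * trusts a cached consumer index and re-reads the real one from the
 * adapter only when the cache claims the ring is full -- one register
 * read per burst of enqueues instead of one per enqueue.
 */
#ifdef notdef
static int
__ring_full_sketch(unsigned tail, unsigned *cached_head, unsigned hw_head)
{
	unsigned new_tail = (tail + 1) & (RING_SIZE - 1);

	if (new_tail == *cached_head) {
		*cached_head = hw_head;		/* refresh from the adapter */
		if (new_tail == *cached_head)
			return 1;		/* really full */
	}
	return 0;				/* room for at least one more */
}
#endif /* notdef */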
2311 
2312 static int
2313 he_open(struct atm_vcc *vcc, short vpi, int vci)
2314 {
2315 	unsigned long flags;
2316 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2317 	struct he_vcc *he_vcc;
2318 	int err = 0;
2319 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2320 
2321 
2322 	if ((err = atm_find_ci(vcc, &vpi, &vci))) {
2323 		HPRINTK("atm_find_ci err = %d\n", err);
2324 		return err;
2325 	}
2326 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2327 		return 0;
2328 	vcc->vpi = vpi;
2329 	vcc->vci = vci;
2330 
2331 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2332 
2333 	set_bit(ATM_VF_ADDR, &vcc->flags);
2334 
2335 	cid = he_mkcid(he_dev, vpi, vci);
2336 
2337 	he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2338 	if (he_vcc == NULL) {
2339 		hprintk("unable to allocate he_vcc during open\n");
2340 		return -ENOMEM;
2341 	}
2342 
2343 	he_vcc->iov_tail = he_vcc->iov_head;
2344 	he_vcc->pdu_len = 0;
2345 	he_vcc->rc_index = -1;
2346 
2347 	init_waitqueue_head(&he_vcc->rx_waitq);
2348 	init_waitqueue_head(&he_vcc->tx_waitq);
2349 
2350 	vcc->dev_data = he_vcc;
2351 
2352 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2353 		int pcr_goal;
2354 
2355 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2356 		if (pcr_goal == 0)
2357 			pcr_goal = he_dev->atm_dev->link_rate;
2358 		if (pcr_goal < 0)	/* means round down, technically */
2359 			pcr_goal = -pcr_goal;
2360 
2361 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2362 
2363 		switch (vcc->qos.aal) {
2364 			case ATM_AAL5:
2365 				tsr0_aal = TSR0_AAL5;
2366 				tsr4 = TSR4_AAL5;
2367 				break;
2368 			case ATM_AAL0:
2369 				tsr0_aal = TSR0_AAL0_SDU;
2370 				tsr4 = TSR4_AAL0_SDU;
2371 				break;
2372 			default:
2373 				err = -EINVAL;
2374 				goto open_failed;
2375 		}
2376 
2377 		spin_lock_irqsave(&he_dev->global_lock, flags);
2378 		tsr0 = he_readl_tsr0(he_dev, cid);
2379 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2380 
2381 		if (TSR0_CONN_STATE(tsr0) != 0) {
2382 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2383 			err = -EBUSY;
2384 			goto open_failed;
2385 		}
2386 
2387 		switch (vcc->qos.txtp.traffic_class) {
2388 			case ATM_UBR:
2389 				/* 2.3.3.1 open connection ubr */
2390 
2391 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2392 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2393 				break;
2394 
2395 			case ATM_CBR:
2396 				/* 2.3.3.2 open connection cbr */
2397 
2398 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2399 				if ((he_dev->total_bw + pcr_goal)
2400 					> (he_dev->atm_dev->link_rate * 9 / 10))
2401 				{
2402 					err = -EBUSY;
2403 					goto open_failed;
2404 				}
2405 
2406 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2407 
2408 				/* find an unused cs_stper register */
2409 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2410 					if (he_dev->cs_stper[reg].inuse == 0 ||
2411 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2412 							break;
2413 
2414 				if (reg == HE_NUM_CS_STPER) {
2415 					err = -EBUSY;
2416 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2417 					goto open_failed;
2418 				}
2419 
2420 				he_dev->total_bw += pcr_goal;
2421 
2422 				he_vcc->rc_index = reg;
2423 				++he_dev->cs_stper[reg].inuse;
2424 				he_dev->cs_stper[reg].pcr = pcr_goal;
2425 
2426 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2427 				period = clock / pcr_goal;
2428 
2429 				HPRINTK("rc_index = %d period = %d\n",
2430 								reg, period);
2431 
2432 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2433 							CS_STPER0 + reg);
2434 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2435 
2436 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2437 							TSR0_RC_INDEX(reg);
2438 
2439 				break;
2440 			default:
2441 				err = -EINVAL;
2442 				goto open_failed;
2443 		}
2444 
2445 		spin_lock_irqsave(&he_dev->global_lock, flags);
2446 
2447 		he_writel_tsr0(he_dev, tsr0, cid);
2448 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2449 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2450 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2451 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2452 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2453 
2454 		he_writel_tsr3(he_dev, 0x0, cid);
2455 		he_writel_tsr5(he_dev, 0x0, cid);
2456 		he_writel_tsr6(he_dev, 0x0, cid);
2457 		he_writel_tsr7(he_dev, 0x0, cid);
2458 		he_writel_tsr8(he_dev, 0x0, cid);
2459 		he_writel_tsr10(he_dev, 0x0, cid);
2460 		he_writel_tsr11(he_dev, 0x0, cid);
2461 		he_writel_tsr12(he_dev, 0x0, cid);
2462 		he_writel_tsr13(he_dev, 0x0, cid);
2463 		he_writel_tsr14(he_dev, 0x0, cid);
2464 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2465 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2466 	}
2467 
2468 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2469 		unsigned aal;
2470 
2471 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2472 					&HE_VCC(vcc)->rx_waitq);
2473 
2474 		switch (vcc->qos.aal) {
2475 			case ATM_AAL5:
2476 				aal = RSR0_AAL5;
2477 				break;
2478 			case ATM_AAL0:
2479 				aal = RSR0_RAWCELL;
2480 				break;
2481 			default:
2482 				err = -EINVAL;
2483 				goto open_failed;
2484 		}
2485 
2486 		spin_lock_irqsave(&he_dev->global_lock, flags);
2487 
2488 		rsr0 = he_readl_rsr0(he_dev, cid);
2489 		if (rsr0 & RSR0_OPEN_CONN) {
2490 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2491 
2492 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2493 			err = -EBUSY;
2494 			goto open_failed;
2495 		}
2496 
2497 #ifdef USE_RBPS
2498 		rsr1 = RSR1_GROUP(0);
2499 		rsr4 = RSR4_GROUP(0);
2500 #else /* !USE_RBPS */
2501 		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2502 		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2503 #endif /* USE_RBPS */
2504 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2505 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2506 
2507 #ifdef USE_CHECKSUM_HW
2508 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2509 			rsr0 |= RSR0_TCP_CKSUM;
2510 #endif
2511 
2512 		he_writel_rsr4(he_dev, rsr4, cid);
2513 		he_writel_rsr1(he_dev, rsr1, cid);
2514 		/* 5.1.11 last parameter initialized should be
2515 			  the open/closed indication in rsr0 */
2516 		he_writel_rsr0(he_dev,
2517 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2518 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2519 
2520 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2521 	}
2522 
2523 open_failed:
2524 
2525 	if (err) {
2526 		if (he_vcc)
2527 			kfree(he_vcc);
2528 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2529 	}
2530 	else
2531 		set_bit(ATM_VF_READY, &vcc->flags);
2532 
2533 	return err;
2534 }
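
/*
 * worked example for the cbr setup above: on a 155 Mbit/s card the
 * cell scheduler clock runs at 50 MHz (66.667 MHz on 622 Mbit/s
 * cards), so a pcr_goal of 100,000 cells/s gives period = 50000000 /
 * 100000 = 500 clocks between cells, and CS_STPER is programmed with
 * rate_to_atmf(250).  only HE_NUM_CS_STPER distinct periods can be
 * active at once, which is why connections with an identical pcr
 * share a rate controller register.
 */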
2535 
2536 static void
2537 he_close(struct atm_vcc *vcc)
2538 {
2539 	unsigned long flags;
2540 	DECLARE_WAITQUEUE(wait, current);
2541 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2542 	struct he_tpd *tpd;
2543 	unsigned cid;
2544 	struct he_vcc *he_vcc = HE_VCC(vcc);
2545 #define MAX_RETRY 30
2546 	int retry = 0, sleep = 1, tx_inuse;
2547 
2548 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2549 
2550 	clear_bit(ATM_VF_READY, &vcc->flags);
2551 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2552 
2553 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2554 		int timeout;
2555 
2556 		HPRINTK("close rx cid 0x%x\n", cid);
2557 
2558 		/* 2.7.2.2 close receive operation */
2559 
2560 		/* wait for previous close (if any) to finish */
2561 
2562 		spin_lock_irqsave(&he_dev->global_lock, flags);
2563 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2564 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2565 			udelay(250);
2566 		}
2567 
2568 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2569 		set_current_state(TASK_UNINTERRUPTIBLE);
2570 
2571 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2572 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2573 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2574 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2575 
2576 		timeout = schedule_timeout(30*HZ);
2577 
2578 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2579 		set_current_state(TASK_RUNNING);
2580 
2581 		if (timeout == 0)
2582 			hprintk("close rx timeout cid 0x%x\n", cid);
2583 
2584 		HPRINTK("close rx cid 0x%x complete\n", cid);
2585 
2586 	}
2587 
2588 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2589 		volatile unsigned tsr4, tsr0;
2590 		int timeout;
2591 
2592 		HPRINTK("close tx cid 0x%x\n", cid);
2593 
2594 		/* 2.1.2
2595 		 *
2596 		 * ... the host must first stop queueing packets to the TPDRQ
2597 		 * on the connection to be closed, then wait for all outstanding
2598 		 * packets to be transmitted and their buffers returned to the
2599 		 * TBRQ. When the last packet on the connection arrives in the
2600 		 * TBRQ, the host issues the close command to the adapter.
2601 		 */
2602 
2603 		while (((tx_inuse = atomic_read(&vcc->sk->wmem_alloc)) > 0) &&
2604 		       (retry < MAX_RETRY)) {
2605 			set_current_state(TASK_UNINTERRUPTIBLE);
2606 			(void) schedule_timeout(sleep);
2607 			set_current_state(TASK_RUNNING);
2608 			if (sleep < HZ)
2609 				sleep = sleep * 2;
2610 
2611 			++retry;
2612 		}
2613 
2614 		if (tx_inuse)
2615 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2616 
2617 		/* 2.3.1.1 generic close operations with flush */
2618 
2619 		spin_lock_irqsave(&he_dev->global_lock, flags);
2620 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2621 					/* also clears TSR4_SESSION_ENDED */
2622 
2623 		switch (vcc->qos.txtp.traffic_class) {
2624 			case ATM_UBR:
2625 				he_writel_tsr1(he_dev,
2626 					TSR1_MCR(rate_to_atmf(200000))
2627 					| TSR1_PCR(0), cid);
2628 				break;
2629 			case ATM_CBR:
2630 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2631 				break;
2632 		}
2633 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2634 
2635 		tpd = __alloc_tpd(he_dev);
2636 		if (tpd == NULL) {
2637 			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2638 			goto close_tx_incomplete;
2639 		}
2640 		tpd->status |= TPD_EOS | TPD_INT;
2641 		tpd->skb = NULL;
2642 		tpd->vcc = vcc;
2643 		wmb();
2644 
2645 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2646 		set_current_state(TASK_UNINTERRUPTIBLE);
2647 		__enqueue_tpd(he_dev, tpd, cid);
2648 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2649 
2650 		timeout = schedule_timeout(30*HZ);
2651 
2652 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2653 		set_current_state(TASK_RUNNING);
2654 
2655 		spin_lock_irqsave(&he_dev->global_lock, flags);
2656 
2657 		if (timeout == 0) {
2658 			hprintk("close tx timeout cid 0x%x\n", cid);
2659 			goto close_tx_incomplete;
2660 		}
2661 
2662 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2663 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2664 			udelay(250);
2665 		}
2666 
2667 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2668 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2669 			udelay(250);
2670 		}
2671 
2672 close_tx_incomplete:
2673 
2674 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2675 			int reg = he_vcc->rc_index;
2676 
2677 			HPRINTK("cs_stper reg = %d\n", reg);
2678 
2679 			if (he_dev->cs_stper[reg].inuse == 0)
2680 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2681 			else
2682 				--he_dev->cs_stper[reg].inuse;
2683 
2684 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2685 		}
2686 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2687 
2688 		HPRINTK("close tx cid 0x%x complete\n", cid);
2689 	}
2690 
2691 	kfree(he_vcc);
2692 
2693 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2694 }
2695 
2696 static int
2697 he_sg_send(struct atm_vcc *vcc, unsigned long start, unsigned long size)
2698 {
2699 #ifdef USE_SCATTERGATHER
2700 	return 1;
2701 #else
2702 	return 0;
2703 #endif
2704 }
2705 
2706 static int
2707 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2708 {
2709 	unsigned long flags;
2710 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2711 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2712 	struct he_tpd *tpd;
2713 #ifdef USE_SCATTERGATHER
2714 	int i, slot = 0;
2715 #endif
2716 
2717 #define HE_TPD_BUFSIZE 0xffff
2718 
2719 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2720 
2721 	if ((skb->len > HE_TPD_BUFSIZE) ||
2722 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2723 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2724 		if (vcc->pop)
2725 			vcc->pop(vcc, skb);
2726 		else
2727 			dev_kfree_skb_any(skb);
2728 		atomic_inc(&vcc->stats->tx_err);
2729 		return -EINVAL;
2730 	}
2731 
2732 #ifndef USE_SCATTERGATHER
2733 	if (skb_shinfo(skb)->nr_frags) {
2734 		hprintk("no scatter/gather support\n");
2735 		if (vcc->pop)
2736 			vcc->pop(vcc, skb);
2737 		else
2738 			dev_kfree_skb_any(skb);
2739 		atomic_inc(&vcc->stats->tx_err);
2740 		return -EINVAL;
2741 	}
2742 #endif
2743 	spin_lock_irqsave(&he_dev->global_lock, flags);
2744 
2745 	tpd = __alloc_tpd(he_dev);
2746 	if (tpd == NULL) {
2747 		if (vcc->pop)
2748 			vcc->pop(vcc, skb);
2749 		else
2750 			dev_kfree_skb_any(skb);
2751 		atomic_inc(&vcc->stats->tx_err);
2752 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2753 		return -ENOMEM;
2754 	}
2755 
2756 	if (vcc->qos.aal == ATM_AAL5)
2757 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2758 	else {
2759 		char *pti_clp = (void *) (skb->data + 3);
2760 		int clp, pti;
2761 
2762 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2763 		clp = (*pti_clp & ATM_HDR_CLP);
2764 		tpd->status |= TPD_CELLTYPE(pti);
2765 		if (clp)
2766 			tpd->status |= TPD_CLP;
2767 
2768 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2769 	}
2770 
2771 #ifdef USE_SCATTERGATHER
2772 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2773 				skb->len - skb->data_len, PCI_DMA_TODEVICE);
2774 	tpd->iovec[slot].len = skb->len - skb->data_len;
2775 	++slot;
2776 
2777 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2778 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2779 
2780 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2781 			tpd->vcc = vcc;
2782 			tpd->skb = NULL;	/* not the last fragment
2783 						   so don't ->push() yet */
2784 			wmb();
2785 
2786 			__enqueue_tpd(he_dev, tpd, cid);
2787 			tpd = __alloc_tpd(he_dev);
2788 			if (tpd == NULL) {
2789 				if (vcc->pop)
2790 					vcc->pop(vcc, skb);
2791 				else
2792 					dev_kfree_skb_any(skb);
2793 				atomic_inc(&vcc->stats->tx_err);
2794 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2795 				return -ENOMEM;
2796 			}
2797 			tpd->status |= TPD_USERCELL;
2798 			slot = 0;
2799 		}
2800 
2801 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2802 			(void *) page_address(frag->page) + frag->page_offset,
2803 				frag->size, PCI_DMA_TODEVICE);
2804 		tpd->iovec[slot].len = frag->size;
2805 		++slot;
2806 
2807 	}
2808 
2809 	tpd->iovec[slot - 1].len |= TPD_LST;
2810 #else
2811 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2812 	tpd->length0 = skb->len | TPD_LST;
2813 #endif
2814 	tpd->status |= TPD_INT;
2815 
2816 	tpd->vcc = vcc;
2817 	tpd->skb = skb;
2818 	wmb();
2819 	ATM_SKB(skb)->vcc = vcc;
2820 
2821 	__enqueue_tpd(he_dev, tpd, cid);
2822 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2823 
2824 	atomic_inc(&vcc->stats->tx);
2825 
2826 	return 0;
2827 }
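
/*
 * detail on the aal0 path above: an ATM_AAL0_SDU is a 52-byte cell
 * (the 4-byte header without its HEC, plus 48 payload bytes):
 *
 *	| GFC/VPI | VPI/VCI |  VCI  | VCI/PTI/CLP |
 *	   byte 0    byte 1   byte 2     byte 3
 *
 * so skb->data + 3 holds the pti and clp bits, which are copied into
 * the tpd status word; the four header bytes are then stripped
 * (ATM_AAL0_SDU - ATM_CELL_PAYLOAD) since the adapter rebuilds the
 * header from the cid and the tpd flags.
 */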
2828 
2829 static int
2830 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void *arg)
2831 {
2832 	unsigned long flags;
2833 	struct he_dev *he_dev = HE_DEV(atm_dev);
2834 	struct he_ioctl_reg reg;
2835 	int err = 0;
2836 
2837 	switch (cmd) {
2838 		case HE_GET_REG:
2839 			if (!capable(CAP_NET_ADMIN))
2840 				return -EPERM;
2841 
2842 			if (copy_from_user(&reg, (struct he_ioctl_reg *) arg,
2843 						sizeof(struct he_ioctl_reg)))
2844 				return -EFAULT;
2845 
2846 			spin_lock_irqsave(&he_dev->global_lock, flags);
2847 			switch (reg.type) {
2848 				case HE_REGTYPE_PCI:
2849 					reg.val = he_readl(he_dev, reg.addr);
2850 					break;
2851 				case HE_REGTYPE_RCM:
2852 					reg.val =
2853 						he_readl_rcm(he_dev, reg.addr);
2854 					break;
2855 				case HE_REGTYPE_TCM:
2856 					reg.val =
2857 						he_readl_tcm(he_dev, reg.addr);
2858 					break;
2859 				case HE_REGTYPE_MBOX:
2860 					reg.val =
2861 						he_readl_mbox(he_dev, reg.addr);
2862 					break;
2863 				default:
2864 					err = -EINVAL;
2865 					break;
2866 			}
2867 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2868 			if (err == 0)
2869 				if (copy_to_user((struct he_ioctl_reg *) arg, &reg,
2870 							sizeof(struct he_ioctl_reg)))
2871 					return -EFAULT;
2872 			break;
2873 		default:
2874 #ifdef CONFIG_ATM_HE_USE_SUNI
2875 			if (atm_dev->phy && atm_dev->phy->ioctl)
2876 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2877 #else /* CONFIG_ATM_HE_USE_SUNI */
2878 			err = -EINVAL;
2879 #endif /* CONFIG_ATM_HE_USE_SUNI */
2880 			break;
2881 	}
2882 
2883 	return err;
2884 }
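
/*
 * a minimal user-space sketch of HE_GET_REG (assuming the definitions
 * in he.h are visible to user space and fd is an atm socket bound to
 * this device; the register offset is illustrative):
 *
 *	struct he_ioctl_reg reg;
 *
 *	reg.addr = 0x0;			// register offset to read
 *	reg.type = HE_REGTYPE_PCI;	// or the RCM, TCM, MBOX spaces
 *	if (ioctl(fd, HE_GET_REG, &reg) == 0)
 *		printf("reg = 0x%x\n", reg.val);
 *
 * note the CAP_NET_ADMIN check above -- unprivileged callers get
 * -EPERM.
 */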
2885 
2886 static void
2887 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2888 {
2889 	unsigned long flags;
2890 	struct he_dev *he_dev = HE_DEV(atm_dev);
2891 
2892 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2893 
2894 	spin_lock_irqsave(&he_dev->global_lock, flags);
2895 	he_writel(he_dev, val, FRAMER + (addr*4));
2896 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2897 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2898 }
2899 
2900 
2901 static unsigned char
2902 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2903 {
2904 	unsigned long flags;
2905 	struct he_dev *he_dev = HE_DEV(atm_dev);
2906 	unsigned reg;
2907 
2908 	spin_lock_irqsave(&he_dev->global_lock, flags);
2909 	reg = he_readl(he_dev, FRAMER + (addr*4));
2910 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2911 
2912 	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2913 	return reg;
2914 }
2915 
2916 static int
2917 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2918 {
2919 	unsigned long flags;
2920 	struct he_dev *he_dev = HE_DEV(dev);
2921 	int left, i;
2922 #ifdef notdef
2923 	struct he_rbrq *rbrq_tail;
2924 	struct he_tpdrq *tpdrq_head;
2925 	int rbpl_head, rbpl_tail, inuse;
2926 #endif
2927 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2928 
2929 
2930 	left = *pos;
2931 	if (!left--)
2932 		return sprintf(page, "%s\n", version);
2933 
2934 	if (!left--)
2935 		return sprintf(page, "%s%s\n\n",
2936 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2937 
2938 	if (!left--)
2939 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2940 
2941 	spin_lock_irqsave(&he_dev->global_lock, flags);
2942 	mcc += he_readl(he_dev, MCC);
2943 	oec += he_readl(he_dev, OEC);
2944 	dcc += he_readl(he_dev, DCC);
2945 	cec += he_readl(he_dev, CEC);
2946 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2947 
2948 	if (!left--)
2949 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2950 							mcc, oec, dcc, cec);
2951 
2952 	if (!left--)
2953 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2954 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2955 
2956 	if (!left--)
2957 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2958 						CONFIG_TPDRQ_SIZE);
2959 
2960 	if (!left--)
2961 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2962 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2963 
2964 	if (!left--)
2965 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2966 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2967 
2968 
2969 #ifdef notdef
2970 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2971 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2972 
2973 	inuse = rbpl_head - rbpl_tail;
2974 	if (inuse < 0)
2975 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2976 	inuse /= sizeof(struct he_rbp);
2977 
2978 	if (!left--)
2979 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2980 						CONFIG_RBPL_SIZE, inuse);
2981 #endif
2982 
2983 	if (!left--)
2984 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2985 
2986 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2987 		if (!left--)
2988 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2989 						he_dev->cs_stper[i].pcr,
2990 						he_dev->cs_stper[i].inuse);
2991 
2992 	if (!left--)
2993 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2994 			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2995 
2996 	return 0;
2997 }
2998 
2999 /* eeprom routines  -- see 4.7 */
3000 
3001 u8
3002 read_prom_byte(struct he_dev *he_dev, int addr)
3003 {
3004 	u32 val = 0, tmp_read = 0;
3005 	int i, j = 0;
3006 	u8 byte_read = 0;
3007 
3008 	val = readl(he_dev->membase + HOST_CNTL);
3009 	val &= 0xFFFFE0FF;
3010 
3011 	/* Turn on write enable */
3012 	val |= 0x800;
3013 	he_writel(he_dev, val, HOST_CNTL);
3014 
3015 	/* Send READ instruction */
3016 	for (i = 0; i < sizeof(readtab)/sizeof(readtab[0]); i++) {
3017 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
3018 		udelay(EEPROM_DELAY);
3019 	}
3020 
3021 	/* Next, we need to send the byte address to read from */
3022 	for (i = 7; i >= 0; i--) {
3023 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3024 		udelay(EEPROM_DELAY);
3025 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3026 		udelay(EEPROM_DELAY);
3027 	}
3028 
3029 	j = 0;
3030 
3031 	val &= 0xFFFFF7FF;      /* Turn off write enable */
3032 	he_writel(he_dev, val, HOST_CNTL);
3033 
3034 	/* Now, we can read data from the EEPROM by clocking it in */
3035 	for (i = 7; i >= 0; i--) {
3036 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3037 		udelay(EEPROM_DELAY);
3038 		tmp_read = he_readl(he_dev, HOST_CNTL);
3039 		byte_read |= (unsigned char)
3040 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3041 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3042 		udelay(EEPROM_DELAY);
3043 	}
3044 
3045 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
3046 	udelay(EEPROM_DELAY);
3047 
3048 	return byte_read;
3049 }
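
/*
 * a sketch (never compiled) of using read_prom_byte() to dump the
 * start of the eeprom at probe time; the 16-byte count is arbitrary,
 * and the actual layout of the contents is described in section 4.7
 * of the manual, not here.
 */
#ifdef notdef
static void
__dump_prom_sketch(struct he_dev *he_dev)
{
	int i;

	for (i = 0; i < 16; ++i)
		printk("eeprom[%d] = 0x%02x\n", i, read_prom_byte(he_dev, i));
}
#endif /* notdef */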
3050 
3051 MODULE_LICENSE("GPL");
3052 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3053 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3054 MODULE_PARM(disable64, "h");
3055 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3056 MODULE_PARM(nvpibits, "i");
3057 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3058 MODULE_PARM(nvcibits, "i");
3059 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3060 MODULE_PARM(rx_skb_reserve, "i");
3061 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3062 MODULE_PARM(irq_coalesce, "i");
3063 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3064 MODULE_PARM(sdh, "i");
3065 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3066 
3067 static struct pci_device_id he_pci_tbl[] __devinitdata = {
3068 	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3069 	  0, 0, 0 },
3070 	{ 0, }
3071 };
3072 
3073 static struct pci_driver he_driver = {
3074 	.name =		"he",
3075 	.probe =	he_init_one,
3076 	.remove =	__devexit_p(he_remove_one),
3077 	.id_table =	he_pci_tbl,
3078 };
3079 
3080 static int __init he_init(void)
3081 {
3082 	return pci_module_init(&he_driver);
3083 }
3084 
3085 static void __exit he_cleanup(void)
3086 {
3087 	pci_unregister_driver(&he_driver);
3088 }
3089 
3090 module_init(he_init);
3091 module_exit(he_cleanup);
3092