#ifndef _ACENIC_H_
#define _ACENIC_H_

#include <linux/config.h>

/*
 * Generate a TX index update each time the TX ring is closed.
 * Normally this is not useful, because it results in more DMA (and more
 * IRQs without TX_COAL_INTS_ONLY).
 */
#define USE_TX_COAL_NOW 0

#ifndef MAX_SKB_FRAGS
#define MAX_SKB_FRAGS 0
#endif


/*
 * Addressing:
 *
 * The Tigon uses 64-bit host addresses, regardless of their actual
 * length, and it expects them in big-endian format. On 32 bit systems
 * the upper 32 bits of the address are simply ignored (zero), but on
 * little endian 64 bit systems (Alpha) this looks odd, with the two
 * halves of the address word being swapped.
 *
 * The addresses are split into two 32 bit words on all architectures,
 * as some of them live in PCI shared memory and must be accessed with
 * readl/writel (see the sketch following the aceaddr typedef below).
 *
 * The addressing code is derived from Pete Wyckoff's work, but
 * modified to deal properly with readl/writel usage.
 */

struct ace_regs {
        u32 pad0[16];           /* PCI control registers */

        u32 HostCtrl;           /* 0x40 */
        u32 LocalCtrl;

        u32 pad1[2];

        u32 MiscCfg;            /* 0x50 */

        u32 pad2[2];

        u32 PciState;

        u32 pad3[2];            /* 0x60 */

        u32 WinBase;
        u32 WinData;

        u32 pad4[12];           /* 0x70 */

        u32 DmaWriteState;      /* 0xa0 */
        u32 pad5[3];
        u32 DmaReadState;       /* 0xb0 */

        u32 pad6[26];

        u32 AssistState;

        u32 pad7[8];            /* 0x120 */

        u32 CpuCtrl;            /* 0x140 */
        u32 Pc;

        u32 pad8[3];

        u32 SramAddr;           /* 0x154 */
        u32 SramData;

        u32 pad9[49];

        u32 MacRxState;         /* 0x220 */

        u32 pad10[7];

        u32 CpuBCtrl;           /* 0x240 */
        u32 PcB;

        u32 pad11[3];

        u32 SramBAddr;          /* 0x254 */
        u32 SramBData;

        u32 pad12[105];

        u32 pad13[32];          /* 0x400 */
        u32 Stats[32];

        u32 Mb0Hi;              /* 0x500 */
        u32 Mb0Lo;
        u32 Mb1Hi;
        u32 CmdPrd;
        u32 Mb2Hi;
        u32 TxPrd;
        u32 Mb3Hi;
        u32 RxStdPrd;
        u32 Mb4Hi;
        u32 RxJumboPrd;
        u32 Mb5Hi;
        u32 RxMiniPrd;
        u32 Mb6Hi;
        u32 Mb6Lo;
        u32 Mb7Hi;
        u32 Mb7Lo;
        u32 Mb8Hi;
        u32 Mb8Lo;
        u32 Mb9Hi;
        u32 Mb9Lo;
        u32 MbAHi;
        u32 MbALo;
        u32 MbBHi;
        u32 MbBLo;
        u32 MbCHi;
        u32 MbCLo;
        u32 MbDHi;
        u32 MbDLo;
        u32 MbEHi;
        u32 MbELo;
        u32 MbFHi;
        u32 MbFLo;

        u32 pad14[32];

        u32 MacAddrHi;          /* 0x600 */
        u32 MacAddrLo;
        u32 InfoPtrHi;
        u32 InfoPtrLo;
        u32 MultiCastHi;        /* 0x610 */
        u32 MultiCastLo;
        u32 ModeStat;
        u32 DmaReadCfg;
        u32 DmaWriteCfg;        /* 0x620 */
        u32 TxBufRat;
        u32 EvtCsm;
        u32 CmdCsm;
        u32 TuneRxCoalTicks;    /* 0x630 */
        u32 TuneTxCoalTicks;
        u32 TuneStatTicks;
        u32 TuneMaxTxDesc;
        u32 TuneMaxRxDesc;      /* 0x640 */
        u32 TuneTrace;
        u32 TuneLink;
        u32 TuneFastLink;
        u32 TracePtr;           /* 0x650 */
        u32 TraceStrt;
        u32 TraceLen;
        u32 IfIdx;
        u32 IfMtu;              /* 0x660 */
        u32 MaskInt;
        u32 GigLnkState;
        u32 FastLnkState;
        u32 pad16[4];           /* 0x670 */
        u32 RxRetCsm;           /* 0x680 */

        u32 pad17[31];

        u32 CmdRng[64];         /* 0x700 */
        u32 Window[0x200];
};


typedef struct {
        u32 addrhi;
        u32 addrlo;
} aceaddr;
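
/*
 * Illustration only (not part of the driver): a minimal sketch of how a
 * 64-bit DMA address might be split into the two 32-bit halves expected by
 * a Hi/Lo register pair, using writel() as required for registers in PCI
 * shared memory.  The helper name and the choice of InfoPtrHi/InfoPtrLo as
 * the example pair are assumptions made for the sketch.
 */
#if 0
static inline void example_write_addr(struct ace_regs *regs, dma_addr_t dma)
{
        u64 baddr = (u64) dma;

        /* Upper half first, then lower half, both via writel(). */
        writel(baddr >> 32, &regs->InfoPtrHi);
        writel(baddr & 0xffffffff, &regs->InfoPtrLo);
}
#endif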


#define ACE_WINDOW_SIZE 0x800

#define ACE_JUMBO_MTU 9000
#define ACE_STD_MTU 1500

#define ACE_TRACE_SIZE 0x8000

/*
 * Host control register bits.
 */

#define IN_INT 0x01
#define CLR_INT 0x02
#define HW_RESET 0x08
#define BYTE_SWAP 0x10
#define WORD_SWAP 0x20
#define MASK_INTS 0x40

/*
 * Local control register bits.
 */

#define EEPROM_DATA_IN 0x800000
#define EEPROM_DATA_OUT 0x400000
#define EEPROM_WRITE_ENABLE 0x200000
#define EEPROM_CLK_OUT 0x100000

#define EEPROM_BASE 0xa0000000

#define EEPROM_WRITE_SELECT 0xa0
#define EEPROM_READ_SELECT 0xa1

#define SRAM_BANK_512K 0x200


/*
 * udelay() values for when clocking the eeprom
 */
#define ACE_SHORT_DELAY 2
#define ACE_LONG_DELAY 4
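
/*
 * Illustration only: a hedged sketch of how the EEPROM clock line might be
 * pulsed through LocalCtrl using the delays above.  The helper name is
 * hypothetical; the driver's real EEPROM access routines live in the .c
 * file.
 */
#if 0
static inline void example_eeprom_clock_pulse(struct ace_regs *regs)
{
        u32 local = readl(&regs->LocalCtrl);

        writel(local | EEPROM_CLK_OUT, &regs->LocalCtrl);       /* clock high */
        udelay(ACE_SHORT_DELAY);
        writel(local & ~EEPROM_CLK_OUT, &regs->LocalCtrl);      /* clock low */
        udelay(ACE_LONG_DELAY);
}
#endif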


/*
 * Misc Config bits
 */

#define SYNC_SRAM_TIMING 0x100000


/*
 * CPU state bits.
 */

#define CPU_RESET 0x01
#define CPU_TRACE 0x02
#define CPU_PROM_FAILED 0x10
#define CPU_HALT 0x00010000
#define CPU_HALTED 0xffff0000


/*
 * PCI State bits.
 */

#define DMA_READ_MAX_4 0x04
#define DMA_READ_MAX_16 0x08
#define DMA_READ_MAX_32 0x0c
#define DMA_READ_MAX_64 0x10
#define DMA_READ_MAX_128 0x14
#define DMA_READ_MAX_256 0x18
#define DMA_READ_MAX_1K 0x1c
#define DMA_WRITE_MAX_4 0x20
#define DMA_WRITE_MAX_16 0x40
#define DMA_WRITE_MAX_32 0x60
#define DMA_WRITE_MAX_64 0x80
#define DMA_WRITE_MAX_128 0xa0
#define DMA_WRITE_MAX_256 0xc0
#define DMA_WRITE_MAX_1K 0xe0
#define DMA_READ_WRITE_MASK 0xfc
#define MEM_READ_MULTIPLE 0x00020000
#define PCI_66MHZ 0x00080000
#define PCI_32BIT 0x00100000
#define DMA_WRITE_ALL_ALIGN 0x00800000
#define READ_CMD_MEM 0x06000000
#define WRITE_CMD_MEM 0x70000000


/*
 * Mode status
 */

#define ACE_BYTE_SWAP_BD 0x02
#define ACE_WORD_SWAP_BD 0x04 /* not actually used */
#define ACE_WARN 0x08
#define ACE_BYTE_SWAP_DMA 0x10
#define ACE_NO_JUMBO_FRAG 0x200
#define ACE_FATAL 0x40000000


/*
 * DMA config
 */

#define DMA_THRESH_1W 0x10
#define DMA_THRESH_2W 0x20
#define DMA_THRESH_4W 0x40
#define DMA_THRESH_8W 0x80
#define DMA_THRESH_16W 0x100
#define DMA_THRESH_32W 0x0 /* not described in doc, but exists. */


/*
 * Tuning parameters
 */

#define TICKS_PER_SEC 1000000
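
/*
 * Illustration only: with TICKS_PER_SEC at 1,000,000 one firmware tick is
 * one microsecond, so a coalescing delay expressed in microseconds can (as
 * a sketch, not the driver's actual tuning code) be written straight into
 * the tuning registers:
 */
#if 0
static inline void example_set_rx_coal(struct ace_regs *regs, u32 usecs)
{
        /* usecs * TICKS_PER_SEC / 1000000 == usecs */
        writel(usecs, &regs->TuneRxCoalTicks);
}
#endif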


/*
 * Link bits
 */

#define LNK_PREF 0x00008000
#define LNK_10MB 0x00010000
#define LNK_100MB 0x00020000
#define LNK_1000MB 0x00040000
#define LNK_FULL_DUPLEX 0x00080000
#define LNK_HALF_DUPLEX 0x00100000
#define LNK_TX_FLOW_CTL_Y 0x00200000
#define LNK_NEG_ADVANCED 0x00400000
#define LNK_RX_FLOW_CTL_Y 0x00800000
#define LNK_NIC 0x01000000
#define LNK_JAM 0x02000000
#define LNK_JUMBO 0x04000000
#define LNK_ALTEON 0x08000000
#define LNK_NEG_FCTL 0x10000000
#define LNK_NEGOTIATE 0x20000000
#define LNK_ENABLE 0x40000000
#define LNK_UP 0x80000000


/*
 * Event definitions
 */

#define EVT_RING_ENTRIES 256
#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event))

struct event {
#ifdef __LITTLE_ENDIAN_BITFIELD
        u32 idx:12;
        u32 code:12;
        u32 evt:8;
#else
        u32 evt:8;
        u32 code:12;
        u32 idx:12;
#endif
        u32 pad;
};


/*
 * Events
 */

#define E_FW_RUNNING 0x01
#define E_STATS_UPDATED 0x04

#define E_STATS_UPDATE 0x04

#define E_LNK_STATE 0x06
#define E_C_LINK_UP 0x01
#define E_C_LINK_DOWN 0x02
#define E_C_LINK_10_100 0x03

#define E_ERROR 0x07
#define E_C_ERR_INVAL_CMD 0x01
#define E_C_ERR_UNIMP_CMD 0x02
#define E_C_ERR_BAD_CFG 0x03

#define E_MCAST_LIST 0x08
#define E_C_MCAST_ADDR_ADD 0x01
#define E_C_MCAST_ADDR_DEL 0x02

#define E_RESET_JUMBO_RNG 0x09
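
/*
 * Illustration only: a hedged sketch of how the event ring might be walked
 * from the host's consumer index to the firmware's producer index.  The
 * function name and the surrounding bookkeeping are hypothetical; the
 * driver's real event handler lives in the .c file.
 */
#if 0
static u32 example_drain_events(struct event *evt_ring, u32 csm, u32 prd)
{
        while (csm != prd) {
                switch (evt_ring[csm].evt) {
                case E_FW_RUNNING:
                        /* firmware has started */
                        break;
                case E_STATS_UPDATED:
                        /* statistics block has been refreshed */
                        break;
                case E_LNK_STATE:
                        /* evt_ring[csm].code is E_C_LINK_UP/DOWN/10_100 */
                        break;
                default:
                        break;
                }
                csm = (csm + 1) % EVT_RING_ENTRIES;
        }
        return csm;     /* new consumer index */
}
#endif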


/*
 * Commands
 */

#define CMD_RING_ENTRIES 64

struct cmd {
#ifdef __LITTLE_ENDIAN_BITFIELD
        u32 idx:12;
        u32 code:12;
        u32 evt:8;
#else
        u32 evt:8;
        u32 code:12;
        u32 idx:12;
#endif
};


#define C_HOST_STATE 0x01
#define C_C_STACK_UP 0x01
#define C_C_STACK_DOWN 0x02

#define C_FDR_FILTERING 0x02
#define C_C_FDR_FILT_ENABLE 0x01
#define C_C_FDR_FILT_DISABLE 0x02

#define C_SET_RX_PRD_IDX 0x03
#define C_UPDATE_STATS 0x04
#define C_RESET_JUMBO_RNG 0x05
#define C_ADD_MULTICAST_ADDR 0x08
#define C_DEL_MULTICAST_ADDR 0x09

#define C_SET_PROMISC_MODE 0x0a
#define C_C_PROMISC_ENABLE 0x01
#define C_C_PROMISC_DISABLE 0x02

#define C_LNK_NEGOTIATION 0x0b
#define C_C_NEGOTIATE_BOTH 0x00
#define C_C_NEGOTIATE_GIG 0x01
#define C_C_NEGOTIATE_10_100 0x02

#define C_SET_MAC_ADDR 0x0c
#define C_CLEAR_PROFILE 0x0d

#define C_SET_MULTICAST_MODE 0x0e
#define C_C_MCAST_ENABLE 0x01
#define C_C_MCAST_DISABLE 0x02

#define C_CLEAR_STATS 0x0f
#define C_SET_RX_JUMBO_PRD_IDX 0x10
#define C_REFRESH_STATS 0x11
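
/*
 * Illustration only: a hedged sketch of how a command might be pushed into
 * the on-NIC command ring.  The 32-bit cmd word is written into CmdRng[]
 * at the current producer index and the producer mailbox is advanced; the
 * helper name and the absence of locking are assumptions of the sketch.
 * e.g. (sketch): cmd.evt = C_UPDATE_STATS; cmd.code = 0; cmd.idx = 0;
 */
#if 0
static inline void example_issue_cmd(struct ace_regs *regs, struct cmd *cmd)
{
        u32 idx = readl(&regs->CmdPrd);

        writel(*(u32 *) cmd, &regs->CmdRng[idx]);
        idx = (idx + 1) % CMD_RING_ENTRIES;
        writel(idx, &regs->CmdPrd);
}
#endif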


/*
 * Descriptor flags
 */
#define BD_FLG_TCP_UDP_SUM 0x01
#define BD_FLG_IP_SUM 0x02
#define BD_FLG_END 0x04
#define BD_FLG_MORE 0x08
#define BD_FLG_JUMBO 0x10
#define BD_FLG_UCAST 0x20
#define BD_FLG_MCAST 0x40
#define BD_FLG_BCAST 0x60
#define BD_FLG_TYP_MASK 0x60
#define BD_FLG_IP_FRAG 0x80
#define BD_FLG_IP_FRAG_END 0x100
#define BD_FLG_VLAN_TAG 0x200
#define BD_FLG_FRAME_ERROR 0x400
#define BD_FLG_COAL_NOW 0x800
#define BD_FLG_MINI 0x1000


/*
 * Ring Control block flags
 */
#define RCB_FLG_TCP_UDP_SUM 0x01
#define RCB_FLG_IP_SUM 0x02
#define RCB_FLG_NO_PSEUDO_HDR 0x08
#define RCB_FLG_VLAN_ASSIST 0x10
#define RCB_FLG_COAL_INT_ONLY 0x20
#define RCB_FLG_TX_HOST_RING 0x40
#define RCB_FLG_IEEE_SNAP_SUM 0x80
#define RCB_FLG_EXT_RX_BD 0x100
#define RCB_FLG_RNG_DISABLE 0x200


/*
 * TX ring - the maximum number of TX ring entries on the Tigon I is 128.
 */
#define MAX_TX_RING_ENTRIES 256
#define TIGON_I_TX_RING_ENTRIES 128
#define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
#define TX_RING_BASE 0x3800

struct tx_desc {
        aceaddr addr;
        u32 flagsize;
#if 0
        /*
         * This is in PCI shared mem and must be accessed with readl/writel;
         * the real layout is:
         */
#if __LITTLE_ENDIAN
        u16 flags;
        u16 size;
        u16 vlan;
        u16 reserved;
#else
        u16 size;
        u16 flags;
        u16 reserved;
        u16 vlan;
#endif
#endif
        u32 vlanres;
};
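
/*
 * Illustration only: given the commented-out layout above, the flagsize
 * word packs the frame length into the upper 16 bits and the BD_FLG_* bits
 * into the lower 16.  A hedged sketch of filling one descriptor (helper
 * name hypothetical; writel() used since the ring may live in PCI shared
 * memory on the Tigon I):
 */
#if 0
static inline void example_load_tx_desc(struct tx_desc *desc, u64 addr,
                                        u16 len, u16 flags)
{
        writel(addr >> 32, &desc->addr.addrhi);
        writel(addr & 0xffffffff, &desc->addr.addrlo);
        writel(((u32) len << 16) | flags, &desc->flagsize); /* e.g. flags = BD_FLG_END */
        writel(0, &desc->vlanres);
}
#endif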


#define RX_STD_RING_ENTRIES 512
#define RX_STD_RING_SIZE (RX_STD_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_JUMBO_RING_ENTRIES 256
#define RX_JUMBO_RING_SIZE (RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_MINI_RING_ENTRIES 1024
#define RX_MINI_RING_SIZE (RX_MINI_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_RETURN_RING_ENTRIES 2048
#define RX_RETURN_RING_SIZE (RX_RETURN_RING_ENTRIES * \
                             sizeof(struct rx_desc))

struct rx_desc {
        aceaddr addr;
#ifdef __LITTLE_ENDIAN
        u16 size;
        u16 idx;
#else
        u16 idx;
        u16 size;
#endif
#ifdef __LITTLE_ENDIAN
        u16 flags;
        u16 type;
#else
        u16 type;
        u16 flags;
#endif
#ifdef __LITTLE_ENDIAN
        u16 tcp_udp_csum;
        u16 ip_csum;
#else
        u16 ip_csum;
        u16 tcp_udp_csum;
#endif
#ifdef __LITTLE_ENDIAN
        u16 vlan;
        u16 err_flags;
#else
        u16 err_flags;
        u16 vlan;
#endif
        u32 reserved;
        u32 opague;
};


/*
 * This struct is shared with the NIC firmware.
 */
struct ring_ctrl {
        aceaddr rngptr;
#ifdef __LITTLE_ENDIAN
        u16 flags;
        u16 max_len;
#else
        u16 max_len;
        u16 flags;
#endif
        u32 pad;
};


struct ace_mac_stats {
        u32 excess_colls;
        u32 coll_1;
        u32 coll_2;
        u32 coll_3;
        u32 coll_4;
        u32 coll_5;
        u32 coll_6;
        u32 coll_7;
        u32 coll_8;
        u32 coll_9;
        u32 coll_10;
        u32 coll_11;
        u32 coll_12;
        u32 coll_13;
        u32 coll_14;
        u32 coll_15;
        u32 late_coll;
        u32 defers;
        u32 crc_err;
        u32 underrun;
        u32 crs_err;
        u32 pad[3];
        u32 drop_ula;
        u32 drop_mc;
        u32 drop_fc;
        u32 drop_space;
        u32 coll;
        u32 kept_bc;
        u32 kept_mc;
        u32 kept_uc;
};


struct ace_info {
        union {
                u32 stats[256];
        } s;
        struct ring_ctrl evt_ctrl;
        struct ring_ctrl cmd_ctrl;
        struct ring_ctrl tx_ctrl;
        struct ring_ctrl rx_std_ctrl;
        struct ring_ctrl rx_jumbo_ctrl;
        struct ring_ctrl rx_mini_ctrl;
        struct ring_ctrl rx_return_ctrl;
        aceaddr evt_prd_ptr;
        aceaddr rx_ret_prd_ptr;
        aceaddr tx_csm_ptr;
        aceaddr stats2_ptr;
};


struct ring_info {
        struct sk_buff *skb;
        DECLARE_PCI_UNMAP_ADDR(mapping)
};


/*
 * Oddly enough, adding maplen on Alpha made things noticeably slower,
 * possibly because the struct no longer fits into a single cache line.
 * Hence tx_ring_info is kept separate from ring_info.
 */
struct tx_ring_info {
        struct sk_buff *skb;
        DECLARE_PCI_UNMAP_ADDR(mapping)
        DECLARE_PCI_UNMAP_LEN(maplen)
};


/*
 * struct ace_skb holds the rings of skb's. This is an awful lot of
 * pointers, but there is no obvious smarter way to do this
 * efficiently ;-(
 */
struct ace_skb
{
        struct tx_ring_info tx_skbuff[MAX_TX_RING_ENTRIES];
        struct ring_info rx_std_skbuff[RX_STD_RING_ENTRIES];
        struct ring_info rx_mini_skbuff[RX_MINI_RING_ENTRIES];
        struct ring_info rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
};


/*
 * Struct private for the AceNIC.
 *
 * Elements are grouped so that variables used by the tx handling go
 * together, and will end up in the same cache lines etc. in order to
 * avoid cache line contention between the rx and tx handling on SMP.
 *
 * Frequently accessed variables are put at the beginning of the
 * struct to help the compiler generate better/shorter code.
 */
struct ace_private
{
        struct ace_info *info;
        struct ace_regs *regs;          /* register base */
        struct ace_skb *skb;
        dma_addr_t info_dma;            /* 32/64 bit */

        int version, link;
        int promisc, mcast_all;

        /*
         * TX elements
         */
        struct tx_desc *tx_ring;
        u32 tx_prd;
        volatile u32 tx_ret_csm;
        int tx_ring_entries;

        /*
         * RX elements
         */
        unsigned long std_refill_busy
                __attribute__ ((aligned (SMP_CACHE_BYTES)));
        unsigned long mini_refill_busy, jumbo_refill_busy;
        atomic_t cur_rx_bufs;
        atomic_t cur_mini_bufs;
        atomic_t cur_jumbo_bufs;
        u32 rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd;
        u32 cur_rx;

        struct rx_desc *rx_std_ring;
        struct rx_desc *rx_jumbo_ring;
        struct rx_desc *rx_mini_ring;
        struct rx_desc *rx_return_ring;

#if ACENIC_DO_VLAN
        struct vlan_group *vlgrp;
#endif

        int tasklet_pending, jumbo;
        struct tasklet_struct ace_tasklet;

        struct event *evt_ring;

        volatile u32 *evt_prd, *rx_ret_prd, *tx_csm;

        dma_addr_t tx_ring_dma;         /* 32/64 bit */
        dma_addr_t rx_ring_base_dma;
        dma_addr_t evt_ring_dma;
        dma_addr_t evt_prd_dma, rx_ret_prd_dma, tx_csm_dma;

        unsigned char *trace_buf;
        struct pci_dev *pdev;
        struct net_device *next;
        volatile int fw_running;
        int board_idx;
        u16 pci_command;
        u8 pci_latency;
        char name[48];
#ifdef INDEX_DEBUG
        spinlock_t debug_lock
                __attribute__ ((aligned (SMP_CACHE_BYTES)));
        u32 last_tx, last_std_rx, last_mini_rx;
#endif
        struct net_device_stats stats;
        int pci_using_dac;
};


#define TX_RESERVED MAX_SKB_FRAGS

static inline int tx_space(struct ace_private *ap, u32 csm, u32 prd)
{
        return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1);
}

#define tx_free(ap) tx_space((ap), (ap)->tx_ret_csm, (ap)->tx_prd)

#if MAX_SKB_FRAGS
#define tx_ring_full(ap, csm, prd) (tx_space(ap, csm, prd) <= TX_RESERVED)
#else
#define tx_ring_full 0
#endif
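
/*
 * Illustration only: with a 256 entry ring, a consumer index of 5 and a
 * producer index of 10, tx_space() returns (5 - 10 - 1) & 255 == 250,
 * i.e. 250 free slots (5 descriptors in flight plus one slot kept empty to
 * distinguish full from empty).  A hedged sketch of the kind of check a
 * transmit path might make (helper name hypothetical):
 */
#if 0
static inline void example_stop_if_full(struct net_device *dev,
                                        struct ace_private *ap)
{
        if (tx_ring_full(ap, ap->tx_ret_csm, ap->tx_prd))
                netif_stop_queue(dev);          /* ring (almost) full, back off */
}
#endif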


static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
{
        u64 baddr = (u64) addr;
        aa->addrlo = baddr & 0xffffffff;
        aa->addrhi = baddr >> 32;
        wmb();
}
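
/*
 * Illustration only: a hedged sketch of how set_aceaddr() might be used to
 * point a ring control block (as found in struct ace_info) at a TX ring in
 * host memory.  The helper name, max_len value and flag choice are
 * examples, not the driver's actual initialization code.
 */
#if 0
static inline void example_init_tx_rcb(struct ring_ctrl *rcb,
                                       dma_addr_t tx_ring_dma)
{
        set_aceaddr(&rcb->rngptr, tx_ring_dma);         /* bus address of the ring */
        rcb->max_len = MAX_TX_RING_ENTRIES;
        rcb->flags = RCB_FLG_TX_HOST_RING | RCB_FLG_COAL_INT_ONLY;
}
#endif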


static inline void ace_set_txprd(struct ace_regs *regs,
                                 struct ace_private *ap, u32 value)
{
#ifdef INDEX_DEBUG
        unsigned long flags;
        spin_lock_irqsave(&ap->debug_lock, flags);
        writel(value, &regs->TxPrd);
        if (value == ap->last_tx)
                printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
                       "to tx producer (%i)\n", value);
        ap->last_tx = value;
        spin_unlock_irqrestore(&ap->debug_lock, flags);
#else
        writel(value, &regs->TxPrd);
#endif
        wmb();
}


static inline void ace_mask_irq(struct net_device *dev)
{
        struct ace_private *ap = dev->priv;
        struct ace_regs *regs = ap->regs;

        if (ACE_IS_TIGON_I(ap))
                writel(1, &regs->MaskInt);
        else
                writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);

        ace_sync_irq(dev->irq);
}


static inline void ace_unmask_irq(struct net_device *dev)
{
        struct ace_private *ap = dev->priv;
        struct ace_regs *regs = ap->regs;

        if (ACE_IS_TIGON_I(ap))
                writel(0, &regs->MaskInt);
        else
                writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
}


/*
 * Prototypes
 */
static int ace_init(struct net_device *dev);
static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs);
static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs);
static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs);
static void ace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int ace_load_firmware(struct net_device *dev);
static int ace_open(struct net_device *dev);
static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int ace_close(struct net_device *dev);
static void ace_tasklet(unsigned long dev);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
static int ace_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ace_set_mac_addr(struct net_device *dev, void *p);
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
static int ace_allocate_descriptors(struct net_device *dev);
static void ace_free_descriptors(struct net_device *dev);
static void ace_init_cleanup(struct net_device *dev);
static struct net_device_stats *ace_get_stats(struct net_device *dev);
static int read_eeprom_byte(struct net_device *dev, unsigned long offset);
#if ACENIC_DO_VLAN
static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp);
static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
#endif

#endif /* _ACENIC_H_ */