1 /*
2 * smctr.c: A network driver for the SMC Token Ring Adapters.
3 *
4 * Written by Jay Schulist <jschlst@samba.org>
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 * This device driver works with the following SMC adapters:
10 * - SMC TokenCard Elite (8115T, chips 825/584)
11 * - SMC TokenCard Elite/A MCA (8115T/A, chips 825/594)
12 *
13 * Source(s):
14 * - SMC TokenCard SDK.
15 *
16 * Maintainer(s):
17 * JS Jay Schulist <jschlst@samba.org>
18 *
19 * Changes:
20 * 07102000 JS Fixed a timing problem in smctr_wait_cmd();
 *                      Also added a bit more descriptive error msgs.
22 * 07122000 JS Fixed problem with detecting a card with
23 * module io/irq/mem specified.
24 *
25 * To do:
26 * 1. Multicast support.
27 */
28
29 #ifdef MODULE
30 #include <linux/module.h>
31 #include <linux/version.h>
32 #endif
33
34 #include <linux/config.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/types.h>
38 #include <linux/fcntl.h>
39 #include <linux/interrupt.h>
40 #include <linux/ptrace.h>
41 #include <linux/ioport.h>
42 #include <linux/in.h>
43 #include <linux/slab.h>
44 #include <linux/string.h>
45 #include <linux/time.h>
46 #include <asm/system.h>
47 #include <asm/bitops.h>
48 #include <asm/io.h>
49 #include <asm/dma.h>
50 #include <asm/irq.h>
51 #include <linux/errno.h>
52 #include <linux/init.h>
53 #include <linux/pci.h>
54 #include <linux/mca.h>
55 #include <linux/delay.h>
56
57 #include <linux/netdevice.h>
58 #include <linux/etherdevice.h>
59 #include <linux/skbuff.h>
60 #include <linux/trdevice.h>
61
62 #if BITS_PER_LONG == 64
63 #error FIXME: driver does not support 64-bit platforms
64 #endif
65
66 #include "smctr.h" /* Our Stuff */
67 #include "smctr_firmware.h" /* SMC adapter firmware */
68
69 static char version[] __initdata = KERN_INFO "smctr.c: v1.4 7/12/00 by jschlst@samba.org\n";
70 static const char cardname[] = "smctr";
71
72
73 #define SMCTR_IO_EXTENT 20
74
75 /* A zero-terminated list of I/O addresses to be probed. */
76 static unsigned int smctr_portlist[] __initdata = {
77 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300,
78 0x320, 0x340, 0x360, 0x380,
79 0
80 };
81
82 #ifdef CONFIG_MCA
83 static unsigned int smctr_posid = 0x6ec6;
84 #endif
85
86 static int ringspeed;
87
88 /* SMC Name of the Adapter. */
89 static char smctr_name[] = "SMC TokenCard";
90 char *smctr_model = "Unknown";
91
92 /* Use 0 for production, 1 for verification, 2 for debug, and
93 * 3 for very verbose debug.
94 */
95 #ifndef SMCTR_DEBUG
96 #define SMCTR_DEBUG 1
97 #endif
98 static unsigned int smctr_debug = SMCTR_DEBUG;
99
/* smctr.c prototypes and functions are arranged alphabetically
 * for clarity, maintainability and pure old-fashioned fun.
102 */
103 /* A */
104 static int smctr_alloc_shared_memory(struct net_device *dev);
105
106 /* B */
107 static int smctr_bypass_state(struct net_device *dev);
108
109 /* C */
110 static int smctr_checksum_firmware(struct net_device *dev);
111 static int __init smctr_chk_isa(struct net_device *dev);
112 static int smctr_chg_rx_mask(struct net_device *dev);
113 static int smctr_clear_int(struct net_device *dev);
114 static int smctr_clear_trc_reset(int ioaddr);
115 static int smctr_close(struct net_device *dev);
116
117 /* D */
118 static int smctr_decode_firmware(struct net_device *dev);
119 static int smctr_disable_16bit(struct net_device *dev);
120 static int smctr_disable_adapter_ctrl_store(struct net_device *dev);
121 static int smctr_disable_bic_int(struct net_device *dev);
122
123 /* E */
124 static int smctr_enable_16bit(struct net_device *dev);
125 static int smctr_enable_adapter_ctrl_store(struct net_device *dev);
126 static int smctr_enable_adapter_ram(struct net_device *dev);
127 static int smctr_enable_bic_int(struct net_device *dev);
128
129 /* G */
130 static int __init smctr_get_boardid(struct net_device *dev, int mca);
131 static int smctr_get_group_address(struct net_device *dev);
132 static int smctr_get_functional_address(struct net_device *dev);
133 static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev);
134 static int smctr_get_physical_drop_number(struct net_device *dev);
135 static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue);
136 static int smctr_get_station_id(struct net_device *dev);
137 static struct net_device_stats *smctr_get_stats(struct net_device *dev);
138 static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
139 __u16 bytes_count);
140 static int smctr_get_upstream_neighbor_addr(struct net_device *dev);
141
142 /* H */
143 static int smctr_hardware_send_packet(struct net_device *dev,
144 struct net_local *tp);
145 /* I */
146 static int smctr_init_acbs(struct net_device *dev);
147 static int smctr_init_adapter(struct net_device *dev);
148 static int __init smctr_init_card(struct net_device *dev);
149 static int smctr_init_card_real(struct net_device *dev);
150 static int smctr_init_rx_bdbs(struct net_device *dev);
151 static int smctr_init_rx_fcbs(struct net_device *dev);
152 static int smctr_init_shared_memory(struct net_device *dev);
153 static int smctr_init_tx_bdbs(struct net_device *dev);
154 static int smctr_init_tx_fcbs(struct net_device *dev);
155 static int smctr_internal_self_test(struct net_device *dev);
156 static void smctr_interrupt(int irq, void *dev_id, struct pt_regs *regs);
157 static int smctr_issue_enable_int_cmd(struct net_device *dev,
158 __u16 interrupt_enable_mask);
159 static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code,
160 __u16 ibits);
161 static int smctr_issue_init_timers_cmd(struct net_device *dev);
162 static int smctr_issue_init_txrx_cmd(struct net_device *dev);
163 static int smctr_issue_insert_cmd(struct net_device *dev);
164 static int smctr_issue_read_ring_status_cmd(struct net_device *dev);
165 static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt);
166 static int smctr_issue_remove_cmd(struct net_device *dev);
167 static int smctr_issue_resume_acb_cmd(struct net_device *dev);
168 static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue);
169 static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue);
170 static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue);
171 static int smctr_issue_test_internal_rom_cmd(struct net_device *dev);
172 static int smctr_issue_test_hic_cmd(struct net_device *dev);
173 static int smctr_issue_test_mac_reg_cmd(struct net_device *dev);
174 static int smctr_issue_trc_loopback_cmd(struct net_device *dev);
175 static int smctr_issue_tri_loopback_cmd(struct net_device *dev);
176 static int smctr_issue_write_byte_cmd(struct net_device *dev,
177 short aword_cnt, void *byte);
178 static int smctr_issue_write_word_cmd(struct net_device *dev,
179 short aword_cnt, void *word);
180
181 /* J */
182 static int smctr_join_complete_state(struct net_device *dev);
183
184 /* L */
185 static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev);
186 static int smctr_load_firmware(struct net_device *dev);
187 static int smctr_load_node_addr(struct net_device *dev);
188 static int smctr_lobe_media_test(struct net_device *dev);
189 static int smctr_lobe_media_test_cmd(struct net_device *dev);
190 static int smctr_lobe_media_test_state(struct net_device *dev);
191
192 /* M */
193 static int smctr_make_8025_hdr(struct net_device *dev,
194 MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc);
195 static int smctr_make_access_pri(struct net_device *dev,
196 MAC_SUB_VECTOR *tsv);
197 static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv);
198 static int smctr_make_auth_funct_class(struct net_device *dev,
199 MAC_SUB_VECTOR *tsv);
200 static int smctr_make_corr(struct net_device *dev,
201 MAC_SUB_VECTOR *tsv, __u16 correlator);
202 static int smctr_make_funct_addr(struct net_device *dev,
203 MAC_SUB_VECTOR *tsv);
204 static int smctr_make_group_addr(struct net_device *dev,
205 MAC_SUB_VECTOR *tsv);
206 static int smctr_make_phy_drop_num(struct net_device *dev,
207 MAC_SUB_VECTOR *tsv);
208 static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
209 static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
210 static int smctr_make_ring_station_status(struct net_device *dev,
211 MAC_SUB_VECTOR *tsv);
212 static int smctr_make_ring_station_version(struct net_device *dev,
213 MAC_SUB_VECTOR *tsv);
214 static int smctr_make_tx_status_code(struct net_device *dev,
215 MAC_SUB_VECTOR *tsv, __u16 tx_fstatus);
216 static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
217 MAC_SUB_VECTOR *tsv);
218 static int smctr_make_wrap_data(struct net_device *dev,
219 MAC_SUB_VECTOR *tsv);
220
221 /* O */
222 static int smctr_open(struct net_device *dev);
223 static int smctr_open_tr(struct net_device *dev);
224
225 /* P */
226 int __init smctr_probe (struct net_device *dev);
227 static int __init smctr_probe1(struct net_device *dev, int ioaddr);
228 static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
229 struct net_device *dev, __u16 rx_status);
230
231 /* R */
232 static int smctr_ram_memory_test(struct net_device *dev);
233 static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
234 __u16 *correlator);
235 static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
236 __u16 *correlator);
237 static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf);
238 static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
239 MAC_HEADER *rmf, __u16 *correlator);
240 static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
241 __u16 *correlator);
242 static int smctr_reset_adapter(struct net_device *dev);
243 static int smctr_restart_tx_chain(struct net_device *dev, short queue);
244 static int smctr_ring_status_chg(struct net_device *dev);
245 static int smctr_rx_frame(struct net_device *dev);
246
247 /* S */
248 static int smctr_send_dat(struct net_device *dev);
249 static int smctr_send_packet(struct sk_buff *skb, struct net_device *dev);
250 static int smctr_send_lobe_media_test(struct net_device *dev);
251 static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
252 __u16 correlator);
253 static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
254 __u16 correlator);
255 static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
256 __u16 correlator);
257 static int smctr_send_rpt_tx_forward(struct net_device *dev,
258 MAC_HEADER *rmf, __u16 tx_fstatus);
259 static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
260 __u16 rcode, __u16 correlator);
261 static int smctr_send_rq_init(struct net_device *dev);
262 static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
263 __u16 *tx_fstatus);
264 static int smctr_set_auth_access_pri(struct net_device *dev,
265 MAC_SUB_VECTOR *rsv);
266 static int smctr_set_auth_funct_class(struct net_device *dev,
267 MAC_SUB_VECTOR *rsv);
268 static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
269 __u16 *correlator);
270 static int smctr_set_error_timer_value(struct net_device *dev,
271 MAC_SUB_VECTOR *rsv);
272 static int smctr_set_frame_forward(struct net_device *dev,
273 MAC_SUB_VECTOR *rsv, __u8 dc_sc);
274 static int smctr_set_local_ring_num(struct net_device *dev,
275 MAC_SUB_VECTOR *rsv);
276 static unsigned short smctr_set_ctrl_attention(struct net_device *dev);
277 static void smctr_set_multicast_list(struct net_device *dev);
278 static int smctr_set_page(struct net_device *dev, __u8 *buf);
279 static int smctr_set_phy_drop(struct net_device *dev,
280 MAC_SUB_VECTOR *rsv);
281 static int smctr_set_ring_speed(struct net_device *dev);
282 static int smctr_set_rx_look_ahead(struct net_device *dev);
283 static int smctr_set_trc_reset(int ioaddr);
284 static int smctr_setup_single_cmd(struct net_device *dev,
285 __u16 command, __u16 subcommand);
286 static int smctr_setup_single_cmd_w_data(struct net_device *dev,
287 __u16 command, __u16 subcommand);
288 static char *smctr_malloc(struct net_device *dev, __u16 size);
289 static int smctr_status_chg(struct net_device *dev);
290
291 /* T */
292 static void smctr_timeout(struct net_device *dev);
293 static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
294 __u16 queue);
295 static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue);
296 static unsigned short smctr_tx_move_frame(struct net_device *dev,
297 struct sk_buff *skb, __u8 *pbuff, unsigned int bytes);
298
299 /* U */
300 static int smctr_update_err_stats(struct net_device *dev);
301 static int smctr_update_rx_chain(struct net_device *dev, __u16 queue);
302 static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
303 __u16 queue);
304
305 /* W */
306 static int smctr_wait_cmd(struct net_device *dev);
307 static int smctr_wait_while_cbusy(struct net_device *dev);
308
309 #define TO_256_BYTE_BOUNDRY(X) (((X + 0xff) & 0xff00) - X)
310 #define TO_PARAGRAPH_BOUNDRY(X) (((X + 0x0f) & 0xfff0) - X)
311 #define PARAGRAPH_BOUNDRY(X) smctr_malloc(dev, TO_PARAGRAPH_BOUNDRY(X))
312
313 /* Allocate Adapter Shared Memory.
314 * IMPORTANT NOTE: Any changes to this function MUST be mirrored in the
315 * function "get_num_rx_bdbs" below!!!
316 *
317 * Order of memory allocation:
318 *
319 * 0. Initial System Configuration Block Pointer
320 * 1. System Configuration Block
321 * 2. System Control Block
322 * 3. Action Command Block
323 * 4. Interrupt Status Block
324 *
325 * 5. MAC TX FCB'S
326 * 6. NON-MAC TX FCB'S
327 * 7. MAC TX BDB'S
328 * 8. NON-MAC TX BDB'S
329 * 9. MAC RX FCB'S
330 * 10. NON-MAC RX FCB'S
331 * 11. MAC RX BDB'S
332 * 12. NON-MAC RX BDB'S
333 * 13. MAC TX Data Buffer( 1, 256 byte buffer)
334 * 14. MAC RX Data Buffer( 1, 256 byte buffer)
335 *
336 * 15. NON-MAC TX Data Buffer
337 * 16. NON-MAC RX Data Buffer
338 */
smctr_alloc_shared_memory(struct net_device * dev)339 static int smctr_alloc_shared_memory(struct net_device *dev)
340 {
341 struct net_local *tp = (struct net_local *)dev->priv;
342
343 if(smctr_debug > 10)
344 printk("%s: smctr_alloc_shared_memory\n", dev->name);
345
346 /* Allocate initial System Control Block pointer.
347 * This pointer is located in the last page, last offset - 4.
348 */
349 tp->iscpb_ptr = (ISCPBlock *)(tp->ram_access + ((__u32)64 * 0x400)
350 - (long)ISCP_BLOCK_SIZE);
351
352 /* Allocate System Control Blocks. */
353 tp->scgb_ptr = (SCGBlock *)smctr_malloc(dev, sizeof(SCGBlock));
354 PARAGRAPH_BOUNDRY(tp->sh_mem_used);
355
356 tp->sclb_ptr = (SCLBlock *)smctr_malloc(dev, sizeof(SCLBlock));
357 PARAGRAPH_BOUNDRY(tp->sh_mem_used);
358
359 tp->acb_head = (ACBlock *)smctr_malloc(dev,
360 sizeof(ACBlock)*tp->num_acbs);
361 PARAGRAPH_BOUNDRY(tp->sh_mem_used);
362
363 tp->isb_ptr = (ISBlock *)smctr_malloc(dev, sizeof(ISBlock));
364 PARAGRAPH_BOUNDRY(tp->sh_mem_used);
365
366 tp->misc_command_data = (__u16 *)smctr_malloc(dev, MISC_DATA_SIZE);
367 PARAGRAPH_BOUNDRY(tp->sh_mem_used);
368
369 /* Allocate transmit FCBs. */
370 tp->tx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
371 sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]);
372
373 tp->tx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
374 sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]);
375
376 tp->tx_fcb_head[BUG_QUEUE] = (FCBlock *)smctr_malloc(dev,
377 sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]);
378
379 /* Allocate transmit BDBs. */
380 tp->tx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
381 sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]);
382
383 tp->tx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
384 sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]);
385
386 tp->tx_bdb_head[BUG_QUEUE] = (BDBlock *)smctr_malloc(dev,
387 sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]);
388
389 /* Allocate receive FCBs. */
390 tp->rx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
391 sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]);
392
393 tp->rx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
394 sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]);
395
396 /* Allocate receive BDBs. */
397 tp->rx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
398 sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]);
399
400 tp->rx_bdb_end[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);
401
402 tp->rx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
403 sizeof(BDBlock) * tp->num_rx_bdbs[NON_MAC_QUEUE]);
404
405 tp->rx_bdb_end[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);
406
407 /* Allocate MAC transmit buffers.
408 * MAC Tx Buffers doen't have to be on an ODD Boundry.
409 */
410 tp->tx_buff_head[MAC_QUEUE]
411 = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[MAC_QUEUE]);
412 tp->tx_buff_curr[MAC_QUEUE] = tp->tx_buff_head[MAC_QUEUE];
413 tp->tx_buff_end [MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
414
415 /* Allocate BUG transmit buffers. */
416 tp->tx_buff_head[BUG_QUEUE]
417 = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[BUG_QUEUE]);
418 tp->tx_buff_curr[BUG_QUEUE] = tp->tx_buff_head[BUG_QUEUE];
419 tp->tx_buff_end[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
420
421 /* Allocate MAC receive data buffers.
422 * MAC Rx buffer doesn't have to be on a 256 byte boundry.
423 */
424 tp->rx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
425 RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]);
426 tp->rx_buff_end[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
427
428 /* Allocate Non-MAC transmit buffers.
429 * ?? For maximum Netware performance, put Tx Buffers on
430 * ODD Boundry and then restore malloc to Even Boundrys.
431 */
432 smctr_malloc(dev, 1L);
433 tp->tx_buff_head[NON_MAC_QUEUE]
434 = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[NON_MAC_QUEUE]);
435 tp->tx_buff_curr[NON_MAC_QUEUE] = tp->tx_buff_head[NON_MAC_QUEUE];
436 tp->tx_buff_end [NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
437 smctr_malloc(dev, 1L);
438
439 /* Allocate Non-MAC receive data buffers.
440 * To guarantee a minimum of 256 contigous memory to
441 * UM_Receive_Packet's lookahead pointer, before a page
442 * change or ring end is encountered, place each rx buffer on
443 * a 256 byte boundry.
444 */
445 smctr_malloc(dev, TO_256_BYTE_BOUNDRY(tp->sh_mem_used));
446 tp->rx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
447 RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]);
448 tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
449
450 return (0);
451 }
452
453 /* Enter Bypass state. */
smctr_bypass_state(struct net_device * dev)454 static int smctr_bypass_state(struct net_device *dev)
455 {
456 int err;
457
458 if(smctr_debug > 10)
459 printk("%s: smctr_bypass_state\n", dev->name);
460
461 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
462 JS_BYPASS_STATE);
463
464 return (err);
465 }
466
smctr_checksum_firmware(struct net_device * dev)467 static int smctr_checksum_firmware(struct net_device *dev)
468 {
469 struct net_local *tp = (struct net_local *)dev->priv;
470 __u16 i, checksum = 0;
471
472 if(smctr_debug > 10)
473 printk("%s: smctr_checksum_firmware\n", dev->name);
474
475 smctr_enable_adapter_ctrl_store(dev);
476
477 for(i = 0; i < CS_RAM_SIZE; i += 2)
478 checksum += *((__u16 *)(tp->ram_access + i));
479
480 tp->microcode_version = *(__u16 *)(tp->ram_access
481 + CS_RAM_VERSION_OFFSET);
482 tp->microcode_version >>= 8;
483
484 smctr_disable_adapter_ctrl_store(dev);
485
486 if(checksum)
487 return (checksum);
488
489 return (0);
490 }
491
/* Probe for the adapter on the MicroChannel (MCA) bus.
 *
 * Reads the card's POS (Programmable Option Select) registers to discover
 * I/O base, IRQ, RAM/ROM base and size, and media type, then claims the
 * I/O region and IRQ.  Returns 0 on success, -ENODEV if no unused adapter
 * with our POS id is found or the IRQ cannot be acquired, and -1 when the
 * kernel is built without CONFIG_MCA.
 */
static int smctr_chk_mca(struct net_device *dev)
{
#ifdef CONFIG_MCA
	struct net_local *tp = (struct net_local *)dev->priv;
	int current_slot;
	__u8 r1, r2, r3, r4, r5;

	current_slot = mca_find_unused_adapter(smctr_posid, 0);
	if(current_slot == MCA_NOTFOUND)
		return (-ENODEV);

	mca_set_adapter_name(current_slot, smctr_name);
	mca_mark_as_used(current_slot);
	tp->slot_num = current_slot;

	r1 = mca_read_stored_pos(tp->slot_num, 2);
	r2 = mca_read_stored_pos(tp->slot_num, 3);

	/* Select this slot for live POS register access.
	 * NOTE(review): outb() normally takes (value, port); these calls look
	 * like (port, value) — argument order should be confirmed against
	 * CNFG_POS_CONTROL_REG's definition before changing anything.
	 */
	if(tp->slot_num)
		outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num - 1) | CNFG_SLOT_ENABLE_BIT));
	else
		outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num) | CNFG_SLOT_ENABLE_BIT));

	r1 = inb(CNFG_POS_REG1);
	r2 = inb(CNFG_POS_REG0);

	/* MCA variant of the board carries the 594 bus interface chip. */
	tp->bic_type = BIC_594_CHIP;

	/* IO: upper nibble of POS register 2 selects the I/O base. */
	r2 = mca_read_stored_pos(tp->slot_num, 2);
	r2 &= 0xF0;
	dev->base_addr = ((__u16)r2 << 8) + (__u16)0x800;
	/* NOTE(review): request_region() return value is not checked here. */
	request_region(dev->base_addr, SMCTR_IO_EXTENT, smctr_name);

	/* IRQ: bits 2-3 of POS register 5 encode one of four IRQ lines. */
	r5 = mca_read_stored_pos(tp->slot_num, 5);
	r5 &= 0xC;
	switch(r5)
	{
		case 0:
			dev->irq = 3;
			break;

		case 0x4:
			dev->irq = 4;
			break;

		case 0x8:
			dev->irq = 10;
			break;

		default:
			dev->irq = 15;
			break;
	}
	/* NOTE(review): on this failure path the I/O region claimed above is
	 * not released — possible resource leak; confirm against callers.
	 */
	if(request_irq(dev->irq, smctr_interrupt, SA_SHIRQ, smctr_name, dev))
		return (-ENODEV);

	/* Get RAM base: POS register 3 bits select the window within either
	 * the 0xFC0000/0xFD0000 (high) or 0x0C0000/0x0D0000 (low) ranges.
	 */
	r3 = mca_read_stored_pos(tp->slot_num, 3);
	if(r3 & 0x8)
	{
		if(r3 & 0x80)
			tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0xFD0000;
		else
			tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0D0000;
	}
	else
	{
		if(r3 & 0x80)
			tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0xFC0000;
		else
			tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0C0000;
	}

	/* Get Ram Size: bits 4-5 scale the usable window from 8KB upward. */
	r3 &= 0x30;
	r3 >>= 4;

	tp->ram_usable = (__u16)CNFG_SIZE_8KB << r3;
	tp->ram_size = (__u16)CNFG_SIZE_64KB;
	tp->board_id |= TOKEN_MEDIA;

	/* ROM base from POS register 4. */
	r4 = mca_read_stored_pos(tp->slot_num, 4);
	if(r4 & 0x8)
		tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0xD0000;
	else
		tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0xC0000;

	/* Get ROM size from the upper nibble (0=8KB, 1=16KB, 2=32KB, else off). */
	r4 >>= 4;
	if(r4 == 0)
		tp->rom_size = CNFG_SIZE_8KB;
	else
	{
		if(r4 == 1)
			tp->rom_size = CNFG_SIZE_16KB;
		else
		{
			if(r4 == 2)
				tp->rom_size = CNFG_SIZE_32KB;
			else
				tp->rom_size = ROM_DISABLE;
		}
	}

	/* Get Media Type. */
	r5 = mca_read_stored_pos(tp->slot_num, 5);
	r5 &= CNFG_MEDIA_TYPE_MASK;
	switch(r5)
	{
		case (0):
			tp->media_type = MEDIA_STP_4;
			break;

		case (1):
			tp->media_type = MEDIA_STP_16;
			break;

		case (3):
			tp->media_type = MEDIA_UTP_16;
			break;

		default:
			tp->media_type = MEDIA_UTP_4;
			break;
	}
	tp->media_menu = 14;

	/* Bit 1 of POS register 2 clear => early token release enabled. */
	r2 = mca_read_stored_pos(tp->slot_num, 2);
	if(!(r2 & 0x02))
		tp->mode_bits |= EARLY_TOKEN_REL;

	/* Disable slot (deselect POS access).
	 * NOTE(review): same outb argument-order concern as above.
	 */
	outb(CNFG_POS_CONTROL_REG, 0);

	/* Identify the exact board model for the banner message. */
	tp->board_id = smctr_get_boardid(dev, 1);
	switch(tp->board_id & 0xffff)
	{
		case WD8115TA:
			smctr_model = "8115T/A";
			break;

		case WD8115T:
			if(tp->extra_info & CHIP_REV_MASK)
				smctr_model = "8115T rev XE";
			else
				smctr_model = "8115T rev XD";
			break;

		default:
			smctr_model = "Unknown";
			break;
	}

	return (0);
#else
	return (-1);
#endif /* CONFIG_MCA */
}
652
smctr_chg_rx_mask(struct net_device * dev)653 static int smctr_chg_rx_mask(struct net_device *dev)
654 {
655 struct net_local *tp = (struct net_local *)dev->priv;
656 int err = 0;
657
658 if(smctr_debug > 10)
659 printk("%s: smctr_chg_rx_mask\n", dev->name);
660
661 smctr_enable_16bit(dev);
662 smctr_set_page(dev, (__u8 *)tp->ram_access);
663
664 if(tp->mode_bits & LOOPING_MODE_MASK)
665 tp->config_word0 |= RX_OWN_BIT;
666 else
667 tp->config_word0 &= ~RX_OWN_BIT;
668
669 if(tp->receive_mask & PROMISCUOUS_MODE)
670 tp->config_word0 |= PROMISCUOUS_BIT;
671 else
672 tp->config_word0 &= ~PROMISCUOUS_BIT;
673
674 if(tp->receive_mask & ACCEPT_ERR_PACKETS)
675 tp->config_word0 |= SAVBAD_BIT;
676 else
677 tp->config_word0 &= ~SAVBAD_BIT;
678
679 if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
680 tp->config_word0 |= RXATMAC;
681 else
682 tp->config_word0 &= ~RXATMAC;
683
684 if(tp->receive_mask & ACCEPT_MULTI_PROM)
685 tp->config_word1 |= MULTICAST_ADDRESS_BIT;
686 else
687 tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;
688
689 if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
690 tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
691 else
692 {
693 if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
694 tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
695 else
696 tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
697 }
698
699 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0,
700 &tp->config_word0)))
701 {
702 return (err);
703 }
704
705 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1,
706 &tp->config_word1)))
707 {
708 return (err);
709 }
710
711 smctr_disable_16bit(dev);
712
713 return (0);
714 }
715
smctr_clear_int(struct net_device * dev)716 static int smctr_clear_int(struct net_device *dev)
717 {
718 struct net_local *tp = (struct net_local *)dev->priv;
719
720 outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR);
721
722 return (0);
723 }
724
smctr_clear_trc_reset(int ioaddr)725 static int smctr_clear_trc_reset(int ioaddr)
726 {
727 __u8 r;
728
729 r = inb(ioaddr + MSR);
730 outb(~MSR_RST & r, ioaddr + MSR);
731
732 return (0);
733 }
734
735 /*
736 * The inverse routine to smctr_open().
737 */
smctr_close(struct net_device * dev)738 static int smctr_close(struct net_device *dev)
739 {
740 struct net_local *tp = (struct net_local *)dev->priv;
741 struct sk_buff *skb;
742 int err;
743
744 netif_stop_queue(dev);
745
746 #ifdef MODULE
747 MOD_DEC_USE_COUNT;
748 #endif
749
750 tp->cleanup = 1;
751
752 /* Check to see if adapter is already in a closed state. */
753 if(tp->status != OPEN)
754 return (0);
755
756 smctr_enable_16bit(dev);
757 smctr_set_page(dev, (__u8 *)tp->ram_access);
758
759 if((err = smctr_issue_remove_cmd(dev)))
760 {
761 smctr_disable_16bit(dev);
762 return (err);
763 }
764
765 for(;;)
766 {
767 skb = skb_dequeue(&tp->SendSkbQueue);
768 if(skb == NULL)
769 break;
770 tp->QueueSkb++;
771 dev_kfree_skb(skb);
772 }
773
774
775 return (0);
776 }
777
/* Decompress the Huffman-encoded firmware image in tp->ptr_ucode into the
 * adapter's control store RAM (tp->ram_access).
 *
 * The image layout is: total bit count ("weight") at WEIGHT_OFFSET, decode
 * tree size at TREE_SIZE_OFFSET, the tree nodes at TREE_OFFSET, then the
 * packed bitstream immediately after the tree.  Each tree walk that reaches
 * a LEAF yields one 4-bit nibble; four nibbles are packed into a 16-bit
 * word (byte-swapped) and written sequentially to RAM.
 *
 * Always returns 0.
 */
static int smctr_decode_firmware(struct net_device *dev)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	short bit = 0x80, shift = 12;       /* current bit mask / nibble slot */
	DECODE_TREE_NODE *tree;
	short branch, tsize;
	__u16 buff = 0;                     /* word being assembled */
	long weight;                        /* remaining encoded bits */
	__u8 *ucode;                        /* walks the compressed bitstream */
	__u16 *mem;                         /* next control-store word to fill */

	if(smctr_debug > 10)
		printk("%s: smctr_decode_firmware\n", dev->name);

	weight = *(long *)(tp->ptr_ucode + WEIGHT_OFFSET);
	tsize = *(__u8 *)(tp->ptr_ucode + TREE_SIZE_OFFSET);
	tree = (DECODE_TREE_NODE *)(tp->ptr_ucode + TREE_OFFSET);
	/* The bitstream starts right after the serialized decode tree. */
	ucode = (__u8 *)(tp->ptr_ucode + TREE_OFFSET
		+ (tsize * sizeof(DECODE_TREE_NODE)));
	mem = (__u16 *)(tp->ram_access);

	while(weight)
	{
		branch = ROOT;
		/* Walk the tree one input bit at a time until a leaf. */
		while((tree + branch)->tag != LEAF && weight)
		{
			branch = *ucode & bit ? (tree + branch)->llink
				: (tree + branch)->rlink;

			bit >>= 1;
			weight--;

			/* Exhausted this byte: advance to the next one. */
			if(bit == 0)
			{
				bit = 0x80;
				ucode++;
			}
		}

		/* Pack the decoded nibble into its slot (high nibble first). */
		buff |= (tree + branch)->info << shift;
		shift -= 4;

		/* Four nibbles collected: emit the word and start a new one. */
		if(shift < 0)
		{
			*(mem++) = SWAP_BYTES(buff);
			buff = 0;
			shift = 12;
		}
	}

	/* The following assumes the Control Store Memory has
	 * been initialized to zero. If the last partial word
	 * is zero, it will not be written.
	 */
	if(buff)
		*(mem++) = SWAP_BYTES(buff);

	return (0);
}
837
/* Counterpart of smctr_enable_16bit().  Currently a no-op: the 16-bit LAAR
 * setting is simply left in place after enable, so there is nothing to undo.
 * Always returns 0.
 */
static int smctr_disable_16bit(struct net_device *dev)
{
	return (0);
}
842
843 /*
844 * On Exit, Adapter is:
845 * 1. TRC is in a reset state and un-initialized.
846 * 2. Adapter memory is enabled.
847 * 3. Control Store memory is out of context (-WCSS is 1).
848 */
smctr_disable_adapter_ctrl_store(struct net_device * dev)849 static int smctr_disable_adapter_ctrl_store(struct net_device *dev)
850 {
851 struct net_local *tp = (struct net_local *)dev->priv;
852 int ioaddr = dev->base_addr;
853
854 if(smctr_debug > 10)
855 printk("%s: smctr_disable_adapter_ctrl_store\n", dev->name);
856
857 tp->trc_mask |= CSR_WCSS;
858 outb(tp->trc_mask, ioaddr + CSR);
859
860 return (0);
861 }
862
smctr_disable_bic_int(struct net_device * dev)863 static int smctr_disable_bic_int(struct net_device *dev)
864 {
865 struct net_local *tp = (struct net_local *)dev->priv;
866 int ioaddr = dev->base_addr;
867
868 tp->trc_mask = CSR_MSK_ALL | CSR_MSKCBUSY
869 | CSR_MSKTINT | CSR_WCSS;
870 outb(tp->trc_mask, ioaddr + CSR);
871
872 return (0);
873 }
874
smctr_enable_16bit(struct net_device * dev)875 static int smctr_enable_16bit(struct net_device *dev)
876 {
877 struct net_local *tp = (struct net_local *)dev->priv;
878 __u8 r;
879
880 if(tp->adapter_bus == BUS_ISA16_TYPE)
881 {
882 r = inb(dev->base_addr + LAAR);
883 outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR);
884 }
885
886 return (0);
887 }
888
889 /*
890 * To enable the adapter control store memory:
891 * 1. Adapter must be in a RESET state.
892 * 2. Adapter memory must be enabled.
893 * 3. Control Store Memory is in context (-WCSS is 0).
894 */
smctr_enable_adapter_ctrl_store(struct net_device * dev)895 static int smctr_enable_adapter_ctrl_store(struct net_device *dev)
896 {
897 struct net_local *tp = (struct net_local *)dev->priv;
898 int ioaddr = dev->base_addr;
899
900 if(smctr_debug > 10)
901 printk("%s: smctr_enable_adapter_ctrl_store\n", dev->name);
902
903 smctr_set_trc_reset(ioaddr);
904 smctr_enable_adapter_ram(dev);
905
906 tp->trc_mask &= ~CSR_WCSS;
907 outb(tp->trc_mask, ioaddr + CSR);
908
909 return (0);
910 }
911
smctr_enable_adapter_ram(struct net_device * dev)912 static int smctr_enable_adapter_ram(struct net_device *dev)
913 {
914 int ioaddr = dev->base_addr;
915 __u8 r;
916
917 if(smctr_debug > 10)
918 printk("%s: smctr_enable_adapter_ram\n", dev->name);
919
920 r = inb(ioaddr + MSR);
921 outb(MSR_MEMB | r, ioaddr + MSR);
922
923 return (0);
924 }
925
smctr_enable_bic_int(struct net_device * dev)926 static int smctr_enable_bic_int(struct net_device *dev)
927 {
928 struct net_local *tp = (struct net_local *)dev->priv;
929 int ioaddr = dev->base_addr;
930 __u8 r;
931
932 switch(tp->bic_type)
933 {
934 case (BIC_584_CHIP):
935 tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
936 outb(tp->trc_mask, ioaddr + CSR);
937 r = inb(ioaddr + IRR);
938 outb(r | IRR_IEN, ioaddr + IRR);
939 break;
940
941 case (BIC_594_CHIP):
942 tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
943 outb(tp->trc_mask, ioaddr + CSR);
944 r = inb(ioaddr + IMCCR);
945 outb(r | IMCCR_EIL, ioaddr + IMCCR);
946 break;
947 }
948
949 return (0);
950 }
951
smctr_chk_isa(struct net_device * dev)952 static int __init smctr_chk_isa(struct net_device *dev)
953 {
954 struct net_local *tp = (struct net_local *)dev->priv;
955 int ioaddr = dev->base_addr;
956 __u8 r1, r2, b, chksum = 0;
957 __u16 r;
958 int i;
959
960 if(smctr_debug > 10)
961 printk("%s: smctr_chk_isa %#4x\n", dev->name, ioaddr);
962
963 if((ioaddr & 0x1F) != 0)
964 return (-ENODEV);
965
966 /* Checksum SMC node address */
967 for(i = 0; i < 8; i++)
968 {
969 b = inb(ioaddr + LAR0 + i);
970 chksum += b;
971 }
972
973 if(chksum != NODE_ADDR_CKSUM)
974 return (-ENODEV); /* Adapter Not Found */
975
976 /* Grab the region so that no one else tries to probe our ioports. */
977 request_region(ioaddr, SMCTR_IO_EXTENT, smctr_name);
978
979 b = inb(ioaddr + BDID);
980 if(b != BRD_ID_8115T)
981 {
982 printk("%s: The adapter found is not supported\n", dev->name);
983 return (-1);
984 }
985
986 /* Check for 8115T Board ID */
987 r2 = 0;
988 for(r = 0; r < 8; r++)
989 {
990 r1 = inb(ioaddr + 0x8 + r);
991 r2 += r1;
992 }
993
994 /* value of RegF adds up the sum to 0xFF */
995 if((r2 != 0xFF) && (r2 != 0xEE))
996 return (-1);
997
998 /* Get adapter ID */
999 tp->board_id = smctr_get_boardid(dev, 0);
1000 switch(tp->board_id & 0xffff)
1001 {
1002 case WD8115TA:
1003 smctr_model = "8115T/A";
1004 break;
1005
1006 case WD8115T:
1007 if(tp->extra_info & CHIP_REV_MASK)
1008 smctr_model = "8115T rev XE";
1009 else
1010 smctr_model = "8115T rev XD";
1011 break;
1012
1013 default:
1014 smctr_model = "Unknown";
1015 break;
1016 }
1017
1018 /* Store BIC type. */
1019 tp->bic_type = BIC_584_CHIP;
1020 tp->nic_type = NIC_825_CHIP;
1021
1022 /* Copy Ram Size */
1023 tp->ram_usable = CNFG_SIZE_16KB;
1024 tp->ram_size = CNFG_SIZE_64KB;
1025
1026 /* Get 58x Ram Base */
1027 r1 = inb(ioaddr);
1028 r1 &= 0x3F;
1029
1030 r2 = inb(ioaddr + CNFG_LAAR_584);
1031 r2 &= CNFG_LAAR_MASK;
1032 r2 <<= 3;
1033 r2 |= ((r1 & 0x38) >> 3);
1034
1035 tp->ram_base = ((__u32)r2 << 16) + (((__u32)(r1 & 0x7)) << 13);
1036
1037 /* Get 584 Irq */
1038 r1 = 0;
1039 r1 = inb(ioaddr + CNFG_ICR_583);
1040 r1 &= CNFG_ICR_IR2_584;
1041
1042 r2 = inb(ioaddr + CNFG_IRR_583);
1043 r2 &= CNFG_IRR_IRQS; /* 0x60 */
1044 r2 >>= 5;
1045
1046 switch(r2)
1047 {
1048 case 0:
1049 if(r1 == 0)
1050 dev->irq = 2;
1051 else
1052 dev->irq = 10;
1053 break;
1054
1055 case 1:
1056 if(r1 == 0)
1057 dev->irq = 3;
1058 else
1059 dev->irq = 11;
1060 break;
1061
1062 case 2:
1063 if(r1 == 0)
1064 {
1065 if(tp->extra_info & ALTERNATE_IRQ_BIT)
1066 dev->irq = 5;
1067 else
1068 dev->irq = 4;
1069 }
1070 else
1071 dev->irq = 15;
1072 break;
1073
1074 case 3:
1075 if(r1 == 0)
1076 dev->irq = 7;
1077 else
1078 dev->irq = 4;
1079 break;
1080
1081 default:
1082 printk("%s: No IRQ found aborting\n", dev->name);
1083 return(-1);
1084 }
1085
1086 if(request_irq(dev->irq, smctr_interrupt, SA_SHIRQ, smctr_name, dev))
1087 return (-ENODEV);
1088
1089 /* Get 58x Rom Base */
1090 r1 = inb(ioaddr + CNFG_BIO_583);
1091 r1 &= 0x3E;
1092 r1 |= 0x40;
1093
1094 tp->rom_base = (__u32)r1 << 13;
1095
1096 /* Get 58x Rom Size */
1097 r1 = inb(ioaddr + CNFG_BIO_583);
1098 r1 &= 0xC0;
1099 if(r1 == 0)
1100 tp->rom_size = ROM_DISABLE;
1101 else
1102 {
1103 r1 >>= 6;
1104 tp->rom_size = (__u16)CNFG_SIZE_8KB << r1;
1105 }
1106
1107 /* Get 58x Boot Status */
1108 r1 = inb(ioaddr + CNFG_GP2);
1109
1110 tp->mode_bits &= (~BOOT_STATUS_MASK);
1111
1112 if(r1 & CNFG_GP2_BOOT_NIBBLE)
1113 tp->mode_bits |= BOOT_TYPE_1;
1114
1115 /* Get 58x Zero Wait State */
1116 tp->mode_bits &= (~ZERO_WAIT_STATE_MASK);
1117
1118 r1 = inb(ioaddr + CNFG_IRR_583);
1119
1120 if(r1 & CNFG_IRR_ZWS)
1121 tp->mode_bits |= ZERO_WAIT_STATE_8_BIT;
1122
1123 if(tp->board_id & BOARD_16BIT)
1124 {
1125 r1 = inb(ioaddr + CNFG_LAAR_584);
1126
1127 if(r1 & CNFG_LAAR_ZWS)
1128 tp->mode_bits |= ZERO_WAIT_STATE_16_BIT;
1129 }
1130
1131 /* Get 584 Media Menu */
1132 tp->media_menu = 14;
1133 r1 = inb(ioaddr + CNFG_IRR_583);
1134
1135 tp->mode_bits &= 0xf8ff; /* (~CNFG_INTERFACE_TYPE_MASK) */
1136 if((tp->board_id & TOKEN_MEDIA) == TOKEN_MEDIA)
1137 {
1138 /* Get Advanced Features */
1139 if(((r1 & 0x6) >> 1) == 0x3)
1140 tp->media_type |= MEDIA_UTP_16;
1141 else
1142 {
1143 if(((r1 & 0x6) >> 1) == 0x2)
1144 tp->media_type |= MEDIA_STP_16;
1145 else
1146 {
1147 if(((r1 & 0x6) >> 1) == 0x1)
1148 tp->media_type |= MEDIA_UTP_4;
1149
1150 else
1151 tp->media_type |= MEDIA_STP_4;
1152 }
1153 }
1154
1155 r1 = inb(ioaddr + CNFG_GP2);
1156 if(!(r1 & 0x2) ) /* GP2_ETRD */
1157 tp->mode_bits |= EARLY_TOKEN_REL;
1158
1159 /* see if the chip is corrupted
1160 if(smctr_read_584_chksum(ioaddr))
1161 {
1162 printk("%s: EEPROM Checksum Failure\n", dev->name);
1163 return(-1);
1164 }
1165 */
1166 }
1167
1168 return (0);
1169 }
1170
/* Read the adapter's board-ID feature word from the BIC.
 *
 * @dev: device whose base_addr points at the adapter's I/O window.
 * @mca: non-zero when probing a MicroChannel (594) card, zero for ISA (584).
 *
 * Side effects: sets tp->board_id to 0, ORs feature bits into
 * tp->extra_info (including the chip revision), and sets
 * tp->adapter_bus.  Walks a BID_REG_1/BID_REG_3 register sequence that
 * presumably pages in the EEPROM engineering page — per the SMC SDK;
 * not verifiable from this file alone.
 *
 * Returns the assembled board-ID mask, or -1 when the ID byte reports
 * a major version newer than 1.0.
 */
static int __init smctr_get_boardid(struct net_device *dev, int mca)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        int ioaddr = dev->base_addr;
        __u8 r, r1, IdByte;
        __u16 BoardIdMask;

        tp->board_id = BoardIdMask = 0;

        /* Seed the feature mask and extra_info for the bus flavor. */
        if(mca)
        {
                BoardIdMask |= (MICROCHANNEL+INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
                tp->extra_info |= (INTERFACE_594_CHIP+RAM_SIZE_64K+NIC_825_BIT+ALTERNATE_IRQ_BIT+SLOT_16BIT);
        }
        else
        {
                BoardIdMask|=(INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
                tp->extra_info |= (INTERFACE_584_CHIP + RAM_SIZE_64K
                        + NIC_825_BIT + ALTERNATE_IRQ_BIT);
        }

        if(!mca)
        {
                /* ISA: write back the masked register, then test whether
                 * the 16-bit slot bit reads as set to find the bus width.
                 */
                r = inb(ioaddr + BID_REG_1);
                r &= 0x0c;
                outb(r, ioaddr + BID_REG_1);
                r = inb(ioaddr + BID_REG_1);

                if(r & BID_SIXTEEN_BIT_BIT)
                {
                        tp->extra_info |= SLOT_16BIT;
                        tp->adapter_bus = BUS_ISA16_TYPE;
                }
                else
                        tp->adapter_bus = BUS_ISA8_TYPE;
        }
        else
                tp->adapter_bus = BUS_MCA_TYPE;

        /* Get Board Id Byte */
        IdByte = inb(ioaddr + BID_BOARD_ID_BYTE);

        /* if Major version > 1.0 then
         *      return;
         */
        if(IdByte & 0xF8)
                return (-1);

        /* Select the engineering page and trigger a recall (BID_RLA) so
         * the chip revision becomes readable.
         */
        r1 = inb(ioaddr + BID_REG_1);
        r1 &= BID_ICR_MASK;
        r1 |= BID_OTHER_BIT;

        outb(r1, ioaddr + BID_REG_1);
        r1 = inb(ioaddr + BID_REG_3);

        r1 &= BID_EAR_MASK;
        r1 |= BID_ENGR_PAGE;

        outb(r1, ioaddr + BID_REG_3);
        r1 = inb(ioaddr + BID_REG_1);
        r1 &= BID_ICR_MASK;
        r1 |= (BID_RLA | BID_OTHER_BIT);

        outb(r1, ioaddr + BID_REG_1);

        /* Busy-wait until the recall completes. */
        r1 = inb(ioaddr + BID_REG_1);
        while(r1 & BID_RECALL_DONE_MASK)
                r1 = inb(ioaddr + BID_REG_1);

        r = inb(ioaddr + BID_LAR_0 + BID_REG_6);

        /* clear chip rev bits */
        tp->extra_info &= ~CHIP_REV_MASK;
        tp->extra_info |= ((r & BID_EEPROM_CHIP_REV_MASK) << 6);

        /* Restore the normal page (BID_EA6) and recall again so the
         * adapter is left in its default register state.
         */
        r1 = inb(ioaddr + BID_REG_1);
        r1 &= BID_ICR_MASK;
        r1 |= BID_OTHER_BIT;

        outb(r1, ioaddr + BID_REG_1);
        r1 = inb(ioaddr + BID_REG_3);

        r1 &= BID_EAR_MASK;
        r1 |= BID_EA6;

        outb(r1, ioaddr + BID_REG_3);
        r1 = inb(ioaddr + BID_REG_1);

        r1 &= BID_ICR_MASK;
        r1 |= BID_RLA;

        outb(r1, ioaddr + BID_REG_1);
        r1 = inb(ioaddr + BID_REG_1);

        /* Wait for the second recall to finish before returning. */
        while(r1 & BID_RECALL_DONE_MASK)
                r1 = inb(ioaddr + BID_REG_1);

        return (BoardIdMask);
}
1270
smctr_get_group_address(struct net_device * dev)1271 static int smctr_get_group_address(struct net_device *dev)
1272 {
1273 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);
1274
1275 return(smctr_wait_cmd(dev));
1276 }
1277
smctr_get_functional_address(struct net_device * dev)1278 static int smctr_get_functional_address(struct net_device *dev)
1279 {
1280 smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);
1281
1282 return(smctr_wait_cmd(dev));
1283 }
1284
1285 /* Calculate number of Non-MAC receive BDB's and data buffers.
1286 * This function must simulate allocateing shared memory exactly
1287 * as the allocate_shared_memory function above.
1288 */
smctr_get_num_rx_bdbs(struct net_device * dev)1289 static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
1290 {
1291 struct net_local *tp = (struct net_local *)dev->priv;
1292 unsigned int mem_used = 0;
1293
1294 /* Allocate System Control Blocks. */
1295 mem_used += sizeof(SCGBlock);
1296
1297 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1298 mem_used += sizeof(SCLBlock);
1299
1300 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1301 mem_used += sizeof(ACBlock) * tp->num_acbs;
1302
1303 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1304 mem_used += sizeof(ISBlock);
1305
1306 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1307 mem_used += MISC_DATA_SIZE;
1308
1309 /* Allocate transmit FCB's. */
1310 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1311
1312 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE];
1313 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE];
1314 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE];
1315
1316 /* Allocate transmit BDBs. */
1317 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE];
1318 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE];
1319 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE];
1320
1321 /* Allocate receive FCBs. */
1322 mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE];
1323 mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE];
1324
1325 /* Allocate receive BDBs. */
1326 mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE];
1327
1328 /* Allocate MAC transmit buffers.
1329 * MAC transmit buffers don't have to be on an ODD Boundry.
1330 */
1331 mem_used += tp->tx_buff_size[MAC_QUEUE];
1332
1333 /* Allocate BUG transmit buffers. */
1334 mem_used += tp->tx_buff_size[BUG_QUEUE];
1335
1336 /* Allocate MAC receive data buffers.
1337 * MAC receive buffers don't have to be on a 256 byte boundry.
1338 */
1339 mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE];
1340
1341 /* Allocate Non-MAC transmit buffers.
1342 * For maximum Netware performance, put Tx Buffers on
1343 * ODD Boundry,and then restore malloc to Even Boundrys.
1344 */
1345 mem_used += 1L;
1346 mem_used += tp->tx_buff_size[NON_MAC_QUEUE];
1347 mem_used += 1L;
1348
1349 /* CALCULATE NUMBER OF NON-MAC RX BDB'S
1350 * AND NON-MAC RX DATA BUFFERS
1351 *
1352 * Make sure the mem_used offset at this point is the
1353 * same as in allocate_shared memory or the following
1354 * boundry adjustment will be incorrect (i.e. not allocating
1355 * the non-mac receive buffers above cannot change the 256
1356 * byte offset).
1357 *
1358 * Since this cannot be guaranteed, adding the full 256 bytes
1359 * to the amount of shared memory used at this point will guaranteed
1360 * that the rx data buffers do not overflow shared memory.
1361 */
1362 mem_used += 0x100;
1363
1364 return((0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock)));
1365 }
1366
smctr_get_physical_drop_number(struct net_device * dev)1367 static int smctr_get_physical_drop_number(struct net_device *dev)
1368 {
1369 smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);
1370
1371 return(smctr_wait_cmd(dev));
1372 }
1373
smctr_get_rx_pointer(struct net_device * dev,short queue)1374 static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
1375 {
1376 struct net_local *tp = (struct net_local *)dev->priv;
1377 BDBlock *bdb;
1378
1379 bdb = (BDBlock *)((__u32)tp->ram_access
1380 + (__u32)(tp->rx_fcb_curr[queue]->trc_bdb_ptr));
1381
1382 tp->rx_fcb_curr[queue]->bdb_ptr = bdb;
1383
1384 return ((__u8 *)bdb->data_block_ptr);
1385 }
1386
smctr_get_station_id(struct net_device * dev)1387 static int smctr_get_station_id(struct net_device *dev)
1388 {
1389 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);
1390
1391 return(smctr_wait_cmd(dev));
1392 }
1393
1394 /*
1395 * Get the current statistics. This may be called with the card open
1396 * or closed.
1397 */
smctr_get_stats(struct net_device * dev)1398 static struct net_device_stats *smctr_get_stats(struct net_device *dev)
1399 {
1400 struct net_local *tp = (struct net_local *)dev->priv;
1401
1402 return ((struct net_device_stats *)&tp->MacStat);
1403 }
1404
/* Reserve a transmit FCB plus buffer space for a frame on one of the
 * transmit queues.
 *
 * @dev:         adapter device.
 * @queue:       MAC_QUEUE, NON_MAC_QUEUE or BUG_QUEUE.
 * @bytes_count: frame length in bytes.
 *
 * On success returns the FCB with its BDB pointed at the reserved
 * buffer and advances the queue's current FCB/buffer pointers.
 * Returns (FCBlock *)(-1L) — NOT NULL — when no FCB or buffer space
 * is available; callers must compare against that sentinel.
 */
static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
        __u16 bytes_count)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        FCBlock *pFCB;
        BDBlock *pbdb;
        unsigned short alloc_size;
        unsigned short *temp;

        if(smctr_debug > 20)
                printk("smctr_get_tx_fcb\n");

        /* check if there is enough FCB blocks */
        if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue])
                return ((FCBlock *)(-1L));

        /* round off the input pkt size to the nearest even number */
        alloc_size = (bytes_count + 1) & 0xfffe;

        /* check if enough mem */
        if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue])
                return ((FCBlock *)(-1L));

        /* check if past the end ;
         * if exactly enough mem to end of ring, alloc from front.
         * this avoids update of curr when curr = end
         */
        if(((unsigned long)(tp->tx_buff_curr[queue]) + alloc_size)
                >= (unsigned long)(tp->tx_buff_end[queue]))
        {
                /* check if enough memory from ring head */
                /* The unused tail of the ring is charged to this
                 * allocation so accounting stays consistent on wrap.
                 */
                alloc_size = alloc_size +
                        (__u16)((__u32)tp->tx_buff_end[queue]
                        - (__u32)tp->tx_buff_curr[queue]);

                if((tp->tx_buff_used[queue] + alloc_size)
                        > tp->tx_buff_size[queue])
                {
                        return ((FCBlock *)(-1L));
                }

                /* ring wrap */
                tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];
        }

        /* Commit the reservation and fill in the FCB/BDB pair. */
        tp->tx_buff_used[queue] += alloc_size;
        tp->num_tx_fcbs_used[queue]++;
        tp->tx_fcb_curr[queue]->frame_length = bytes_count;
        tp->tx_fcb_curr[queue]->memory_alloc = alloc_size;
        temp = tp->tx_buff_curr[queue];
        /* Advance by the even-rounded frame size (not alloc_size, which
         * may also include the skipped ring tail after a wrap).
         */
        tp->tx_buff_curr[queue]
                = (__u16 *)((__u32)temp + (__u32)((bytes_count + 1) & 0xfffe));

        pbdb = tp->tx_fcb_curr[queue]->bdb_ptr;
        pbdb->buffer_length = bytes_count;
        pbdb->data_block_ptr = temp;
        pbdb->trc_data_block_ptr = TRC_POINTER(temp);

        pFCB = tp->tx_fcb_curr[queue];
        tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr;

        return (pFCB);
}
1468
smctr_get_upstream_neighbor_addr(struct net_device * dev)1469 static int smctr_get_upstream_neighbor_addr(struct net_device *dev)
1470 {
1471 smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS);
1472
1473 return(smctr_wait_cmd(dev));
1474 }
1475
smctr_hardware_send_packet(struct net_device * dev,struct net_local * tp)1476 static int smctr_hardware_send_packet(struct net_device *dev,
1477 struct net_local *tp)
1478 {
1479 struct tr_statistics *tstat = &tp->MacStat;
1480 struct sk_buff *skb;
1481 FCBlock *fcb;
1482
1483 if(smctr_debug > 10)
1484 printk("%s: smctr_hardware_send_packet\n", dev->name);
1485
1486 if(tp->status != OPEN)
1487 return (-1);
1488
1489 if(tp->monitor_state_ready != 1)
1490 return (-1);
1491
1492 for(;;)
1493 {
1494 /* Send first buffer from queue */
1495 skb = skb_dequeue(&tp->SendSkbQueue);
1496 if(skb == NULL)
1497 return (-1);
1498
1499 tp->QueueSkb++;
1500
1501 if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size) return (-1);
1502
1503 smctr_enable_16bit(dev);
1504 smctr_set_page(dev, (__u8 *)tp->ram_access);
1505
1506 if((fcb = smctr_get_tx_fcb(dev, NON_MAC_QUEUE, skb->len))
1507 == (FCBlock *)(-1L))
1508 {
1509 smctr_disable_16bit(dev);
1510 return (-1);
1511 }
1512
1513 smctr_tx_move_frame(dev, skb,
1514 (__u8 *)fcb->bdb_ptr->data_block_ptr, skb->len);
1515
1516 smctr_set_page(dev, (__u8 *)fcb);
1517
1518 smctr_trc_send_packet(dev, fcb, NON_MAC_QUEUE);
1519 dev_kfree_skb(skb);
1520
1521 tstat->tx_packets++;
1522
1523 smctr_disable_16bit(dev);
1524 }
1525
1526 return (0);
1527 }
1528
smctr_init_acbs(struct net_device * dev)1529 static int smctr_init_acbs(struct net_device *dev)
1530 {
1531 struct net_local *tp = (struct net_local *)dev->priv;
1532 unsigned int i;
1533 ACBlock *acb;
1534
1535 if(smctr_debug > 10)
1536 printk("%s: smctr_init_acbs\n", dev->name);
1537
1538 acb = tp->acb_head;
1539 acb->cmd_done_status = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
1540 acb->cmd_info = ACB_CHAIN_END;
1541 acb->cmd = 0;
1542 acb->subcmd = 0;
1543 acb->data_offset_lo = 0;
1544 acb->data_offset_hi = 0;
1545 acb->next_ptr
1546 = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
1547 acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
1548
1549 for(i = 1; i < tp->num_acbs; i++)
1550 {
1551 acb = acb->next_ptr;
1552 acb->cmd_done_status
1553 = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
1554 acb->cmd_info = ACB_CHAIN_END;
1555 acb->cmd = 0;
1556 acb->subcmd = 0;
1557 acb->data_offset_lo = 0;
1558 acb->data_offset_hi = 0;
1559 acb->next_ptr
1560 = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
1561 acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
1562 }
1563
1564 acb->next_ptr = tp->acb_head;
1565 acb->trc_next_ptr = TRC_POINTER(tp->acb_head);
1566 tp->acb_next = tp->acb_head->next_ptr;
1567 tp->acb_curr = tp->acb_head->next_ptr;
1568 tp->num_acbs_used = 0;
1569
1570 return (0);
1571 }
1572
/* Bring the adapter from reset to a fully initialized (still CLOSED)
 * state: set up queue bookkeeping and address defaults, verify the
 * previously loaded firmware and shared RAM, run the internal self
 * test, and re-initialize afterwards because the self test clobbers
 * the TRC's internal registers.
 *
 * Returns 0 on success or a negative errno on failure.  The call
 * sequence below is order-sensitive; do not reorder.
 */
static int smctr_init_adapter(struct net_device *dev)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        int err;

        if(smctr_debug > 10)
                printk("%s: smctr_init_adapter\n", dev->name);

        tp->status = CLOSED;
        tp->page_offset_mask = (tp->ram_usable * 1024) - 1;
        skb_queue_head_init(&tp->SendSkbQueue);
        tp->QueueSkb = MAX_TX_QUEUE;

        /* Force required high bits of the group/functional addresses.
         * NOTE(review): exact register semantics come from the SMC SDK;
         * not verifiable from this file.
         */
        if(!(tp->group_address_0 & 0x0080))
                tp->group_address_0 |= 0x00C0;

        if(!(tp->functional_address_0 & 0x00C0))
                tp->functional_address_0 |= 0x00C0;

        tp->functional_address[0] &= 0xFF7F;

        /* Default to all function classes / priority 6 when unset. */
        if(tp->authorized_function_classes == 0)
                tp->authorized_function_classes = 0x7FFF;

        if(tp->authorized_access_priority == 0)
                tp->authorized_access_priority = 0x06;

        smctr_disable_bic_int(dev);
        smctr_set_trc_reset(dev->base_addr);

        smctr_enable_16bit(dev);
        smctr_set_page(dev, (__u8 *)tp->ram_access);

        if(smctr_checksum_firmware(dev))
        {
                printk("%s: Previously loaded firmware is missing\n",dev->name); return (-ENOENT);
        }

        if((err = smctr_ram_memory_test(dev)))
        {
                printk("%s: RAM memory test failed.\n", dev->name);
                return (-EIO);
        }

        smctr_set_rx_look_ahead(dev);
        smctr_load_node_addr(dev);

        /* Initialize adapter for Internal Self Test. */
        smctr_reset_adapter(dev);
        if((err = smctr_init_card_real(dev)))
        {
                printk("%s: Initialization of card failed (%d)\n",
                        dev->name, err);
                return (-EINVAL);
        }

        /* This routine clobbers the TRC's internal registers. */
        if((err = smctr_internal_self_test(dev)))
        {
                printk("%s: Card failed internal self test (%d)\n",
                        dev->name, err);
                return (-EINVAL);
        }

        /* Re-Initialize adapter's internal registers */
        smctr_reset_adapter(dev);
        if((err = smctr_init_card_real(dev)))
        {
                printk("%s: Initialization of card failed (%d)\n",
                        dev->name, err);
                return (-EINVAL);
        }

        smctr_enable_bic_int(dev);

        if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
                return (err);

        smctr_disable_16bit(dev);

        return (0);
}
1655
1656 /* Dummy function */
smctr_init_card(struct net_device * dev)1657 static int __init smctr_init_card(struct net_device *dev)
1658 {
1659 if(smctr_debug > 10)
1660 printk("%s: smctr_init_card\n", dev->name);
1661
1662 return (0);
1663 }
1664
smctr_init_card_real(struct net_device * dev)1665 static int smctr_init_card_real(struct net_device *dev)
1666 {
1667 struct net_local *tp = (struct net_local *)dev->priv;
1668 int err = 0;
1669
1670 if(smctr_debug > 10)
1671 printk("%s: smctr_init_card_real\n", dev->name);
1672
1673 tp->sh_mem_used = 0;
1674 tp->num_acbs = NUM_OF_ACBS;
1675
1676 /* Range Check Max Packet Size */
1677 if(tp->max_packet_size < 256)
1678 tp->max_packet_size = 256;
1679 else
1680 {
1681 if(tp->max_packet_size > NON_MAC_TX_BUFFER_MEMORY)
1682 tp->max_packet_size = NON_MAC_TX_BUFFER_MEMORY;
1683 }
1684
1685 tp->num_of_tx_buffs = (NON_MAC_TX_BUFFER_MEMORY
1686 / tp->max_packet_size) - 1;
1687
1688 if(tp->num_of_tx_buffs > NUM_NON_MAC_TX_FCBS)
1689 tp->num_of_tx_buffs = NUM_NON_MAC_TX_FCBS;
1690 else
1691 {
1692 if(tp->num_of_tx_buffs == 0)
1693 tp->num_of_tx_buffs = 1;
1694 }
1695
1696 /* Tx queue constants */
1697 tp->num_tx_fcbs [BUG_QUEUE] = NUM_BUG_TX_FCBS;
1698 tp->num_tx_bdbs [BUG_QUEUE] = NUM_BUG_TX_BDBS;
1699 tp->tx_buff_size [BUG_QUEUE] = BUG_TX_BUFFER_MEMORY;
1700 tp->tx_buff_used [BUG_QUEUE] = 0;
1701 tp->tx_queue_status [BUG_QUEUE] = NOT_TRANSMITING;
1702
1703 tp->num_tx_fcbs [MAC_QUEUE] = NUM_MAC_TX_FCBS;
1704 tp->num_tx_bdbs [MAC_QUEUE] = NUM_MAC_TX_BDBS;
1705 tp->tx_buff_size [MAC_QUEUE] = MAC_TX_BUFFER_MEMORY;
1706 tp->tx_buff_used [MAC_QUEUE] = 0;
1707 tp->tx_queue_status [MAC_QUEUE] = NOT_TRANSMITING;
1708
1709 tp->num_tx_fcbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_FCBS;
1710 tp->num_tx_bdbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_BDBS;
1711 tp->tx_buff_size [NON_MAC_QUEUE] = NON_MAC_TX_BUFFER_MEMORY;
1712 tp->tx_buff_used [NON_MAC_QUEUE] = 0;
1713 tp->tx_queue_status [NON_MAC_QUEUE] = NOT_TRANSMITING;
1714
1715 /* Receive Queue Constants */
1716 tp->num_rx_fcbs[MAC_QUEUE] = NUM_MAC_RX_FCBS;
1717 tp->num_rx_bdbs[MAC_QUEUE] = NUM_MAC_RX_BDBS;
1718
1719 if(tp->extra_info & CHIP_REV_MASK)
1720 tp->num_rx_fcbs[NON_MAC_QUEUE] = 78; /* 825 Rev. XE */
1721 else
1722 tp->num_rx_fcbs[NON_MAC_QUEUE] = 7; /* 825 Rev. XD */
1723
1724 tp->num_rx_bdbs[NON_MAC_QUEUE] = smctr_get_num_rx_bdbs(dev);
1725
1726 smctr_alloc_shared_memory(dev);
1727 smctr_init_shared_memory(dev);
1728
1729 if((err = smctr_issue_init_timers_cmd(dev)))
1730 return (err);
1731
1732 if((err = smctr_issue_init_txrx_cmd(dev)))
1733 {
1734 printk("%s: Hardware failure\n", dev->name);
1735 return (err);
1736 }
1737
1738 return (0);
1739 }
1740
smctr_init_rx_bdbs(struct net_device * dev)1741 static int smctr_init_rx_bdbs(struct net_device *dev)
1742 {
1743 struct net_local *tp = (struct net_local *)dev->priv;
1744 unsigned int i, j;
1745 BDBlock *bdb;
1746 __u16 *buf;
1747
1748 if(smctr_debug > 10)
1749 printk("%s: smctr_init_rx_bdbs\n", dev->name);
1750
1751 for(i = 0; i < NUM_RX_QS_USED; i++)
1752 {
1753 bdb = tp->rx_bdb_head[i];
1754 buf = tp->rx_buff_head[i];
1755 bdb->info = (BDB_CHAIN_END | BDB_NO_WARNING);
1756 bdb->buffer_length = RX_DATA_BUFFER_SIZE;
1757 bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
1758 bdb->data_block_ptr = buf;
1759 bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1760
1761 if(i == NON_MAC_QUEUE)
1762 bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
1763 else
1764 bdb->trc_data_block_ptr = TRC_POINTER(buf);
1765
1766 for(j = 1; j < tp->num_rx_bdbs[i]; j++)
1767 {
1768 bdb->next_ptr->back_ptr = bdb;
1769 bdb = bdb->next_ptr;
1770 buf = (__u16 *)((char *)buf + RX_DATA_BUFFER_SIZE);
1771 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
1772 bdb->buffer_length = RX_DATA_BUFFER_SIZE;
1773 bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
1774 bdb->data_block_ptr = buf;
1775 bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1776
1777 if(i == NON_MAC_QUEUE)
1778 bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
1779 else
1780 bdb->trc_data_block_ptr = TRC_POINTER(buf);
1781 }
1782
1783 bdb->next_ptr = tp->rx_bdb_head[i];
1784 bdb->trc_next_ptr = TRC_POINTER(tp->rx_bdb_head[i]);
1785
1786 tp->rx_bdb_head[i]->back_ptr = bdb;
1787 tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr;
1788 }
1789
1790 return (0);
1791 }
1792
smctr_init_rx_fcbs(struct net_device * dev)1793 static int smctr_init_rx_fcbs(struct net_device *dev)
1794 {
1795 struct net_local *tp = (struct net_local *)dev->priv;
1796 unsigned int i, j;
1797 FCBlock *fcb;
1798
1799 for(i = 0; i < NUM_RX_QS_USED; i++)
1800 {
1801 fcb = tp->rx_fcb_head[i];
1802 fcb->frame_status = 0;
1803 fcb->frame_length = 0;
1804 fcb->info = FCB_CHAIN_END;
1805 fcb->next_ptr = (FCBlock *)(((char*)fcb) + sizeof(FCBlock));
1806 if(i == NON_MAC_QUEUE)
1807 fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
1808 else
1809 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1810
1811 for(j = 1; j < tp->num_rx_fcbs[i]; j++)
1812 {
1813 fcb->next_ptr->back_ptr = fcb;
1814 fcb = fcb->next_ptr;
1815 fcb->frame_status = 0;
1816 fcb->frame_length = 0;
1817 fcb->info = FCB_WARNING;
1818 fcb->next_ptr
1819 = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1820
1821 if(i == NON_MAC_QUEUE)
1822 fcb->trc_next_ptr
1823 = RX_FCB_TRC_POINTER(fcb->next_ptr);
1824 else
1825 fcb->trc_next_ptr
1826 = TRC_POINTER(fcb->next_ptr);
1827 }
1828
1829 fcb->next_ptr = tp->rx_fcb_head[i];
1830
1831 if(i == NON_MAC_QUEUE)
1832 fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
1833 else
1834 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1835
1836 tp->rx_fcb_head[i]->back_ptr = fcb;
1837 tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr;
1838 }
1839
1840 return(0);
1841 }
1842
/* Initialize every control structure in adapter shared memory: the
 * ISCP, SCGB, SCLB, ISB, the ACB ring, and all transmit/receive
 * FCB/BDB rings.  Assumes smctr_alloc_shared_memory() has already
 * assigned the tp->*_ptr / *_head addresses (see smctr_init_card_real,
 * which calls the two back to back).
 */
static int smctr_init_shared_memory(struct net_device *dev)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        unsigned int i;
        __u32 *iscpb;

        if(smctr_debug > 10)
                printk("%s: smctr_init_shared_memory\n", dev->name);

        smctr_set_page(dev, (__u8 *)(unsigned int)tp->iscpb_ptr);

        /* Initialize Initial System Configuration Point. (ISCP) */
        iscpb = (__u32 *)PAGE_POINTER(&tp->iscpb_ptr->trc_scgb_ptr);
        /* The TRC expects the SCGB pointer with its 16-bit words swapped. */
        *iscpb = (__u32)(SWAP_WORDS(TRC_POINTER(tp->scgb_ptr)));

        smctr_set_page(dev, (__u8 *)tp->ram_access);

        /* Initialize System Configuration Pointers. (SCP) */
        tp->scgb_ptr->config = (SCGB_ADDRESS_POINTER_FORMAT
                | SCGB_MULTI_WORD_CONTROL | SCGB_DATA_FORMAT
                | SCGB_BURST_LENGTH);

        tp->scgb_ptr->trc_sclb_ptr      = TRC_POINTER(tp->sclb_ptr);
        tp->scgb_ptr->trc_acb_ptr       = TRC_POINTER(tp->acb_head);
        tp->scgb_ptr->trc_isb_ptr       = TRC_POINTER(tp->isb_ptr);
        tp->scgb_ptr->isbsiz            = (sizeof(ISBlock)) - 2;

        /* Initialize System Control Block. (SCB) */
        tp->sclb_ptr->valid_command             = SCLB_VALID | SCLB_CMD_NOP;
        tp->sclb_ptr->iack_code                 = 0;
        tp->sclb_ptr->resume_control            = 0;
        tp->sclb_ptr->int_mask_control          = 0;
        tp->sclb_ptr->int_mask_state            = 0;

        /* Initialize Interrupt Status Block. (ISB) */
        /* IType 0xf0 marks an entry as free; see the interrupt handler,
         * which scans for entries with the 0xf0 bits clear.
         */
        for(i = 0; i < NUM_OF_INTERRUPTS; i++)
        {
                tp->isb_ptr->IStatus[i].IType = 0xf0;
                tp->isb_ptr->IStatus[i].ISubtype = 0;
        }

        tp->current_isb_index = 0;

        /* Initialize Action Command Block. (ACB) */
        smctr_init_acbs(dev);

        /* Initialize transmit FCB's and BDB's. */
        smctr_link_tx_fcbs_to_bdbs(dev);
        smctr_init_tx_bdbs(dev);
        smctr_init_tx_fcbs(dev);

        /* Initialize receive FCB's and BDB's. */
        smctr_init_rx_bdbs(dev);
        smctr_init_rx_fcbs(dev);

        return (0);
}
1900
smctr_init_tx_bdbs(struct net_device * dev)1901 static int smctr_init_tx_bdbs(struct net_device *dev)
1902 {
1903 struct net_local *tp = (struct net_local *)dev->priv;
1904 unsigned int i, j;
1905 BDBlock *bdb;
1906
1907 for(i = 0; i < NUM_TX_QS_USED; i++)
1908 {
1909 bdb = tp->tx_bdb_head[i];
1910 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
1911 bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
1912 bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1913
1914 for(j = 1; j < tp->num_tx_bdbs[i]; j++)
1915 {
1916 bdb->next_ptr->back_ptr = bdb;
1917 bdb = bdb->next_ptr;
1918 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
1919 bdb->next_ptr
1920 = (BDBlock *)(((char *)bdb) + sizeof( BDBlock)); bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1921 }
1922
1923 bdb->next_ptr = tp->tx_bdb_head[i];
1924 bdb->trc_next_ptr = TRC_POINTER(tp->tx_bdb_head[i]);
1925 tp->tx_bdb_head[i]->back_ptr = bdb;
1926 }
1927
1928 return (0);
1929 }
1930
smctr_init_tx_fcbs(struct net_device * dev)1931 static int smctr_init_tx_fcbs(struct net_device *dev)
1932 {
1933 struct net_local *tp = (struct net_local *)dev->priv;
1934 unsigned int i, j;
1935 FCBlock *fcb;
1936
1937 for(i = 0; i < NUM_TX_QS_USED; i++)
1938 {
1939 fcb = tp->tx_fcb_head[i];
1940 fcb->frame_status = 0;
1941 fcb->frame_length = 0;
1942 fcb->info = FCB_CHAIN_END;
1943 fcb->next_ptr = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1944 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1945
1946 for(j = 1; j < tp->num_tx_fcbs[i]; j++)
1947 {
1948 fcb->next_ptr->back_ptr = fcb;
1949 fcb = fcb->next_ptr;
1950 fcb->frame_status = 0;
1951 fcb->frame_length = 0;
1952 fcb->info = FCB_CHAIN_END;
1953 fcb->next_ptr
1954 = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1955 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1956 }
1957
1958 fcb->next_ptr = tp->tx_fcb_head[i];
1959 fcb->trc_next_ptr = TRC_POINTER(tp->tx_fcb_head[i]);
1960
1961 tp->tx_fcb_head[i]->back_ptr = fcb;
1962 tp->tx_fcb_end[i] = tp->tx_fcb_head[i]->next_ptr;
1963 tp->tx_fcb_curr[i] = tp->tx_fcb_head[i]->next_ptr;
1964 tp->num_tx_fcbs_used[i] = 0;
1965 }
1966
1967 return (0);
1968 }
1969
smctr_internal_self_test(struct net_device * dev)1970 static int smctr_internal_self_test(struct net_device *dev)
1971 {
1972 struct net_local *tp = (struct net_local *)dev->priv;
1973 int err;
1974
1975 if((err = smctr_issue_test_internal_rom_cmd(dev)))
1976 return (err);
1977
1978 if((err = smctr_wait_cmd(dev)))
1979 return (err);
1980
1981 if(tp->acb_head->cmd_done_status & 0xff)
1982 return (-1);
1983
1984 if((err = smctr_issue_test_hic_cmd(dev)))
1985 return (err);
1986
1987 if((err = smctr_wait_cmd(dev)))
1988 return (err);
1989
1990 if(tp->acb_head->cmd_done_status & 0xff)
1991 return (-1);
1992
1993 if((err = smctr_issue_test_mac_reg_cmd(dev)))
1994 return (err);
1995
1996 if((err = smctr_wait_cmd(dev)))
1997 return (err);
1998
1999 if(tp->acb_head->cmd_done_status & 0xff)
2000 return (-1);
2001
2002 return (0);
2003 }
2004
2005 /*
2006 * The typical workload of the driver: Handle the network interface interrupts.
2007 */
smctr_interrupt(int irq,void * dev_id,struct pt_regs * regs)2008 static void smctr_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2009 {
2010 struct net_device *dev = dev_id;
2011 struct net_local *tp;
2012 int ioaddr;
2013 __u16 interrupt_unmask_bits = 0, interrupt_ack_code = 0xff00;
2014 __u16 err1, err = NOT_MY_INTERRUPT;
2015 __u8 isb_type, isb_subtype;
2016 __u16 isb_index;
2017
2018 if(dev == NULL)
2019 {
2020 printk("%s: irq %d for unknown device.\n", dev->name, irq);
2021 return;
2022 }
2023
2024 ioaddr = dev->base_addr;
2025 tp = (struct net_local *)dev->priv;
2026
2027 if(tp->status == NOT_INITIALIZED)
2028 return;
2029
2030 smctr_disable_bic_int(dev);
2031 smctr_enable_16bit(dev);
2032
2033 smctr_clear_int(dev);
2034
2035 /* First read the LSB */
2036 while((tp->isb_ptr->IStatus[tp->current_isb_index].IType & 0xf0) == 0)
2037 {
2038 isb_index = tp->current_isb_index;
2039 isb_type = tp->isb_ptr->IStatus[isb_index].IType;
2040 isb_subtype = tp->isb_ptr->IStatus[isb_index].ISubtype;
2041
2042 (tp->current_isb_index)++;
2043 if(tp->current_isb_index == NUM_OF_INTERRUPTS)
2044 tp->current_isb_index = 0;
2045
2046 if(isb_type >= 0x10)
2047 {
2048 smctr_disable_16bit(dev);
2049 return;
2050 }
2051
2052 err = HARDWARE_FAILED;
2053 interrupt_ack_code = isb_index;
2054 tp->isb_ptr->IStatus[isb_index].IType |= 0xf0;
2055
2056 interrupt_unmask_bits |= (1 << (__u16)isb_type);
2057
2058 switch(isb_type)
2059 {
2060 case ISB_IMC_MAC_TYPE_3:
2061 smctr_disable_16bit(dev);
2062
2063 switch(isb_subtype)
2064 {
2065 case 0:
2066 tp->monitor_state
2067 = MS_MONITOR_FSM_INACTIVE;
2068 break;
2069
2070 case 1:
2071 tp->monitor_state
2072 = MS_REPEAT_BEACON_STATE;
2073 break;
2074
2075 case 2:
2076 tp->monitor_state
2077 = MS_REPEAT_CLAIM_TOKEN_STATE;
2078 break;
2079
2080 case 3:
2081 tp->monitor_state
2082 = MS_TRANSMIT_CLAIM_TOKEN_STATE; break;
2083
2084 case 4:
2085 tp->monitor_state
2086 = MS_STANDBY_MONITOR_STATE;
2087 break;
2088
2089 case 5:
2090 tp->monitor_state
2091 = MS_TRANSMIT_BEACON_STATE;
2092 break;
2093
2094 case 6:
2095 tp->monitor_state
2096 = MS_ACTIVE_MONITOR_STATE;
2097 break;
2098
2099 case 7:
2100 tp->monitor_state
2101 = MS_TRANSMIT_RING_PURGE_STATE;
2102 break;
2103
2104 case 8: /* diagnostic state */
2105 break;
2106
2107 case 9:
2108 tp->monitor_state
2109 = MS_BEACON_TEST_STATE;
2110 if(smctr_lobe_media_test(dev))
2111 {
2112 tp->ring_status_flags
2113 = RING_STATUS_CHANGED;
2114 tp->ring_status
2115 = AUTO_REMOVAL_ERROR;
2116 smctr_ring_status_chg(dev);
2117 smctr_bypass_state(dev);
2118 }
2119 else
2120 smctr_issue_insert_cmd(dev);
2121 break;
2122
2123 /* case 0x0a-0xff, illegal states */
2124 default:
2125 break;
2126 }
2127
2128 tp->ring_status_flags = MONITOR_STATE_CHANGED;
2129 err = smctr_ring_status_chg(dev);
2130
2131 smctr_enable_16bit(dev);
2132 break;
2133
2134 /* Type 0x02 - MAC Error Counters Interrupt
2135 * One or more MAC Error Counter is half full
2136 * MAC Error Counters
2137 * Lost_FR_Error_Counter
2138 * RCV_Congestion_Counter
2139 * FR_copied_Error_Counter
2140 * FREQ_Error_Counter
2141 * Token_Error_Counter
2142 * Line_Error_Counter
2143 * Internal_Error_Count
2144 */
2145 case ISB_IMC_MAC_ERROR_COUNTERS:
2146 /* Read 802.5 Error Counters */
2147 err = smctr_issue_read_ring_status_cmd(dev);
2148 break;
2149
2150 /* Type 0x04 - MAC Type 2 Interrupt
2151 * HOST needs to enqueue MAC Frame for transmission
2152 * SubType Bit 15 - RQ_INIT_PDU( Request Initialization) * Changed from RQ_INIT_PDU to
2153 * TRC_Status_Changed_Indicate
2154 */
2155 case ISB_IMC_MAC_TYPE_2:
2156 err = smctr_issue_read_ring_status_cmd(dev);
2157 break;
2158
2159
2160 /* Type 0x05 - TX Frame Interrupt (FI). */
2161 case ISB_IMC_TX_FRAME:
2162 /* BUG QUEUE for TRC stuck receive BUG */
2163 if(isb_subtype & TX_PENDING_PRIORITY_2)
2164 {
2165 if((err = smctr_tx_complete(dev,
2166 BUG_QUEUE)) != SUCCESS)
2167 break;
2168 }
2169
2170 /* NON-MAC frames only */
2171 if(isb_subtype & TX_PENDING_PRIORITY_1)
2172 {
2173 if((err = smctr_tx_complete(dev,
2174 NON_MAC_QUEUE)) != SUCCESS)
2175 break;
2176 }
2177
2178 /* MAC frames only */
2179 if(isb_subtype & TX_PENDING_PRIORITY_0)
2180 err = smctr_tx_complete(dev, MAC_QUEUE); break;
2181
2182 /* Type 0x06 - TX END OF QUEUE (FE) */
2183 case ISB_IMC_END_OF_TX_QUEUE:
2184 /* BUG queue */
2185 if(isb_subtype & TX_PENDING_PRIORITY_2)
2186 {
2187 /* ok to clear Receive FIFO overrun
2188 * imask send_BUG now completes.
2189 */
2190 interrupt_unmask_bits |= 0x800;
2191
2192 tp->tx_queue_status[BUG_QUEUE]
2193 = NOT_TRANSMITING;
2194 if((err = smctr_tx_complete(dev,
2195 BUG_QUEUE)) != SUCCESS)
2196 break;
2197 if((err = smctr_restart_tx_chain(dev,
2198 BUG_QUEUE)) != SUCCESS)
2199 break;
2200 }
2201
2202 /* NON-MAC queue only */
2203 if(isb_subtype & TX_PENDING_PRIORITY_1)
2204 {
2205 tp->tx_queue_status[NON_MAC_QUEUE]
2206 = NOT_TRANSMITING;
2207 if((err = smctr_tx_complete(dev,
2208 NON_MAC_QUEUE)) != SUCCESS)
2209 break;
2210 if((err = smctr_restart_tx_chain(dev,
2211 NON_MAC_QUEUE)) != SUCCESS)
2212 break;
2213 }
2214
2215 /* MAC queue only */
2216 if(isb_subtype & TX_PENDING_PRIORITY_0)
2217 {
2218 tp->tx_queue_status[MAC_QUEUE]
2219 = NOT_TRANSMITING;
2220 if((err = smctr_tx_complete(dev,
2221 MAC_QUEUE)) != SUCCESS)
2222 break;
2223
2224 err = smctr_restart_tx_chain(dev,
2225 MAC_QUEUE);
2226 }
2227 break;
2228
2229 /* Type 0x07 - NON-MAC RX Resource Interrupt
2230 * Subtype bit 12 - (BW) BDB warning
2231 * Subtype bit 13 - (FW) FCB warning
2232 * Subtype bit 14 - (BE) BDB End of chain
2233 * Subtype bit 15 - (FE) FCB End of chain
2234 */
2235 case ISB_IMC_NON_MAC_RX_RESOURCE:
2236 tp->rx_fifo_overrun_count = 0;
2237 tp->receive_queue_number = NON_MAC_QUEUE;
2238 err1 = smctr_rx_frame(dev);
2239
2240 if(isb_subtype & NON_MAC_RX_RESOURCE_FE)
2241 {
2242 if((err = smctr_issue_resume_rx_fcb_cmd( dev, NON_MAC_QUEUE)) != SUCCESS) break;
2243
2244 if(tp->ptr_rx_fcb_overruns)
2245 (*tp->ptr_rx_fcb_overruns)++;
2246 }
2247
2248 if(isb_subtype & NON_MAC_RX_RESOURCE_BE)
2249 {
2250 if((err = smctr_issue_resume_rx_bdb_cmd( dev, NON_MAC_QUEUE)) != SUCCESS) break;
2251
2252 if(tp->ptr_rx_bdb_overruns)
2253 (*tp->ptr_rx_bdb_overruns)++;
2254 }
2255 err = err1;
2256 break;
2257
2258 /* Type 0x08 - MAC RX Resource Interrupt
2259 * Subtype bit 12 - (BW) BDB warning
2260 * Subtype bit 13 - (FW) FCB warning
2261 * Subtype bit 14 - (BE) BDB End of chain
2262 * Subtype bit 15 - (FE) FCB End of chain
2263 */
2264 case ISB_IMC_MAC_RX_RESOURCE:
2265 tp->receive_queue_number = MAC_QUEUE;
2266 err1 = smctr_rx_frame(dev);
2267
2268 if(isb_subtype & MAC_RX_RESOURCE_FE)
2269 {
2270 if((err = smctr_issue_resume_rx_fcb_cmd( dev, MAC_QUEUE)) != SUCCESS)
2271 break;
2272
2273 if(tp->ptr_rx_fcb_overruns)
2274 (*tp->ptr_rx_fcb_overruns)++;
2275 }
2276
2277 if(isb_subtype & MAC_RX_RESOURCE_BE)
2278 {
2279 if((err = smctr_issue_resume_rx_bdb_cmd( dev, MAC_QUEUE)) != SUCCESS)
2280 break;
2281
2282 if(tp->ptr_rx_bdb_overruns)
2283 (*tp->ptr_rx_bdb_overruns)++;
2284 }
2285 err = err1;
2286 break;
2287
2288 /* Type 0x09 - NON_MAC RX Frame Interrupt */
2289 case ISB_IMC_NON_MAC_RX_FRAME:
2290 tp->rx_fifo_overrun_count = 0;
2291 tp->receive_queue_number = NON_MAC_QUEUE;
2292 err = smctr_rx_frame(dev);
2293 break;
2294
2295 /* Type 0x0A - MAC RX Frame Interrupt */
2296 case ISB_IMC_MAC_RX_FRAME:
2297 tp->receive_queue_number = MAC_QUEUE;
2298 err = smctr_rx_frame(dev);
2299 break;
2300
2301 /* Type 0x0B - TRC status
2302 * TRC has encountered an error condition
2303 * subtype bit 14 - transmit FIFO underrun
2304 * subtype bit 15 - receive FIFO overrun
2305 */
2306 case ISB_IMC_TRC_FIFO_STATUS:
2307 if(isb_subtype & TRC_FIFO_STATUS_TX_UNDERRUN)
2308 {
2309 if(tp->ptr_tx_fifo_underruns)
2310 (*tp->ptr_tx_fifo_underruns)++;
2311 }
2312
2313 if(isb_subtype & TRC_FIFO_STATUS_RX_OVERRUN)
2314 {
2315 /* update overrun stuck receive counter
2316 * if >= 3, has to clear it by sending
2317 * back to back frames. We pick
2318 * DAT(duplicate address MAC frame)
2319 */
2320 tp->rx_fifo_overrun_count++;
2321
2322 if(tp->rx_fifo_overrun_count >= 3)
2323 {
2324 tp->rx_fifo_overrun_count = 0;
2325
2326 /* delay clearing fifo overrun
2327 * imask till send_BUG tx
2328 * complete posted
2329 */
2330 interrupt_unmask_bits &= (~0x800);
2331 printk("Jay please send bug\n");// smctr_send_bug(dev);
2332 }
2333
2334 if(tp->ptr_rx_fifo_overruns)
2335 (*tp->ptr_rx_fifo_overruns)++;
2336 }
2337
2338 err = SUCCESS;
2339 break;
2340
2341 /* Type 0x0C - Action Command Status Interrupt
2342 * Subtype bit 14 - CB end of command chain (CE)
2343 * Subtype bit 15 - CB command interrupt (CI)
2344 */
2345 case ISB_IMC_COMMAND_STATUS:
2346 err = SUCCESS;
2347 if(tp->acb_head->cmd == ACB_CMD_HIC_NOP)
2348 {
2349 printk("i1\n");
2350 smctr_disable_16bit(dev);
2351
2352 /* XXXXXXXXXXXXXXXXX */
2353 /* err = UM_Interrupt(dev); */
2354
2355 smctr_enable_16bit(dev);
2356 }
2357 else
2358 {
2359 if((tp->acb_head->cmd
2360 == ACB_CMD_READ_TRC_STATUS)
2361 && (tp->acb_head->subcmd
2362 == RW_TRC_STATUS_BLOCK))
2363 {
2364 if(tp->ptr_bcn_type != 0)
2365 {
2366 *(tp->ptr_bcn_type)
2367 = (__u32)((SBlock *)tp->misc_command_data)->BCN_Type;
2368 }
2369
2370 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & ERROR_COUNTERS_CHANGED)
2371 {
2372 smctr_update_err_stats(dev);
2373 }
2374
2375 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & TI_NDIS_RING_STATUS_CHANGED)
2376 {
2377 tp->ring_status
2378 = ((SBlock*)tp->misc_command_data)->TI_NDIS_Ring_Status;
2379 smctr_disable_16bit(dev);
2380 err = smctr_ring_status_chg(dev);
2381 smctr_enable_16bit(dev);
2382 if((tp->ring_status & REMOVE_RECEIVED)
2383 && (tp->config_word0 & NO_AUTOREMOVE))
2384 {
2385 smctr_issue_remove_cmd(dev);
2386 }
2387
2388 if(err != SUCCESS)
2389 {
2390 tp->acb_pending
2391 = 0;
2392 break;
2393 }
2394 }
2395
2396 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & UNA_CHANGED)
2397 {
2398 if(tp->ptr_una)
2399 {
2400 tp->ptr_una[0]
2401 = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[0]);
2402 tp->ptr_una[1]
2403 = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[1]);
2404 tp->ptr_una[2]
2405 = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[2]);
2406 }
2407
2408 }
2409
2410 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate
2411 & READY_TO_SEND_RQ_INIT) {
2412 err = smctr_send_rq_init(dev);
2413 }
2414 }
2415 }
2416
2417 tp->acb_pending = 0;
2418 break;
2419
2420 /* Type 0x0D - MAC Type 1 interrupt
2421 * Subtype -- 00 FR_BCN received at S12
2422 * 01 FR_BCN received at S21
2423 * 02 FR_DAT(DA=MA, A<>0) received at S21
2424 * 03 TSM_EXP at S21
2425 * 04 FR_REMOVE received at S42
2426 * 05 TBR_EXP, BR_FLAG_SET at S42
2427 * 06 TBT_EXP at S53
2428 */
2429 case ISB_IMC_MAC_TYPE_1:
2430 if(isb_subtype > 8)
2431 {
2432 err = HARDWARE_FAILED;
2433 break;
2434 }
2435
2436 err = SUCCESS;
2437 switch(isb_subtype)
2438 {
2439 case 0:
2440 tp->join_state = JS_BYPASS_STATE;
2441 if(tp->status != CLOSED)
2442 {
2443 tp->status = CLOSED;
2444 err = smctr_status_chg(dev);
2445 }
2446 break;
2447
2448 case 1:
2449 tp->join_state
2450 = JS_LOBE_TEST_STATE;
2451 break;
2452
2453 case 2:
2454 tp->join_state
2455 = JS_DETECT_MONITOR_PRESENT_STATE;
2456 break;
2457
2458 case 3:
2459 tp->join_state
2460 = JS_AWAIT_NEW_MONITOR_STATE;
2461 break;
2462
2463 case 4:
2464 tp->join_state
2465 = JS_DUPLICATE_ADDRESS_TEST_STATE;
2466 break;
2467
2468 case 5:
2469 tp->join_state
2470 = JS_NEIGHBOR_NOTIFICATION_STATE;
2471 break;
2472
2473 case 6:
2474 tp->join_state
2475 = JS_REQUEST_INITIALIZATION_STATE;
2476 break;
2477
2478 case 7:
2479 tp->join_state
2480 = JS_JOIN_COMPLETE_STATE;
2481 tp->status = OPEN;
2482 err = smctr_status_chg(dev);
2483 break;
2484
2485 case 8:
2486 tp->join_state
2487 = JS_BYPASS_WAIT_STATE;
2488 break;
2489 }
2490 break ;
2491
                /* Type 0x0E - TRC Initialization Sequence Interrupt
                 * Subtype -- 00-FF Initialization sequence complete
                 */
2495 case ISB_IMC_TRC_INTRNL_TST_STATUS:
2496 tp->status = INITIALIZED;
2497 smctr_disable_16bit(dev);
2498 err = smctr_status_chg(dev);
2499 smctr_enable_16bit(dev);
2500 break;
2501
2502 /* other interrupt types, illegal */
2503 default:
2504 break;
2505 }
2506
2507 if(err != SUCCESS)
2508 break;
2509 }
2510
2511 /* Checking the ack code instead of the unmask bits here is because :
2512 * while fixing the stuck receive, DAT frame are sent and mask off
2513 * FIFO overrun interrupt temporarily (interrupt_unmask_bits = 0)
2514 * but we still want to issue ack to ISB
2515 */
2516 if(!(interrupt_ack_code & 0xff00))
2517 smctr_issue_int_ack(dev, interrupt_ack_code,
2518 interrupt_unmask_bits);
2519
2520 smctr_disable_16bit(dev);
2521 smctr_enable_bic_int(dev);
2522
2523 return;
2524 }
2525
/* Post a CLEAR_INTERRUPT_MASK SCLB command carrying the given mask so
 * the adapter re-enables those interrupt sources.
 * Returns 0 on success, or the error from smctr_wait_while_cbusy().
 */
static int smctr_issue_enable_int_cmd(struct net_device *dev,
        __u16 interrupt_enable_mask)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        int rc;

        rc = smctr_wait_while_cbusy(dev);
        if(rc)
                return (rc);

        tp->sclb_ptr->int_mask_control = interrupt_enable_mask;
        tp->sclb_ptr->valid_command = SCLB_VALID
                | SCLB_CMD_CLEAR_INTERRUPT_MASK;

        smctr_set_ctrl_attention(dev);

        return (0);
}
2543
/* Acknowledge a posted interrupt via the SCLB.  The iack code is
 * shifted left one bit to form the offset from the ISB base before it
 * is handed to the adapter; ibits is the new interrupt mask.
 * Returns 0 on success, -1 if the adapter stays busy.
 */
static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code,
        __u16 ibits)
{
        struct net_local *tp = (struct net_local *)dev->priv;

        if(smctr_wait_while_cbusy(dev))
                return (-1);

        tp->sclb_ptr->int_mask_control = ibits;
        tp->sclb_ptr->iack_code = iack_code << 1; /* offset from ISB base */
        tp->sclb_ptr->resume_control = 0;
        tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_IACK_CODE_VALID
                | SCLB_CMD_CLEAR_INTERRUPT_MASK;

        smctr_set_ctrl_attention(dev);

        return (0);
}
2562
/* Build the TRC "init timers" parameter block in misc_command_data and
 * issue ACB_CMD_INIT_TRC_TIMERS.  The block holds, in order: the two
 * config words, the 802.5 protocol timer limits (one table for 4Mb
 * media, one for 16Mb), the node address, group / functional / bit-wise
 * group addresses, ring numbers, physical drop number and product
 * instance ID.  Returns 0 on success or an error from the wait helpers.
 */
static int smctr_issue_init_timers_cmd(struct net_device *dev)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        unsigned int i;
        int err;
        /* Cursor walking the 16-bit words of the parameter block. */
        __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data;

        if((err = smctr_wait_while_cbusy(dev)))
                return (err);

        if((err = smctr_wait_cmd(dev)))
                return (err);

        tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
        tp->config_word1 = 0;

        /* Any 16Mb media type selects the 16MHz frequency bit. */
        if((tp->media_type == MEDIA_STP_16)
                || (tp->media_type == MEDIA_UTP_16)
                || (tp->media_type == MEDIA_STP_16_UTP_16))
        {
                tp->config_word0 |= FREQ_16MB_BIT;
        }

        if(tp->mode_bits & EARLY_TOKEN_REL)
                tp->config_word0 |= ETREN;

        if(tp->mode_bits & LOOPING_MODE_MASK)
                tp->config_word0 |= RX_OWN_BIT;
        else
                tp->config_word0 &= ~RX_OWN_BIT;

        /* Map the driver receive mask onto the TRC config word bits. */
        if(tp->receive_mask & PROMISCUOUS_MODE)
                tp->config_word0 |= PROMISCUOUS_BIT;
        else
                tp->config_word0 &= ~PROMISCUOUS_BIT;

        if(tp->receive_mask & ACCEPT_ERR_PACKETS)
                tp->config_word0 |= SAVBAD_BIT;
        else
                tp->config_word0 &= ~SAVBAD_BIT;

        if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
                tp->config_word0 |= RXATMAC;
        else
                tp->config_word0 &= ~RXATMAC;

        if(tp->receive_mask & ACCEPT_MULTI_PROM)
                tp->config_word1 |= MULTICAST_ADDRESS_BIT;
        else
                tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;

        /* "Spanning" implies both source-routing bits; otherwise only
         * the explorer bit may be set.
         */
        if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
                tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
        else
        {
                if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
                        tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
                else
                        tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
        }

        if((tp->media_type == MEDIA_STP_16)
                || (tp->media_type == MEDIA_UTP_16)
                || (tp->media_type == MEDIA_STP_16_UTP_16))
        {
                tp->config_word1 |= INTERFRAME_SPACING_16;
        }
        else
                tp->config_word1 |= INTERFRAME_SPACING_4;

        *pTimer_Struc++ = tp->config_word0;
        *pTimer_Struc++ = tp->config_word1;

        /* 802.5 timer limits: 4Mb table ... */
        if((tp->media_type == MEDIA_STP_4)
                || (tp->media_type == MEDIA_UTP_4)
                || (tp->media_type == MEDIA_STP_4_UTP_4))
        {
                *pTimer_Struc++ = 0x00FA;       /* prescale */
                *pTimer_Struc++ = 0x2710;       /* TPT_limit */
                *pTimer_Struc++ = 0x2710;       /* TQP_limit */
                *pTimer_Struc++ = 0x0A28;       /* TNT_limit */
                *pTimer_Struc++ = 0x3E80;       /* TBT_limit */
                *pTimer_Struc++ = 0x3A98;       /* TSM_limit */
                *pTimer_Struc++ = 0x1B58;       /* TAM_limit */
                *pTimer_Struc++ = 0x00C8;       /* TBR_limit */
                *pTimer_Struc++ = 0x07D0;       /* TER_limit */
                *pTimer_Struc++ = 0x000A;       /* TGT_limit */
                *pTimer_Struc++ = 0x1162;       /* THT_limit */
                *pTimer_Struc++ = 0x07D0;       /* TRR_limit */
                *pTimer_Struc++ = 0x1388;       /* TVX_limit */
                *pTimer_Struc++ = 0x0000;       /* reserved */
        }
        else
        {
                /* ... and the 16Mb table. */
                *pTimer_Struc++ = 0x03E8;       /* prescale */
                *pTimer_Struc++ = 0x9C40;       /* TPT_limit */
                *pTimer_Struc++ = 0x9C40;       /* TQP_limit */
                *pTimer_Struc++ = 0x0A28;       /* TNT_limit */
                *pTimer_Struc++ = 0x3E80;       /* TBT_limit */
                *pTimer_Struc++ = 0x3A98;       /* TSM_limit */
                *pTimer_Struc++ = 0x1B58;       /* TAM_limit */
                *pTimer_Struc++ = 0x00C8;       /* TBR_limit */
                *pTimer_Struc++ = 0x07D0;       /* TER_limit */
                *pTimer_Struc++ = 0x000A;       /* TGT_limit */
                *pTimer_Struc++ = 0x4588;       /* THT_limit */
                *pTimer_Struc++ = 0x1F40;       /* TRR_limit */
                *pTimer_Struc++ = 0x4E20;       /* TVX_limit */
                *pTimer_Struc++ = 0x0000;       /* reserved */
        }

        /* Set node address (packed two bytes per 16-bit word). */
        *pTimer_Struc++ = dev->dev_addr[0] << 8
                | (dev->dev_addr[1] & 0xFF);
        *pTimer_Struc++ = dev->dev_addr[2] << 8
                | (dev->dev_addr[3] & 0xFF);
        *pTimer_Struc++ = dev->dev_addr[4] << 8
                | (dev->dev_addr[5] & 0xFF);

        /* Set group address (each word byte-swapped). */
        *pTimer_Struc++ = tp->group_address_0 << 8
                | tp->group_address_0 >> 8;
        *pTimer_Struc++ = tp->group_address[0] << 8
                | tp->group_address[0] >> 8;
        *pTimer_Struc++ = tp->group_address[1] << 8
                | tp->group_address[1] >> 8;

        /* Set functional address (each word byte-swapped). */
        *pTimer_Struc++ = tp->functional_address_0 << 8
                | tp->functional_address_0 >> 8;
        *pTimer_Struc++ = tp->functional_address[0] << 8
                | tp->functional_address[0] >> 8;
        *pTimer_Struc++ = tp->functional_address[1] << 8
                | tp->functional_address[1] >> 8;

        /* Set Bit-Wise group address (each word byte-swapped). */
        *pTimer_Struc++ = tp->bitwise_group_address[0] << 8
                | tp->bitwise_group_address[0] >> 8;
        *pTimer_Struc++ = tp->bitwise_group_address[1] << 8
                | tp->bitwise_group_address[1] >> 8;

        /* Set ring number address. */
        *pTimer_Struc++ = tp->source_ring_number;
        *pTimer_Struc++ = tp->target_ring_number;

        /* Physical drop number (not used, zeroed). */
        *pTimer_Struc++ = (unsigned short)0;
        *pTimer_Struc++ = (unsigned short)0;

        /* Product instance ID (not used, zeroed). */
        for(i = 0; i < 9; i++)
                *pTimer_Struc++ = (unsigned short)0;

        err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0);

        return (err);
}
2719
smctr_issue_init_txrx_cmd(struct net_device * dev)2720 static int smctr_issue_init_txrx_cmd(struct net_device *dev)
2721 {
2722 struct net_local *tp = (struct net_local *)dev->priv;
2723 unsigned int i;
2724 int err;
2725 void **txrx_ptrs = (void *)tp->misc_command_data;
2726
2727 if((err = smctr_wait_while_cbusy(dev)))
2728 return (err);
2729
2730 if((err = smctr_wait_cmd(dev)))
2731 {
2732 printk("%s: Hardware failure\n", dev->name);
2733 return (err);
2734 }
2735
2736 /* Initialize Transmit Queue Pointers that are used, to point to
2737 * a single FCB.
2738 */
2739 for(i = 0; i < NUM_TX_QS_USED; i++)
2740 *txrx_ptrs++ = (void *)TRC_POINTER(tp->tx_fcb_head[i]);
2741
2742 /* Initialize Transmit Queue Pointers that are NOT used to ZERO. */
2743 for(; i < MAX_TX_QS; i++)
2744 *txrx_ptrs++ = (void *)0;
2745
2746 /* Initialize Receive Queue Pointers (MAC and Non-MAC) that are
2747 * used, to point to a single FCB and a BDB chain of buffers.
2748 */
2749 for(i = 0; i < NUM_RX_QS_USED; i++)
2750 {
2751 *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_fcb_head[i]);
2752 *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_bdb_head[i]);
2753 }
2754
2755 /* Initialize Receive Queue Pointers that are NOT used to ZERO. */
2756 for(; i < MAX_RX_QS; i++)
2757 {
2758 *txrx_ptrs++ = (void *)0;
2759 *txrx_ptrs++ = (void *)0;
2760 }
2761
2762 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0);
2763
2764 return (err);
2765 }
2766
smctr_issue_insert_cmd(struct net_device * dev)2767 static int smctr_issue_insert_cmd(struct net_device *dev)
2768 {
2769 int err;
2770
2771 err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP);
2772
2773 return (err);
2774 }
2775
smctr_issue_read_ring_status_cmd(struct net_device * dev)2776 static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
2777 {
2778 int err;
2779
2780 if((err = smctr_wait_while_cbusy(dev)))
2781 return (err);
2782
2783 if((err = smctr_wait_cmd(dev)))
2784 return (err);
2785
2786 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS,
2787 RW_TRC_STATUS_BLOCK);
2788
2789 return (err);
2790 }
2791
/* Read aword_cnt 16-bit values from the adapter (MCT READ VALUE). */
static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
{
        int rc;

        rc = smctr_wait_while_cbusy(dev);
        if(rc)
                return (rc);

        rc = smctr_wait_cmd(dev);
        if(rc)
                return (rc);

        return (smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE,
                aword_cnt));
}
2807
smctr_issue_remove_cmd(struct net_device * dev)2808 static int smctr_issue_remove_cmd(struct net_device *dev)
2809 {
2810 struct net_local *tp = (struct net_local *)dev->priv;
2811 int err;
2812
2813 if((err = smctr_wait_while_cbusy(dev)))
2814 return (err);
2815
2816 tp->sclb_ptr->resume_control = 0;
2817 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE;
2818
2819 smctr_set_ctrl_attention(dev);
2820
2821 return (0);
2822 }
2823
smctr_issue_resume_acb_cmd(struct net_device * dev)2824 static int smctr_issue_resume_acb_cmd(struct net_device *dev)
2825 {
2826 struct net_local *tp = (struct net_local *)dev->priv;
2827 int err;
2828
2829 if((err = smctr_wait_while_cbusy(dev)))
2830 return (err);
2831
2832 tp->sclb_ptr->resume_control = SCLB_RC_ACB;
2833 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
2834
2835 tp->acb_pending = 1;
2836
2837 smctr_set_ctrl_attention(dev);
2838
2839 return (0);
2840 }
2841
/* Resume the receive BDB chain for the given (MAC or non-MAC) queue. */
static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        int rc;

        rc = smctr_wait_while_cbusy(dev);
        if(rc)
                return (rc);

        tp->sclb_ptr->resume_control = (queue == MAC_QUEUE)
                ? SCLB_RC_RX_MAC_BDB : SCLB_RC_RX_NON_MAC_BDB;
        tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;

        smctr_set_ctrl_attention(dev);

        return (0);
}
2861
/* Resume the receive FCB chain for the given (MAC or non-MAC) queue.
 * Returns 0 on success, -1 if the adapter stays busy.
 */
static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
{
        struct net_local *tp = (struct net_local *)dev->priv;

        if(smctr_debug > 10)
                printk("%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name);

        if(smctr_wait_while_cbusy(dev))
                return (-1);

        tp->sclb_ptr->resume_control = (queue == MAC_QUEUE)
                ? SCLB_RC_RX_MAC_FCB : SCLB_RC_RX_NON_MAC_FCB;
        tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;

        smctr_set_ctrl_attention(dev);

        return (0);
}
2883
/* Resume the transmit FCB chain for the given queue; the resume bit is
 * selected by shifting SCLB_RC_TFCB0 up by the queue number.
 * Returns 0 on success, -1 if the adapter stays busy.
 */
static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
{
        struct net_local *tp = (struct net_local *)dev->priv;

        if(smctr_debug > 10)
                printk("%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name);

        if(smctr_wait_while_cbusy(dev))
                return (-1);

        tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue);
        tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID;

        smctr_set_ctrl_attention(dev);

        return (0);
}
2901
smctr_issue_test_internal_rom_cmd(struct net_device * dev)2902 static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
2903 {
2904 int err;
2905
2906 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2907 TRC_INTERNAL_ROM_TEST);
2908
2909 return (err);
2910 }
2911
smctr_issue_test_hic_cmd(struct net_device * dev)2912 static int smctr_issue_test_hic_cmd(struct net_device *dev)
2913 {
2914 int err;
2915
2916 err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST,
2917 TRC_HOST_INTERFACE_REG_TEST);
2918
2919 return (err);
2920 }
2921
smctr_issue_test_mac_reg_cmd(struct net_device * dev)2922 static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
2923 {
2924 int err;
2925
2926 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2927 TRC_MAC_REGISTERS_TEST);
2928
2929 return (err);
2930 }
2931
smctr_issue_trc_loopback_cmd(struct net_device * dev)2932 static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
2933 {
2934 int err;
2935
2936 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2937 TRC_INTERNAL_LOOPBACK);
2938
2939 return (err);
2940 }
2941
smctr_issue_tri_loopback_cmd(struct net_device * dev)2942 static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
2943 {
2944 int err;
2945
2946 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2947 TRC_TRI_LOOPBACK);
2948
2949 return (err);
2950 }
2951
smctr_issue_write_byte_cmd(struct net_device * dev,short aword_cnt,void * byte)2952 static int smctr_issue_write_byte_cmd(struct net_device *dev,
2953 short aword_cnt, void *byte)
2954 {
2955 struct net_local *tp = (struct net_local *)dev->priv;
2956 unsigned int iword, ibyte;
2957 int err;
2958
2959 if((err = smctr_wait_while_cbusy(dev)))
2960 return (err);
2961
2962 if((err = smctr_wait_cmd(dev)))
2963 return (err);
2964
2965 for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff);
2966 iword++, ibyte += 2)
2967 {
2968 tp->misc_command_data[iword] = (*((__u8 *)byte + ibyte) << 8)
2969 | (*((__u8 *)byte + ibyte + 1));
2970 }
2971
2972 return (smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2973 aword_cnt));
2974 }
2975
smctr_issue_write_word_cmd(struct net_device * dev,short aword_cnt,void * word)2976 static int smctr_issue_write_word_cmd(struct net_device *dev,
2977 short aword_cnt, void *word)
2978 {
2979 struct net_local *tp = (struct net_local *)dev->priv;
2980 unsigned int i, err;
2981
2982 if((err = smctr_wait_while_cbusy(dev)))
2983 return (err);
2984
2985 if((err = smctr_wait_cmd(dev)))
2986 return (err);
2987
2988 for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++)
2989 tp->misc_command_data[i] = *((__u16 *)word + i);
2990
2991 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2992 aword_cnt);
2993
2994 return (err);
2995 }
2996
smctr_join_complete_state(struct net_device * dev)2997 static int smctr_join_complete_state(struct net_device *dev)
2998 {
2999 int err;
3000
3001 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
3002 JS_JOIN_COMPLETE_STATE);
3003
3004 return (err);
3005 }
3006
smctr_link_tx_fcbs_to_bdbs(struct net_device * dev)3007 static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
3008 {
3009 struct net_local *tp = (struct net_local *)dev->priv;
3010 unsigned int i, j;
3011 FCBlock *fcb;
3012 BDBlock *bdb;
3013
3014 for(i = 0; i < NUM_TX_QS_USED; i++)
3015 {
3016 fcb = tp->tx_fcb_head[i];
3017 bdb = tp->tx_bdb_head[i];
3018
3019 for(j = 0; j < tp->num_tx_fcbs[i]; j++)
3020 {
3021 fcb->bdb_ptr = bdb;
3022 fcb->trc_bdb_ptr = TRC_POINTER(bdb);
3023 fcb = (FCBlock *)((char *)fcb + sizeof(FCBlock));
3024 bdb = (BDBlock *)((char *)bdb + sizeof(BDBlock));
3025 }
3026 }
3027
3028 return (0);
3029 }
3030
/* Upload the MCT microcode image (smctr_code) into the adapter's
 * control store.  Returns 0 on success, UCODE_PRESENT if firmware is
 * already loaded (or the on-card copy is current), UCODE_NOT_PRESENT
 * if the image is missing or its checksum is bad, or HARDWARE_FAILED
 * if the post-load verification fails.
 */
static int smctr_load_firmware(struct net_device *dev)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        __u16 i, checksum = 0;
        int err = 0;

        if(smctr_debug > 10)
                printk("%s: smctr_load_firmware\n", dev->name);

        tp->ptr_ucode = smctr_code;
        tp->num_of_tx_buffs = 4;
        tp->mode_bits |= UMAC;
        tp->receive_mask = 0;
        tp->max_packet_size = 4177;

        /* Can only upload the firmware once per adapter reset. */
        if(tp->microcode_version != 0)
                return (UCODE_PRESENT);

        /* Verify the firmware exists and is there in the right amount. */
        if((tp->ptr_ucode == 0L)
                || (*(tp->ptr_ucode + UCODE_VERSION_OFFSET) < UCODE_VERSION))
        {
                return (UCODE_NOT_PRESENT);
        }

        /* Sum the image 16 bits at a time; a good image sums to zero.
         * UCODE_SIZE is not included in Checksum.
         */
        for(i = 0; i < *((__u16 *)(tp->ptr_ucode + UCODE_SIZE_OFFSET)); i += 2)
                checksum += *((__u16 *)(tp->ptr_ucode + 2 + i));
        if(checksum)
                return (UCODE_NOT_PRESENT);

        /* At this point we have a valid firmware image, lets kick it on up. */
        smctr_enable_adapter_ram(dev);
        smctr_enable_16bit(dev);
        smctr_set_page(dev, (__u8 *)tp->ram_access);

        /* Load only when the on-card copy is absent/invalid or older
         * than the image we carry.
         */
        if((smctr_checksum_firmware(dev))
                || (*(tp->ptr_ucode + UCODE_VERSION_OFFSET)
                > tp->microcode_version))
        {
                smctr_enable_adapter_ctrl_store(dev);

                /* Zero out ram space for firmware. */
                for(i = 0; i < CS_RAM_SIZE; i += 2)
                        *((__u16 *)(tp->ram_access + i)) = 0;

                smctr_decode_firmware(dev);

                /* Record the loaded version plus its two's-complement
                 * checksum in control-store RAM so a later
                 * smctr_checksum_firmware() can validate it.
                 */
                tp->microcode_version = *(tp->ptr_ucode + UCODE_VERSION_OFFSET);
                *((__u16 *)(tp->ram_access + CS_RAM_VERSION_OFFSET))
                        = (tp->microcode_version << 8);
                *((__u16 *)(tp->ram_access + CS_RAM_CHECKSUM_OFFSET))
                        = ~(tp->microcode_version << 8) + 1;

                smctr_disable_adapter_ctrl_store(dev);

                if(smctr_checksum_firmware(dev))
                        err = HARDWARE_FAILED;
        }
        else
                err = UCODE_PRESENT;

        smctr_disable_16bit(dev);

        return (err);
}
3097
smctr_load_node_addr(struct net_device * dev)3098 static int smctr_load_node_addr(struct net_device *dev)
3099 {
3100 int ioaddr = dev->base_addr;
3101 unsigned int i;
3102 __u8 r;
3103
3104 for(i = 0; i < 6; i++)
3105 {
3106 r = inb(ioaddr + LAR0 + i);
3107 dev->dev_addr[i] = (char)r;
3108 }
3109 dev->addr_len = 6;
3110
3111 return (0);
3112 }
3113
3114 /* Lobe Media Test.
3115 * During the transmission of the initial 1500 lobe media MAC frames,
3116 * the phase lock loop in the 805 chip may lock, and then un-lock, causing
3117 * the 825 to go into a PURGE state. When performing a PURGE, the MCT
3118 * microcode will not transmit any frames given to it by the host, and
3119 * will consequently cause a timeout.
3120 *
 * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit
 * queues other than the one used for the lobe_media_test should be
 * disabled (unverified).
 *
 * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask
 * has any multi-cast or promiscuous bits set, the receive_mask needs to
 * be changed to clear the multi-cast or promiscuous mode bits, the lobe_test
 * run, and then the receive mask set back to its original value if the test
 * is successful.
3130 */
static int smctr_lobe_media_test(struct net_device *dev)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        unsigned int i, perror = 0;     /* perror: one retry already used */
        unsigned short saved_rcv_mask;

        if(smctr_debug > 10)
                printk("%s: smctr_lobe_media_test\n", dev->name);

        /* Clear receive mask for lobe test (see NOTE 2 above). */
        saved_rcv_mask = tp->receive_mask;
        tp->receive_mask = 0;

        smctr_chg_rx_mask(dev);

        /* Setup the lobe media test. */
        smctr_lobe_media_test_cmd(dev);
        if(smctr_wait_cmd(dev))
        {
                smctr_reset_adapter(dev);
                tp->status = CLOSED;
                return (LOBE_MEDIA_TEST_FAILED);
        }

        /* Tx lobe media test frames.  A single send failure re-issues
         * the test command once (perror); a second failure aborts.
         */
        for(i = 0; i < 1500; ++i)
        {
                if(smctr_send_lobe_media_test(dev))
                {
                        if(perror)
                        {
                                smctr_reset_adapter(dev);
                                /* NOTE(review): the failure path above sets
                                 * tp->status = CLOSED while this and the
                                 * later paths set tp->state -- looks
                                 * inconsistent; confirm which field the
                                 * close logic actually reads.
                                 */
                                tp->state = CLOSED;
                                return (LOBE_MEDIA_TEST_FAILED);
                        }
                        else
                        {
                                perror = 1;
                                if(smctr_lobe_media_test_cmd(dev))
                                {
                                        smctr_reset_adapter(dev);
                                        tp->state = CLOSED;
                                        return (LOBE_MEDIA_TEST_FAILED);
                                }
                        }
                }
        }

        /* Duplicate-address test frame; one retry allowed. */
        if(smctr_send_dat(dev))
        {
                if(smctr_send_dat(dev))
                {
                        smctr_reset_adapter(dev);
                        tp->state = CLOSED;
                        return (LOBE_MEDIA_TEST_FAILED);
                }
        }

        /* Check if any frames received during test. */
        if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status)
                || (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status))
        {
                smctr_reset_adapter(dev);
                tp->state = CLOSED;
                return (LOBE_MEDIA_TEST_FAILED);
        }

        /* Restore the receive mask saved before the test. */
        tp->receive_mask = saved_rcv_mask;

        smctr_chg_rx_mask(dev);

        return (0);
}
3205
smctr_lobe_media_test_cmd(struct net_device * dev)3206 static int smctr_lobe_media_test_cmd(struct net_device *dev)
3207 {
3208 struct net_local *tp = (struct net_local *)dev->priv;
3209 int err;
3210
3211 if(smctr_debug > 10)
3212 printk("%s: smctr_lobe_media_test_cmd\n", dev->name);
3213
3214 /* Change to lobe media test state. */
3215 if(tp->monitor_state != MS_BEACON_TEST_STATE)
3216 {
3217 smctr_lobe_media_test_state(dev);
3218 if(smctr_wait_cmd(dev))
3219 {
3220 printk("Lobe Failed test state\n");
3221 return (LOBE_MEDIA_TEST_FAILED);
3222 }
3223 }
3224
3225 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
3226 TRC_LOBE_MEDIA_TEST);
3227
3228 return (err);
3229 }
3230
smctr_lobe_media_test_state(struct net_device * dev)3231 static int smctr_lobe_media_test_state(struct net_device *dev)
3232 {
3233 int err;
3234
3235 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
3236 JS_LOBE_TEST_STATE);
3237
3238 return (err);
3239 }
3240
/* Fill in the 802.5 MAC header of tmf: access control / frame control
 * from ac_fc, source address from the device, and a destination chosen
 * by the frame's vector class (fixed functional addresses for RQ_INIT
 * and RPT_TX_FORWARD, otherwise the sender of the received frame rmf).
 * Always returns 0.
 */
static int smctr_make_8025_hdr(struct net_device *dev,
        MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc)
{
        static const __u8 rps_addr[6] = { 0xc0, 0x00, 0x00, 0x00, 0x00, 0x02 };
        static const __u8 crs_addr[6] = { 0xc0, 0x00, 0x00, 0x00, 0x00, 0x10 };

        tmf->ac = MSB(ac_fc);   /* msb is access control */
        tmf->fc = LSB(ac_fc);   /* lsb is frame control */

        memcpy(tmf->sa, dev->dev_addr, 6);

        switch(tmf->vc)
        {
                /* Send RQ_INIT to RPS */
                case RQ_INIT:
                        memcpy(tmf->da, rps_addr, 6);
                        break;

                /* Send RPT_TX_FORWARD to CRS */
                case RPT_TX_FORWARD:
                        memcpy(tmf->da, crs_addr, 6);
                        break;

                /* Everything else goes to sender */
                default:
                        memcpy(tmf->da, rmf->sa, 6);
                        break;
        }

        return (0);
}
3289
/* Build the Authorized Access Priority sub-vector from adapter state. */
static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        __u16 pri = tp->authorized_access_priority;

        tsv->svi = AUTHORIZED_ACCESS_PRIORITY;
        tsv->svl = S_AUTHORIZED_ACCESS_PRIORITY;
        tsv->svv[0] = MSB(pri);
        tsv->svv[1] = LSB(pri);

        return (0);
}
3302
/* Build an (always zero) Address Modifier sub-vector. */
static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
{
        tsv->svi = ADDRESS_MODIFER;
        tsv->svl = S_ADDRESS_MODIFER;

        tsv->svv[1] = 0;
        tsv->svv[0] = 0;

        return (0);
}
3313
/* Build the Authorized Function Class sub-vector from adapter state. */
static int smctr_make_auth_funct_class(struct net_device *dev,
        MAC_SUB_VECTOR *tsv)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        __u16 classes = tp->authorized_function_classes;

        tsv->svi = AUTHORIZED_FUNCTION_CLASS;
        tsv->svl = S_AUTHORIZED_FUNCTION_CLASS;
        tsv->svv[0] = MSB(classes);
        tsv->svv[1] = LSB(classes);

        return (0);
}
3327
/* Build a Correlator sub-vector echoing the given 16-bit correlator. */
static int smctr_make_corr(struct net_device *dev,
        MAC_SUB_VECTOR *tsv, __u16 correlator)
{
        tsv->svi = CORRELATOR;
        tsv->svl = S_CORRELATOR;
        tsv->svv[0] = MSB(correlator);
        tsv->svv[1] = LSB(correlator);

        return (0);
}
3339
/* Build the Functional Address sub-vector; the four address bytes are
 * fetched from the adapter into tp->misc_command_data first.
 */
static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        int i;

        smctr_get_functional_address(dev);

        tsv->svi = FUNCTIONAL_ADDRESS;
        tsv->svl = S_FUNCTIONAL_ADDRESS;

        /* Unpack two 16-bit words into four big-endian bytes. */
        for(i = 0; i < 2; i++)
        {
                tsv->svv[2 * i]     = MSB(tp->misc_command_data[i]);
                tsv->svv[2 * i + 1] = LSB(tp->misc_command_data[i]);
        }

        return (0);
}
3357
/* Build the Group Address sub-vector; the four address bytes are
 * fetched from the adapter into tp->misc_command_data first.
 */
static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        int i;

        smctr_get_group_address(dev);

        tsv->svi = GROUP_ADDRESS;
        tsv->svl = S_GROUP_ADDRESS;

        /* Unpack two 16-bit words into four big-endian bytes. */
        for(i = 0; i < 2; i++)
        {
                tsv->svv[2 * i]     = MSB(tp->misc_command_data[i]);
                tsv->svv[2 * i + 1] = LSB(tp->misc_command_data[i]);
        }

        /* Set Group Address Sub-vector to all zeros if only the
         * Group Address/Functional Address Indicator is set.
         */
        if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00
           && tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
                tsv->svv[0] = 0x00;

        return (0);
}
3382
/* Build the Physical Drop Number sub-vector; the four bytes are
 * fetched from the adapter into tp->misc_command_data first.
 */
static int smctr_make_phy_drop_num(struct net_device *dev,
        MAC_SUB_VECTOR *tsv)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        int i;

        smctr_get_physical_drop_number(dev);

        tsv->svi = PHYSICAL_DROP;
        tsv->svl = S_PHYSICAL_DROP;

        /* Unpack two 16-bit words into four big-endian bytes. */
        for(i = 0; i < 2; i++)
        {
                tsv->svv[2 * i]     = MSB(tp->misc_command_data[i]);
                tsv->svv[2 * i + 1] = LSB(tp->misc_command_data[i]);
        }

        return (0);
}
3401
/* Build the Product Instance ID sub-vector (18 filler bytes of 0xF0). */
static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
{
        tsv->svi = PRODUCT_INSTANCE_ID;
        tsv->svl = S_PRODUCT_INSTANCE_ID;

        memset(tsv->svv, 0xF0, 18);

        return (0);
}
3414
/* Build the Station Identifier sub-vector; the six ID bytes are
 * fetched from the adapter into tp->misc_command_data first.
 */
static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        int i;

        smctr_get_station_id(dev);

        tsv->svi = STATION_IDENTIFER;
        tsv->svl = S_STATION_IDENTIFER;

        /* Unpack three 16-bit words into six big-endian bytes. */
        for(i = 0; i < 3; i++)
        {
                tsv->svv[2 * i]     = MSB(tp->misc_command_data[i]);
                tsv->svv[2 * i + 1] = LSB(tp->misc_command_data[i]);
        }

        return (0);
}
3435
/* Build an (always zero) Ring Station Status sub-vector. */
static int smctr_make_ring_station_status(struct net_device *dev,
        MAC_SUB_VECTOR *tsv)
{
        tsv->svi = RING_STATION_STATUS;
        tsv->svl = S_RING_STATION_STATUS;

        memset(tsv->svv, 0, 6);

        return (0);
}
3451
/* Build the Ring Station Version Number sub-vector: the EBCDIC string
 * "SMC Vnn XE" (or "...XD"), where nn is the microcode version and the
 * final letter reflects the chip revision.
 */
static int smctr_make_ring_station_version(struct net_device *dev,
        MAC_SUB_VECTOR *tsv)
{
        struct net_local *tp = (struct net_local *)dev->priv;

        tsv->svi = RING_STATION_VERSION_NUMBER;
        tsv->svl = S_RING_STATION_VERSION_NUMBER;

        tsv->svv[0] = 0xe2;     /* EBCDIC - S */
        tsv->svv[1] = 0xd4;     /* EBCDIC - M */
        tsv->svv[2] = 0xc3;     /* EBCDIC - C */
        tsv->svv[3] = 0x40;     /* EBCDIC - space */
        tsv->svv[4] = 0xe5;     /* EBCDIC - V */

        /* Microcode version as two EBCDIC digits (0xF0 = EBCDIC '0'). */
        tsv->svv[5] = 0xF0 + (tp->microcode_version >> 4);
        tsv->svv[6] = 0xF0 + (tp->microcode_version & 0x0f);

        tsv->svv[7] = 0x40;     /* EBCDIC - space */
        tsv->svv[8] = 0xe7;     /* EBCDIC - X */

        /* EBCDIC 'E' for the newer chip revision, 'D' otherwise. */
        tsv->svv[9] = (tp->extra_info & CHIP_REV_MASK) ? 0xc5 : 0xc4;

        return (0);
}
3477
/* Build the Transmit Status Code sub-vector for a RPT_TX_FORWARD
 * response: byte 0 carries the forwarded frame's address-recognized
 * status bit alongside the IBM_PASS_SOURCE_ADDR flag, byte 1 the raw
 * stripped frame status.
 *
 * Bug fix: the original expression
 *     ((tx_fstatus & 0x0100 >> 6) || IBM_PASS_SOURCE_ADDR)
 * was wrong twice over: `>>` binds tighter than `&` so it tested bit 2
 * instead of bit 8, and the logical `||` made the result a constant 1
 * whenever IBM_PASS_SOURCE_ADDR is non-zero.  The intent is a bitwise
 * combination of frame-status bit 8 (shifted down) with the flag.
 */
static int smctr_make_tx_status_code(struct net_device *dev,
        MAC_SUB_VECTOR *tsv, __u16 tx_fstatus)
{
        tsv->svi = TRANSMIT_STATUS_CODE;
        tsv->svl = S_TRANSMIT_STATUS_CODE;

        tsv->svv[0] = (((tx_fstatus & 0x0100) >> 6) | IBM_PASS_SOURCE_ADDR);

        /* Stripped frame status of Transmitted Frame */
        tsv->svv[1] = tx_fstatus & 0xff;

        return (0);
}
3491
/* Build the Upstream Neighbor Address sub-vector; the six address
 * bytes are fetched from the adapter into tp->misc_command_data first.
 */
static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
        MAC_SUB_VECTOR *tsv)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        int i;

        smctr_get_upstream_neighbor_addr(dev);

        tsv->svi = UPSTREAM_NEIGHBOR_ADDRESS;
        tsv->svl = S_UPSTREAM_NEIGHBOR_ADDRESS;

        /* Unpack three 16-bit words into six big-endian bytes. */
        for(i = 0; i < 3; i++)
        {
                tsv->svv[2 * i]     = MSB(tp->misc_command_data[i]);
                tsv->svv[2 * i + 1] = LSB(tp->misc_command_data[i]);
        }

        return (0);
}
3513
/* Build a Wrap Data sub-vector header.  Only the identifier and length
 * are written; the data bytes themselves are left untouched here —
 * presumably filled in by the caller or ignored by the adapter.
 * NOTE(review): confirm whether svv[] is expected to carry data.
 */
static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
{
        tsv->svi = WRAP_DATA;
        tsv->svl = S_WRAP_DATA;

        return (0);
}
3521
3522 /*
3523 * Open/initialize the board. This is called sometime after
3524 * booting when the 'ifconfig' program is run.
3525 *
3526 * This routine should set everything up anew at each open, even
3527 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
3529 */
/* net_device open hook: (re)initialize the adapter and, when built as
 * a module, pin the module while the interface is up.
 * Returns smctr_init_adapter()'s result; negative values are errors and
 * are returned before the use count is bumped.
 */
static int smctr_open(struct net_device *dev)
{
        int err;

        if(smctr_debug > 10)
                printk("%s: smctr_open\n", dev->name);

        err = smctr_init_adapter(dev);
        if(err < 0)
                return (err);

#ifdef MODULE
        /* 2.4-era manual module refcounting; dropped again in close. */
        MOD_INC_USE_COUNT;
#endif

        return (err);
}
3547
/* Interrupt driven open of Token card.
 *
 * Resumes the MAC and non-MAC receive FCB/BDB chains, then either
 * inserts the adapter into the ring or enters one of three loopback
 * modes selected by tp->mode_bits (TRC loopback, TRI loopback, or the
 * lobe media test).  tp->status tracks progress: CLOSED while the
 * command is outstanding, OPEN once it completes.
 * Returns 0 on success, a negative/driver error code otherwise.
 */
static int smctr_open_tr(struct net_device *dev)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        unsigned long flags;
        int err;

        if(smctr_debug > 10)
                printk("%s: smctr_open_tr\n", dev->name);

        /* Now we can actually open the adapter. */
        if(tp->status == OPEN)
                return (0);
        if(tp->status != INITIALIZED)
                return (-1);

        /* 2.4-era locking: run the whole sequence with interrupts off. */
        save_flags(flags);
        cli();

        smctr_set_page(dev, (__u8 *)tp->ram_access);

        /* Restart both receive queues (FCB then BDB, MAC then non-MAC). */
        if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)MAC_QUEUE)))
                goto out;

        if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)MAC_QUEUE)))
                goto out;

        if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)NON_MAC_QUEUE)))
                goto out;

        if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)NON_MAC_QUEUE)))
                goto out;

        tp->status = CLOSED;

        /* Insert into the Ring or Enter Loopback Mode. */
        if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_1)
        {
                tp->status = CLOSED;

                if(!(err = smctr_issue_trc_loopback_cmd(dev)))
                {
                        if(!(err = smctr_wait_cmd(dev)))
                                tp->status = OPEN;
                }

                smctr_status_chg(dev);
        }
        else
        {
                if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_2)
                {
                        tp->status = CLOSED;
                        if(!(err = smctr_issue_tri_loopback_cmd(dev)))
                        {
                                if(!(err = smctr_wait_cmd(dev)))
                                        tp->status = OPEN;
                        }

                        smctr_status_chg(dev);
                }
                else
                {
                        if((tp->mode_bits & LOOPING_MODE_MASK)
                                == LOOPBACK_MODE_3)
                        {
                                tp->status = CLOSED;
                                if(!(err = smctr_lobe_media_test_cmd(dev)))
                                {
                                        if(!(err = smctr_wait_cmd(dev)))
                                                tp->status = OPEN;
                                }
                                smctr_status_chg(dev);
                        }
                        else
                        {
                                /* Normal path: lobe media test, then ring insert. */
                                if(!(err = smctr_lobe_media_test(dev)))
                                        err = smctr_issue_insert_cmd(dev);
                                else
                                {
                                        if(err == LOBE_MEDIA_TEST_FAILED)
                                                printk("%s: Lobe Media Test Failure - Check cable?\n", dev->name);
                                }
                        }
                }
        }

out:
        restore_flags(flags);

        return (err);
}
3640
/* Check for a network adapter of this type, and return '0 if one exists.
 * If dev->base_addr == 0, probe all likely locations.
 * If dev->base_addr == 1, always return failure.
 */
int __init smctr_probe (struct net_device *dev)
{
        int i;
        /* dev may be NULL during autoprobe; treat that as base_addr 0. */
        int base_addr = dev ? dev->base_addr : 0;

        if(base_addr > 0x1ff)    /* Check a single specified location. */
                return (smctr_probe1(dev, base_addr));
        else if(base_addr != 0)  /* Don't probe at all (1..0x1ff). */
                return (-ENXIO);

        /* Walk the zero-terminated port list, skipping I/O regions
         * already claimed by another driver.
         */
        for(i = 0; smctr_portlist[i]; i++)
        {
                int ioaddr = smctr_portlist[i];
                if(check_region(ioaddr, SMCTR_IO_EXTENT))
                        continue;
                if (!smctr_probe1(dev, ioaddr))
                        return (0);
        }

        return (-ENODEV);
}
3666
/* Probe a single I/O address for an SMC Token Ring adapter.
 * Allocates the per-device private structure, detects the card (ISA
 * first, then MCA), maps its shared RAM, loads firmware, and wires up
 * the net_device method pointers.
 * Returns 0 on success; -ENOMEM/-ENODEV/-EIO on failure.
 */
static int __init smctr_probe1(struct net_device *dev, int ioaddr)
{
        static unsigned version_printed;
        struct net_local *tp;
        int err;
        __u32 *ram;

        if(smctr_debug && version_printed++ == 0)
                printk(version);

#ifndef MODULE
        /* Built-in: allocate/initialize the token ring device here. */
        dev = init_trdev(dev, 0);
        if(dev == NULL)
                return (-ENOMEM);
#endif

        /* Setup this devices private information structure */
        tp = (struct net_local *)kmalloc(sizeof(struct net_local),
                GFP_KERNEL);
        if(tp == NULL) {
                err = -ENOMEM;
                goto out;
        }
        memset(tp, 0, sizeof(struct net_local));
        dev->priv = tp;
        dev->base_addr = ioaddr;

        /* Actually detect an adapter now. */
        err = smctr_chk_isa(dev);
        if(err < 0)
        {
                /* Not ISA; fall back to MCA detection. */
                if ((err = smctr_chk_mca(dev)) < 0) {
                        err = -ENODEV;
                        goto out_tp;
                }
        }

        /* NOTE(review): tp is re-read here — presumably the chk routines
         * may update dev->priv; confirm out_tp frees the right pointer.
         */
        tp = (struct net_local *)dev->priv;
        dev->rmem_start = dev->mem_start = tp->ram_base;
        dev->rmem_end = dev->mem_end = dev->mem_start + 0x10000;
        ram = (__u32 *)phys_to_virt(dev->mem_start);
        tp->ram_access = *(__u32 *)&ram;
        tp->status = NOT_INITIALIZED;

        err = smctr_load_firmware(dev);
        if(err != UCODE_PRESENT && err != SUCCESS)
        {
                printk("%s: Firmware load failed (%d)\n", dev->name, err);
                err = -EIO;
                goto out_tp;
        }

        /* Allow user to specify ring speed on module insert. */
        if(ringspeed == 4)
                tp->media_type = MEDIA_UTP_4;
        else
                tp->media_type = MEDIA_UTP_16;

        printk("%s: %s %s at Io %#4x, Irq %d, Rom %#4x, Ram %#4x.\n",
                dev->name, smctr_name, smctr_model,
                (unsigned int)dev->base_addr,
                dev->irq, tp->rom_base, tp->ram_base);

        /* AKPM: there's no point in this */
        dev->init = smctr_init_card;
        dev->open = smctr_open;
        dev->stop = smctr_close;
        dev->hard_start_xmit = smctr_send_packet;
        dev->tx_timeout = smctr_timeout;
        dev->watchdog_timeo = HZ;
        dev->get_stats = smctr_get_stats;
        dev->set_multicast_list = &smctr_set_multicast_list;
        return (0);

out_tp:
        kfree(tp);
out:
        return err;
}
3746
/* Process one received MAC frame.
 *
 * If the destination address matched (FCB_RX_STATUS_DA_MATCHED), the
 * frame is dispatched by vector class: frames handled by the Ring
 * Station (INIT, CHG_PARM, RQ_ADDR/ATTCH/STATE, TX_FORWARD) get parsed
 * and answered; CRS/REM/RPS report frames are ignored; anything else is
 * an attention/unknown MAC frame (xframe = 0).  Unmatched frames
 * (promiscuous mode) are only classified as extended vs attention.
 * Finally, the frame is passed up the stack as an skb if the receive
 * mask accepts its class.  Returns 0 or a hardware/send error code.
 */
static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
        struct net_device *dev, __u16 rx_status)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        struct sk_buff *skb;
        __u16 rcode, correlator;
        int err = 0;
        __u8 xframe = 1;        /* 1 = extended MAC frame, 0 = attention/unknown */
        __u16 tx_fstatus;

        /* Vector length arrives byte-swapped; fix it up for parsing. */
        rmf->vl = SWAP_BYTES(rmf->vl);
        if(rx_status & FCB_RX_STATUS_DA_MATCHED)
        {
                switch(rmf->vc)
                {
                        /* Received MAC Frames Processed by RS. */
                        case INIT:
                                if((rcode = smctr_rcv_init(dev, rmf,
                                        &correlator)) == HARDWARE_FAILED)
                                {
                                        return (rcode);
                                }

                                if((err = smctr_send_rsp(dev, rmf, rcode,
                                        correlator)))
                                {
                                        return (err);
                                }
                                break;

                        case CHG_PARM:
                                if((rcode = smctr_rcv_chg_param(dev, rmf,
                                        &correlator)) ==HARDWARE_FAILED)
                                {
                                        return (rcode);
                                }

                                if((err = smctr_send_rsp(dev, rmf, rcode,
                                        correlator)))
                                {
                                        return (err);
                                }
                                break;

                        case RQ_ADDR:
                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
                                        rmf, &correlator)) != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return (rcode);
                                        else
                                                return (smctr_send_rsp(dev, rmf,
                                                        rcode, correlator));
                                }

                                if((err = smctr_send_rpt_addr(dev, rmf,
                                        correlator)))
                                {
                                        return (err);
                                }
                                break;

                        case RQ_ATTCH:
                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
                                        rmf, &correlator)) != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return (rcode);
                                        else
                                                return (smctr_send_rsp(dev, rmf,
                                                        rcode,
                                                        correlator));
                                }

                                if((err = smctr_send_rpt_attch(dev, rmf,
                                        correlator)))
                                {
                                        return (err);
                                }
                                break;

                        case RQ_STATE:
                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
                                        rmf, &correlator)) != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return (rcode);
                                        else
                                                return (smctr_send_rsp(dev, rmf,
                                                        rcode,
                                                        correlator));
                                }

                                if((err = smctr_send_rpt_state(dev, rmf,
                                        correlator)))
                                {
                                        return (err);
                                }
                                break;

                        case TX_FORWARD:
                                /* NOTE(review): smctr_rcv_tx_forward() never
                                 * sets correlator, so the smctr_send_rsp()
                                 * below may pass it uninitialized — confirm.
                                 */
                                if((rcode = smctr_rcv_tx_forward(dev, rmf))
                                        != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return (rcode);
                                        else
                                                return (smctr_send_rsp(dev, rmf,
                                                        rcode,
                                                        correlator));
                                }

                                if((err = smctr_send_tx_forward(dev, rmf,
                                        &tx_fstatus)) == HARDWARE_FAILED)
                                {
                                        return (err);
                                }

                                if(err == A_FRAME_WAS_FORWARDED)
                                {
                                        if((err = smctr_send_rpt_tx_forward(dev,
                                                rmf, tx_fstatus))
                                                == HARDWARE_FAILED)
                                        {
                                                return (err);
                                        }
                                }
                                break;

                        /* Received MAC Frames Processed by CRS/REM/RPS. */
                        case RSP:
                        case RQ_INIT:
                        case RPT_NEW_MON:
                        case RPT_SUA_CHG:
                        case RPT_ACTIVE_ERR:
                        case RPT_NN_INCMP:
                        case RPT_ERROR:
                        case RPT_ATTCH:
                        case RPT_STATE:
                        case RPT_ADDR:
                                break;

                        /* Rcvd Att. MAC Frame (if RXATMAC set) or UNKNOWN */
                        default:
                                xframe = 0;
                                if(!(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES))
                                {
                                        rcode = smctr_rcv_unknown(dev, rmf,
                                                &correlator);
                                        if((err = smctr_send_rsp(dev, rmf,rcode,
                                                correlator)))
                                        {
                                                return (err);
                                        }
                                }

                                break;
                }
        }
        else
        {
                /* 1. DA doesn't match (Promiscuous Mode).
                 * 2. Parse for Extended MAC Frame Type.
                 */
                switch(rmf->vc)
                {
                        case RSP:
                        case INIT:
                        case RQ_INIT:
                        case RQ_ADDR:
                        case RQ_ATTCH:
                        case RQ_STATE:
                        case CHG_PARM:
                        case RPT_ADDR:
                        case RPT_ERROR:
                        case RPT_ATTCH:
                        case RPT_STATE:
                        case RPT_NEW_MON:
                        case RPT_SUA_CHG:
                        case RPT_NN_INCMP:
                        case RPT_ACTIVE_ERR:
                                break;

                        default:
                                xframe = 0;
                                break;
                }
        }

        /* NOTE: UNKNOWN MAC frames will NOT be passed up unless
         * ACCEPT_ATT_MAC_FRAMES is set.
         */
        if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
                && (xframe == (__u8)0))
                || ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES)
                && (xframe == (__u8)1)))
        {
                /* Restore the on-wire byte order before handing up. */
                rmf->vl = SWAP_BYTES(rmf->vl);

                if (!(skb = dev_alloc_skb(size)))
                        return -ENOMEM;
                /* NOTE(review): skb->len is pre-set to size here and
                 * skb_put() below adds size again, so len ends up 2*size
                 * and the memcpy copies twice the frame — looks wrong;
                 * confirm against this kernel's skb_put() semantics.
                 */
                skb->len = size;

                /* Slide data into a sleek skb. */
                skb_put(skb, skb->len);
                memcpy(skb->data, rmf, skb->len);

                /* Update Counters */
                tp->MacStat.rx_packets++;
                tp->MacStat.rx_bytes += skb->len;

                /* Kick the packet on up. */
                skb->dev = dev;
                skb->protocol = tr_type_trans(skb, dev);
                netif_rx(skb);
                dev->last_rx = jiffies;
                err = 0;
        }

        return (err);
}
3968
/* Adapter RAM test. Incremental word ODD boundry data test.
 *
 * Pass 1: write an incrementing 16-bit pattern at odd byte offsets
 * across every page of shared RAM and read it back.  Pass 2: zero all
 * of RAM and verify.  Returns 0 on success, RAM_TEST_FAILED on the
 * first mismatch.
 *
 * NOTE(review): err is a __u8, so after integer promotion (~err) is
 * non-zero even when err == 1 — the loop conditions never actually
 * terminate on error.  Harmless here because the function returns
 * RAM_TEST_FAILED immediately, which also means err_offset/err_word/
 * err_pattern are recorded but never reported.
 */
static int smctr_ram_memory_test(struct net_device *dev)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        __u16 page, pages_of_ram, start_pattern = 0, word_pattern = 0,
                word_read = 0, err_word = 0, err_pattern = 0;
        unsigned int err_offset;
        __u32 j, pword;
        __u8 err = 0;

        if(smctr_debug > 10)
                printk("%s: smctr_ram_memory_test\n", dev->name);

        start_pattern = 0x0001;
        pages_of_ram = tp->ram_size / tp->ram_usable;
        pword = tp->ram_access;

        /* Incremental word ODD boundry test. */
        for(page = 0; (page < pages_of_ram) && (~err);
                page++, start_pattern += 0x8000)
        {
                smctr_set_page(dev, (__u8 *)(tp->ram_access
                        + (page * tp->ram_usable * 1024) + 1));
                word_pattern = start_pattern;

                /* Fill the page with the incrementing pattern. */
                for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1; j += 2)
                        *(__u16 *)(pword + j) = word_pattern++;

                word_pattern = start_pattern;

                /* Read back and compare. */
                for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1
                        && (~err); j += 2, word_pattern++)
                {
                        word_read = *(__u16 *)(pword + j);
                        if(word_read != word_pattern)
                        {
                                err = (__u8)1;
                                err_offset = j;
                                err_word = word_read;
                                err_pattern = word_pattern;
                                return (RAM_TEST_FAILED);
                        }
                }
        }

        /* Zero out memory. */
        for(page = 0; page < pages_of_ram && (~err); page++)
        {
                smctr_set_page(dev, (__u8 *)(tp->ram_access
                        + (page * tp->ram_usable * 1024)));
                word_pattern = 0;

                for(j = 0; j < (__u32)tp->ram_usable * 1024; j +=2)
                        *(__u16 *)(pword + j) = word_pattern;

                /* Verify the page really is zeroed. */
                for(j =0; j < (__u32)tp->ram_usable * 1024
                        && (~err); j += 2)
                {
                        word_read = *(__u16 *)(pword + j);
                        if(word_read != word_pattern)
                        {
                                err = (__u8)1;
                                err_offset = j;
                                err_word = word_read;
                                err_pattern = word_pattern;
                                return (RAM_TEST_FAILED);
                        }
                }
        }

        /* Leave the window pointing back at the start of shared RAM. */
        smctr_set_page(dev, (__u8 *)tp->ram_access);

        return (0);
}
4043
/* Parse a received CHG_PARM MAC frame from the Configuration Report
 * Server: walk its sub-vectors, apply each recognized parameter to the
 * adapter, and record the CORRELATOR for the response.
 * Returns POSITIVE_ACK on success or an E_* code for smctr_send_rsp().
 */
static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
        __u16 *correlator)
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;
        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;

        /* This Frame can only come from a CRS */
        if((rmf->dc_sc & SC_MASK) != SC_CRS)
                return(E_INAPPROPRIATE_SOURCE_CLASS);

        /* Remove MVID Length from total length. */
        vlen = (signed short)rmf->vl - 4;

        /* Point to First SVID */
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Search for Appropriate SVID's.
         * NOTE(review): a sub-vector with svl == 0 would loop forever
         * here — presumably the adapter never delivers one; confirm.
         */
        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                switch(rsv->svi)
                {
                        case CORRELATOR:
                                svectors |= F_CORRELATOR;
                                rcode = smctr_set_corr(dev, rsv, correlator);
                                break;

                        case LOCAL_RING_NUMBER:
                                svectors |= F_LOCAL_RING_NUMBER;
                                rcode = smctr_set_local_ring_num(dev, rsv);
                                break;

                        case ASSIGN_PHYSICAL_DROP:
                                svectors |= F_ASSIGN_PHYSICAL_DROP;
                                rcode = smctr_set_phy_drop(dev, rsv);
                                break;

                        case ERROR_TIMER_VALUE:
                                svectors |= F_ERROR_TIMER_VALUE;
                                rcode = smctr_set_error_timer_value(dev, rsv);
                                break;

                        case AUTHORIZED_FUNCTION_CLASS:
                                svectors |= F_AUTHORIZED_FUNCTION_CLASS;
                                rcode = smctr_set_auth_funct_class(dev, rsv);
                                break;

                        case AUTHORIZED_ACCESS_PRIORITY:
                                svectors |= F_AUTHORIZED_ACCESS_PRIORITY;
                                rcode = smctr_set_auth_access_pri(dev, rsv);
                                break;

                        default:
                                rcode = E_SUB_VECTOR_UNKNOWN;
                                break;
                }

                /* Let Sender Know if SUM of SV length's is
                 * larger then length in MVID length field
                 */
                if((vlen -= rsv->svl) < 0)
                        rcode = E_VECTOR_LENGTH_ERROR;

                /* Advance to the next sub-vector. */
                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if(rcode == POSITIVE_ACK)
        {
                /* Let Sender Know if MVID length field
                 * is larger then SUM of SV length's
                 */
                if(vlen != 0)
                        rcode = E_VECTOR_LENGTH_ERROR;
                else
                {
                        /* Let Sender Know if Expected SVID Missing */
                        if((svectors & R_CHG_PARM) ^ R_CHG_PARM)
                                rcode = E_MISSING_SUB_VECTOR;
                }
        }

        return (rcode);
}
4128
/* Parse a received INIT MAC frame from the Ring Parameter Server:
 * walk its sub-vectors, apply each recognized parameter, and record
 * the CORRELATOR for the response.
 * Returns POSITIVE_ACK on success or an E_* code for smctr_send_rsp().
 */
static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
        __u16 *correlator)
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;
        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;

        /* This Frame can only come from a RPS */
        if((rmf->dc_sc & SC_MASK) != SC_RPS)
                return (E_INAPPROPRIATE_SOURCE_CLASS);

        /* Remove MVID Length from total length. */
        vlen = (signed short)rmf->vl - 4;

        /* Point to First SVID */
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Search for Appropriate SVID's */
        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                switch(rsv->svi)
                {
                        case CORRELATOR:
                                svectors |= F_CORRELATOR;
                                rcode = smctr_set_corr(dev, rsv, correlator);
                                break;

                        case LOCAL_RING_NUMBER:
                                svectors |= F_LOCAL_RING_NUMBER;
                                rcode = smctr_set_local_ring_num(dev, rsv);
                                break;

                        case ASSIGN_PHYSICAL_DROP:
                                svectors |= F_ASSIGN_PHYSICAL_DROP;
                                rcode = smctr_set_phy_drop(dev, rsv);
                                break;

                        case ERROR_TIMER_VALUE:
                                svectors |= F_ERROR_TIMER_VALUE;
                                rcode = smctr_set_error_timer_value(dev, rsv);
                                break;

                        default:
                                rcode = E_SUB_VECTOR_UNKNOWN;
                                break;
                }

                /* Let Sender Know if SUM of SV length's is
                 * larger then length in MVID length field
                 */
                if((vlen -= rsv->svl) < 0)
                        rcode = E_VECTOR_LENGTH_ERROR;

                /* Advance to the next sub-vector. */
                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if(rcode == POSITIVE_ACK)
        {
                /* Let Sender Know if MVID length field
                 * is larger then SUM of SV length's
                 */
                if(vlen != 0)
                        rcode = E_VECTOR_LENGTH_ERROR;
                else
                {
                        /* Let Sender Know if Expected SV Missing */
                        if((svectors & R_INIT) ^ R_INIT)
                                rcode = E_MISSING_SUB_VECTOR;
                }
        }

        return (rcode);
}
4203
/* Parse a received TX_FORWARD MAC frame from the Configuration Report
 * Server, handing its FRAME_FORWARD sub-vector to the adapter.
 * Returns POSITIVE_ACK on success or an E_* code for smctr_send_rsp().
 */
static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
{
        MAC_SUB_VECTOR *sv;
        signed short bytes_left;
        __u16 ack = POSITIVE_ACK;
        unsigned int seen = F_NO_SUB_VECTORS_FOUND;

        /* This Frame can only come from a CRS */
        if((rmf->dc_sc & SC_MASK) != SC_CRS)
                return (E_INAPPROPRIATE_SOURCE_CLASS);

        /* Total vector length minus the 4-byte MVID header. */
        bytes_left = (signed short)rmf->vl - 4;

        /* The first sub-vector follows the MAC header. */
        sv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        while(bytes_left > 0 && ack == POSITIVE_ACK)
        {
                if(sv->svi == FRAME_FORWARD)
                {
                        seen |= F_FRAME_FORWARD;
                        ack = smctr_set_frame_forward(dev, sv, rmf->dc_sc);
                }
                else
                        ack = E_SUB_VECTOR_UNKNOWN;

                /* The sum of sub-vector lengths must not exceed the
                 * length given in the MVID length field.
                 */
                bytes_left -= sv->svl;
                if(bytes_left < 0)
                        ack = E_VECTOR_LENGTH_ERROR;

                sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
        }

        if(ack == POSITIVE_ACK)
        {
                /* MVID length larger than the sum of SV lengths? */
                if(bytes_left != 0)
                        ack = E_VECTOR_LENGTH_ERROR;
                /* Required sub-vector missing? */
                else if((seen & R_TX_FORWARD) ^ R_TX_FORWARD)
                        ack = E_MISSING_SUB_VECTOR;
        }

        return (ack);
}
4263
/* Parse a received RQ_ADDR / RQ_STATE / RQ_ATTCH MAC frame, extracting
 * the CORRELATOR sub-vector needed for the report response.
 * Returns POSITIVE_ACK on success or an E_* code for smctr_send_rsp().
 */
static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
        MAC_HEADER *rmf, __u16 *correlator)
{
        MAC_SUB_VECTOR *sv;
        signed short bytes_left;
        __u16 ack = POSITIVE_ACK;
        unsigned int seen = F_NO_SUB_VECTORS_FOUND;

        /* Total vector length minus the 4-byte MVID header. */
        bytes_left = (signed short)rmf->vl - 4;

        /* The first sub-vector follows the MAC header. */
        sv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        while(bytes_left > 0 && ack == POSITIVE_ACK)
        {
                if(sv->svi == CORRELATOR)
                {
                        seen |= F_CORRELATOR;
                        ack = smctr_set_corr(dev, sv, correlator);
                }
                else
                        ack = E_SUB_VECTOR_UNKNOWN;

                /* The sum of sub-vector lengths must not exceed the
                 * length given in the MVID length field.
                 */
                bytes_left -= sv->svl;
                if(bytes_left < 0)
                        ack = E_VECTOR_LENGTH_ERROR;

                sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
        }

        if(ack == POSITIVE_ACK)
        {
                /* MVID length larger than the sum of SV lengths? */
                if(bytes_left != 0)
                        ack = E_VECTOR_LENGTH_ERROR;
                /* Required sub-vector missing? */
                else if((seen & R_RQ_ATTCH_STATE_ADDR)
                        ^ R_RQ_ATTCH_STATE_ADDR)
                        ack = E_MISSING_SUB_VECTOR;
        }

        return (ack);
}
4320
/* Handle a MAC frame with an unrecognized vector class: scan its
 * sub-vectors for a CORRELATOR (so the negative response can reference
 * it; *correlator is 0 if none is found) and always report
 * E_UNRECOGNIZED_VECTOR_ID.
 */
static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
        __u16 *correlator)
{
        MAC_SUB_VECTOR *sv;
        signed short bytes_left;

        *correlator = 0;

        /* Total vector length minus the 4-byte MVID header. */
        bytes_left = (signed short)rmf->vl - 4;

        /* The first sub-vector follows the MAC header. */
        sv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Stop as soon as a correlator has been extracted. */
        while(bytes_left > 0 && *correlator == 0)
        {
                if(sv->svi == CORRELATOR)
                        smctr_set_corr(dev, sv, correlator);

                bytes_left -= sv->svl;
                sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
        }

        return (E_UNRECOGNIZED_VECTOR_ID);
}
4354
4355 /*
4356 * Reset the 825 NIC and exit w:
4357 * 1. The NIC reset cleared (non-reset state), halted and un-initialized.
4358 * 2. TINT masked.
4359 * 3. CBUSY masked.
4360 * 4. TINT clear.
4361 * 5. CBUSY clear.
4362 */
static int smctr_reset_adapter(struct net_device *dev)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        int ioaddr = dev->base_addr;

        /* Reseting the NIC will put it in a halted and un-initialized state. */
        smctr_set_trc_reset(ioaddr);
        /* NOTE(review): original comment said "~2 ms" but mdelay(200) busy
         * waits 200 ms — confirm which delay is intended.
         */
        mdelay(200);

        smctr_clear_trc_reset(ioaddr);
        mdelay(200); /* NOTE(review): likewise 200 ms, not ~2 ms */

        /* Remove any latched interrupts that occurred prior to reseting the
         * adapter or possibily caused by line glitches due to the reset.
         */
        outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR);

        return (0);
}
4381
smctr_restart_tx_chain(struct net_device * dev,short queue)4382 static int smctr_restart_tx_chain(struct net_device *dev, short queue)
4383 {
4384 struct net_local *tp = (struct net_local *)dev->priv;
4385 int err = 0;
4386
4387 if(smctr_debug > 10)
4388 printk("%s: smctr_restart_tx_chain\n", dev->name);
4389
4390 if(tp->num_tx_fcbs_used[queue] != 0
4391 && tp->tx_queue_status[queue] == NOT_TRANSMITING)
4392 {
4393 tp->tx_queue_status[queue] = TRANSMITING;
4394 err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
4395 }
4396
4397 return (err);
4398 }
4399
smctr_ring_status_chg(struct net_device * dev)4400 static int smctr_ring_status_chg(struct net_device *dev)
4401 {
4402 struct net_local *tp = (struct net_local *)dev->priv;
4403
4404 if(smctr_debug > 10)
4405 printk("%s: smctr_ring_status_chg\n", dev->name);
4406
4407 /* Check for ring_status_flag: whenever MONITOR_STATE_BIT
4408 * Bit is set, check value of monitor_state, only then we
4409 * enable and start transmit/receive timeout (if and only
4410 * if it is MS_ACTIVE_MONITOR_STATE or MS_STANDBY_MONITOR_STATE)
4411 */
4412 if(tp->ring_status_flags == MONITOR_STATE_CHANGED)
4413 {
4414 if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE)
4415 || (tp->monitor_state == MS_STANDBY_MONITOR_STATE))
4416 {
4417 tp->monitor_state_ready = 1;
4418 }
4419 else
4420 {
4421 /* if adapter is NOT in either active monitor
4422 * or standby monitor state => Disable
4423 * transmit/receive timeout.
4424 */
4425 tp->monitor_state_ready = 0;
4426
4427 /* Ring speed problem, switching to auto mode. */
4428 if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE
4429 && !tp->cleanup)
4430 {
4431 printk(KERN_INFO "%s: Incorrect ring speed switching.\n",
4432 dev->name);
4433 smctr_set_ring_speed(dev);
4434 }
4435 }
4436 }
4437
4438 if(!(tp->ring_status_flags & RING_STATUS_CHANGED))
4439 return (0);
4440
4441 switch(tp->ring_status)
4442 {
4443 case RING_RECOVERY:
4444 printk(KERN_INFO "%s: Ring Recovery\n", dev->name);
4445 tp->current_ring_status |= RING_RECOVERY;
4446 break;
4447
4448 case SINGLE_STATION:
4449 printk(KERN_INFO "%s: Single Statinon\n", dev->name);
4450 tp->current_ring_status |= SINGLE_STATION;
4451 break;
4452
4453 case COUNTER_OVERFLOW:
4454 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
4455 tp->current_ring_status |= COUNTER_OVERFLOW;
4456 break;
4457
4458 case REMOVE_RECEIVED:
4459 printk(KERN_INFO "%s: Remove Received\n", dev->name);
4460 tp->current_ring_status |= REMOVE_RECEIVED;
4461 break;
4462
4463 case AUTO_REMOVAL_ERROR:
4464 printk(KERN_INFO "%s: Auto Remove Error\n", dev->name);
4465 tp->current_ring_status |= AUTO_REMOVAL_ERROR;
4466 break;
4467
4468 case LOBE_WIRE_FAULT:
4469 printk(KERN_INFO "%s: Lobe Wire Fault\n", dev->name);
4470 tp->current_ring_status |= LOBE_WIRE_FAULT;
4471 break;
4472
4473 case TRANSMIT_BEACON:
4474 printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);
4475 tp->current_ring_status |= TRANSMIT_BEACON;
4476 break;
4477
4478 case SOFT_ERROR:
4479 printk(KERN_INFO "%s: Soft Error\n", dev->name);
4480 tp->current_ring_status |= SOFT_ERROR;
4481 break;
4482
4483 case HARD_ERROR:
4484 printk(KERN_INFO "%s: Hard Error\n", dev->name);
4485 tp->current_ring_status |= HARD_ERROR;
4486 break;
4487
4488 case SIGNAL_LOSS:
4489 printk(KERN_INFO "%s: Singal Loss\n", dev->name);
4490 tp->current_ring_status |= SIGNAL_LOSS;
4491 break;
4492
4493 default:
4494 printk(KERN_INFO "%s: Unknown ring status change\n",
4495 dev->name);
4496 break;
4497 }
4498
4499 return (0);
4500 }
4501
/* Drain received frames from the current receive queue's FCB chain in
 * adapter shared memory.  Non-MAC (data) frames are copied into a fresh
 * skb and handed to the network stack; MAC frames go to the MAC
 * protocol handler.  After each frame the receive chain is advanced.
 *
 * Returns 0 when the chain head already reads SUCCESS, otherwise
 * HARDWARE_FAILED (see review note below).
 */
static int smctr_rx_frame(struct net_device *dev)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	__u16 queue, status, rx_size, err = 0;
	__u8 *pbuff;

	if(smctr_debug > 10)
		printk("%s: smctr_rx_frame\n", dev->name);

	/* Interrupts are disabled while we touch the shared-memory
	 * receive structures; the non-MAC copy path re-enables them
	 * once the frame data has been copied out. */
	cli();
	queue = tp->receive_queue_number;

	while((status = tp->rx_fcb_curr[queue]->frame_status) != SUCCESS)
	{
		err = HARDWARE_FAILED;

		/* Accept the frame when the low status bits show no error,
		 * or when the interface is configured to accept errored
		 * packets. */
		if(((status & 0x007f) == 0)
			|| ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0))
		{
			/* frame length less the CRC (4 bytes) + FS (1 byte) */
			rx_size = tp->rx_fcb_curr[queue]->frame_length - 5;

			pbuff = smctr_get_rx_pointer(dev, queue);

			/* Map the page holding the frame and drop to 8-bit
			 * access before reading shared RAM. */
			smctr_set_page(dev, pbuff);
			smctr_disable_16bit(dev);

			/* pbuff points to addr within one page */
			pbuff = (__u8 *)PAGE_POINTER(pbuff);

			if(queue == NON_MAC_QUEUE)
			{
				struct sk_buff *skb;

				skb = dev_alloc_skb(rx_size);
				if (skb) {
					skb_put(skb, rx_size);

					memcpy(skb->data, pbuff, rx_size);
					sti();

					/* Update Counters */
					tp->MacStat.rx_packets++;
					tp->MacStat.rx_bytes += skb->len;

					/* Kick the packet on up. */
					skb->dev = dev;
					skb->protocol = tr_type_trans(skb, dev);
					netif_rx(skb);
					dev->last_rx = jiffies;
				} else {
					/* Allocation failed: frame dropped. */
					sti();
				}
			}
			else
				/* MAC frame: feed the protocol state machine.
				 * NOTE(review): interrupts are NOT re-enabled
				 * on this path inside the loop — confirm this
				 * is intended. */
				smctr_process_rx_packet((MAC_HEADER *)pbuff,
					rx_size, dev, status);
		}

		smctr_enable_16bit(dev);
		smctr_set_page(dev, (__u8 *)tp->ram_access);
		smctr_update_rx_chain(dev, queue);

		/* NOTE(review): err is set to HARDWARE_FAILED at the top of
		 * every pass and never cleared, so this break always fires
		 * after processing one frame — verify against the SDK. */
		if(err != SUCCESS)
			break;
	}

	return (err);
}
4571
/* Build and transmit a Duplicate Address Test (DAT) MAC frame addressed
 * to our own station address, then poll (up to 10000 x 1 ms) for the
 * transmit to complete and verify it went out clean.
 *
 * Returns 0 on success, OUT_OF_RESOURCES if no transmit FCB is free,
 * INITIALIZE_FAILED if the frame failed to transmit cleanly, or the
 * error from smctr_trc_send_packet().
 */
static int smctr_send_dat(struct net_device *dev)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	unsigned int i, err;
	MAC_HEADER *tmf;
	FCBlock *fcb;

	if(smctr_debug > 10)
		printk("%s: smctr_send_dat\n", dev->name);

	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE,
		sizeof(MAC_HEADER))) == (FCBlock *)(-1L))
	{
		return (OUT_OF_RESOURCES);
	}

	/* Initialize DAT Data Fields. */
	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
	tmf->ac = MSB(AC_FC_DAT);
	tmf->fc = LSB(AC_FC_DAT);

	/* Destination = source = our own address (self-addressed test). */
	for(i = 0; i < 6; i++)
	{
		tmf->sa[i] = dev->dev_addr[i];
		tmf->da[i] = dev->dev_addr[i];

	}

	tmf->vc = DAT;
	tmf->dc_sc = DC_RS | SC_RS;
	tmf->vl = 4;
	tmf->vl = SWAP_BYTES(tmf->vl);	/* vector length goes out big-endian */

	/* Start Transmit. */
	if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
		return (err);

	/* Wait for Transmit to Complete */
	for(i = 0; i < 10000; i++)
	{
		if(fcb->frame_status & FCB_COMMAND_DONE)
			break;
		mdelay(1);
	}

	/* Check if GOOD frame Tx'ed. */
	if(!(fcb->frame_status & FCB_COMMAND_DONE)
		|| fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
	{
		return (INITIALIZE_FAILED);
	}

	/* De-allocated Tx FCB and Frame Buffer
	 * The FCB must be de-allocated manually if executing with
	 * interrupts disabled, other wise the ISR (LM_Service_Events)
	 * will de-allocate it when the interrupt occurs.
	 */
	tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
	smctr_update_tx_chain(dev, fcb, MAC_QUEUE);

	return (0);
}
4634
smctr_timeout(struct net_device * dev)4635 static void smctr_timeout(struct net_device *dev)
4636 {
4637 /*
4638 * If we get here, some higher level has decided we are broken.
4639 * There should really be a "kick me" function call instead.
4640 *
4641 * Resetting the token ring adapter takes a long time so just
4642 * fake transmission time and go on trying. Our own timeout
4643 * routine is in sktr_timer_chk()
4644 */
4645 dev->trans_start = jiffies;
4646 netif_wake_queue(dev);
4647 }
4648
4649 /*
4650 * Gets skb from system, queues it and checks if it can be sent
4651 */
smctr_send_packet(struct sk_buff * skb,struct net_device * dev)4652 static int smctr_send_packet(struct sk_buff *skb, struct net_device *dev)
4653 {
4654 struct net_local *tp = (struct net_local *)dev->priv;
4655
4656 if(smctr_debug > 10)
4657 printk("%s: smctr_send_packet\n", dev->name);
4658
4659 /*
4660 * Block a transmit overlap
4661 */
4662
4663 netif_stop_queue(dev);
4664
4665 if(tp->QueueSkb == 0)
4666 return (1); /* Return with tbusy set: queue full */
4667
4668 tp->QueueSkb--;
4669 skb_queue_tail(&tp->SendSkbQueue, skb);
4670 smctr_hardware_send_packet(dev, tp);
4671 if(tp->QueueSkb > 0)
4672 netif_wake_queue(dev);
4673
4674 return (0);
4675 }
4676
/* Build and transmit a Lobe Media Test MAC frame (all-zero destination,
 * two wrap-data subvectors) and poll (up to 10000 x 1 ms) for the
 * transmit to complete cleanly.
 *
 * Returns 0 on success, OUT_OF_RESOURCES if no transmit FCB is free,
 * LOBE_MEDIA_TEST_FAILED if the frame failed to transmit cleanly, or
 * the error from smctr_trc_send_packet().
 */
static int smctr_send_lobe_media_test(struct net_device *dev)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	MAC_SUB_VECTOR *tsv;
	MAC_HEADER *tmf;
	FCBlock *fcb;
	__u32 i;
	int err;

	if(smctr_debug > 15)
		printk("%s: smctr_send_lobe_media_test\n", dev->name);

	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr)
		+ S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L))
	{
		return (OUT_OF_RESOURCES);
	}

	/* Initialize DAT Data Fields. */
	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
	tmf->ac = MSB(AC_FC_LOBE_MEDIA_TEST);
	tmf->fc = LSB(AC_FC_LOBE_MEDIA_TEST);

	/* Null destination, our own source address. */
	for(i = 0; i < 6; i++)
	{
		tmf->da[i] = 0;
		tmf->sa[i] = dev->dev_addr[i];
	}

	tmf->vc = LOBE_MEDIA_TEST;
	tmf->dc_sc = DC_RS | SC_RS;
	tmf->vl = 4;

	/* Append two wrap-data subvectors, accumulating their lengths
	 * into the vector length. */
	tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
	smctr_make_wrap_data(dev, tsv);
	tmf->vl += tsv->svl;

	tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
	smctr_make_wrap_data(dev, tsv);
	tmf->vl += tsv->svl;

	/* Start Transmit. */
	tmf->vl = SWAP_BYTES(tmf->vl);
	if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
		return (err);

	/* Wait for Transmit to Complete. (10 ms). */
	for(i=0; i < 10000; i++)
	{
		if(fcb->frame_status & FCB_COMMAND_DONE)
			break;
		mdelay(1);
	}

	/* Check if GOOD frame Tx'ed */
	if(!(fcb->frame_status & FCB_COMMAND_DONE)
		|| fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
	{
		return (LOBE_MEDIA_TEST_FAILED);
	}

	/* De-allocated Tx FCB and Frame Buffer
	 * The FCB must be de-allocated manually if executing with
	 * interrupts disabled, other wise the ISR (LM_Service_Events)
	 * will de-allocate it when the interrupt occurs.
	 */
	tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
	smctr_update_tx_chain(dev, fcb, MAC_QUEUE);

	return (0);
}
4748
/* Build and queue a Report Address (RPT_ADDR) MAC response frame
 * carrying correlator, physical drop, upstream neighbor, address
 * modifier, group address and functional address subvectors.
 *
 * Returns the result of smctr_trc_send_packet(), or 0 when no
 * transmit FCB is available.
 */
static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
	__u16 correlator)
{
	MAC_HEADER *hdr;
	MAC_SUB_VECTOR *sv;
	FCBlock *block;

	block = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
		+ S_CORRELATOR + S_PHYSICAL_DROP + S_UPSTREAM_NEIGHBOR_ADDRESS
		+ S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS);
	if(block == (FCBlock *)(-1L))
		return (0);

	hdr = (MAC_HEADER *)block->bdb_ptr->data_block_ptr;
	hdr->vc = RPT_ADDR;
	hdr->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
	hdr->vl = 4;

	smctr_make_8025_hdr(dev, rmf, hdr, AC_FC_RPT_ADDR);

	/* Append each subvector in turn, folding its length into vl. */
	sv = (MAC_SUB_VECTOR *)((__u32)hdr + sizeof(MAC_HEADER));
	smctr_make_corr(dev, sv, correlator);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_phy_drop_num(dev, sv);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_upstream_neighbor_addr(dev, sv);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_addr_mod(dev, sv);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_group_addr(dev, sv);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_funct_addr(dev, sv);
	hdr->vl += sv->svl;

	/* Subtract out MVID and MVL which is
	 * include in both vl and MAC_HEADER
	 */
	/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	*/
	hdr->vl = SWAP_BYTES(hdr->vl);

	return (smctr_trc_send_packet(dev, block, MAC_QUEUE));
}
4806
/* Build and queue a Report Attachments (RPT_ATTCH) MAC response frame
 * with correlator, product instance ID, functional address, authorized
 * function class and authorized access priority subvectors.
 *
 * Returns the result of smctr_trc_send_packet(), or 0 when no
 * transmit FCB is available.
 */
static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
	__u16 correlator)
{
	MAC_HEADER *hdr;
	MAC_SUB_VECTOR *sv;
	FCBlock *block;

	block = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
		+ S_CORRELATOR + S_PRODUCT_INSTANCE_ID + S_FUNCTIONAL_ADDRESS
		+ S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY);
	if(block == (FCBlock *)(-1L))
		return (0);

	hdr = (MAC_HEADER *)block->bdb_ptr->data_block_ptr;
	hdr->vc = RPT_ATTCH;
	hdr->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
	hdr->vl = 4;

	smctr_make_8025_hdr(dev, rmf, hdr, AC_FC_RPT_ATTCH);

	/* Append each subvector in turn, folding its length into vl. */
	sv = (MAC_SUB_VECTOR *)((__u32)hdr + sizeof(MAC_HEADER));
	smctr_make_corr(dev, sv, correlator);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_product_id(dev, sv);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_funct_addr(dev, sv);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_auth_funct_class(dev, sv);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_access_pri(dev, sv);
	hdr->vl += sv->svl;

	/* Subtract out MVID and MVL which is
	 * include in both vl and MAC_HEADER
	 */
	/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	*/
	hdr->vl = SWAP_BYTES(hdr->vl);

	return (smctr_trc_send_packet(dev, block, MAC_QUEUE));
}
4860
/* Build and queue a Report State (RPT_STATE) MAC response frame with
 * correlator, ring station version, ring station status and station
 * identifier subvectors.
 *
 * Returns the result of smctr_trc_send_packet(), or 0 when no
 * transmit FCB is available.
 */
static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
	__u16 correlator)
{
	MAC_HEADER *hdr;
	MAC_SUB_VECTOR *sv;
	FCBlock *block;

	block = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
		+ S_CORRELATOR + S_RING_STATION_VERSION_NUMBER
		+ S_RING_STATION_STATUS + S_STATION_IDENTIFER);
	if(block == (FCBlock *)(-1L))
		return (0);

	hdr = (MAC_HEADER *)block->bdb_ptr->data_block_ptr;
	hdr->vc = RPT_STATE;
	hdr->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
	hdr->vl = 4;

	smctr_make_8025_hdr(dev, rmf, hdr, AC_FC_RPT_STATE);

	/* Append each subvector in turn, folding its length into vl. */
	sv = (MAC_SUB_VECTOR *)((__u32)hdr + sizeof(MAC_HEADER));
	smctr_make_corr(dev, sv, correlator);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_ring_station_version(dev, sv);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_ring_station_status(dev, sv);
	hdr->vl += sv->svl;

	sv = (MAC_SUB_VECTOR *)((__u32)sv + sv->svl);
	smctr_make_station_id(dev, sv);
	hdr->vl += sv->svl;

	/* Subtract out MVID and MVL which is
	 * include in both vl and MAC_HEADER
	 */
	/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	*/
	hdr->vl = SWAP_BYTES(hdr->vl);

	return (smctr_trc_send_packet(dev, block, MAC_QUEUE));
}
4910
/* Build and queue a Report Transmit Forward (RPT_TX_FORWARD) MAC
 * response frame carrying a single transmit status code subvector.
 *
 * Returns the result of smctr_trc_send_packet(), or 0 when no
 * transmit FCB is available.
 */
static int smctr_send_rpt_tx_forward(struct net_device *dev,
	MAC_HEADER *rmf, __u16 tx_fstatus)
{
	MAC_HEADER *hdr;
	MAC_SUB_VECTOR *sv;
	FCBlock *block;

	block = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
		+ S_TRANSMIT_STATUS_CODE);
	if(block == (FCBlock *)(-1L))
		return (0);

	hdr = (MAC_HEADER *)block->bdb_ptr->data_block_ptr;
	hdr->vc = RPT_TX_FORWARD;
	hdr->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
	hdr->vl = 4;

	smctr_make_8025_hdr(dev, rmf, hdr, AC_FC_RPT_TX_FORWARD);

	/* Single subvector: the forwarded frame's transmit status. */
	sv = (MAC_SUB_VECTOR *)((__u32)hdr + sizeof(MAC_HEADER));
	smctr_make_tx_status_code(dev, sv, tx_fstatus);
	hdr->vl += sv->svl;

	/* Subtract out MVID and MVL which is
	 * include in both vl and MAC_HEADER
	 */
	/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	*/
	hdr->vl = SWAP_BYTES(hdr->vl);

	return(smctr_trc_send_packet(dev, block, MAC_QUEUE));
}
4946
/* Build a generic RSP (response) MAC frame in a transmit FCB.
 *
 * NOTE(review): this routine fills in only the 802.5 header and the
 * correlator subvector, then returns 0.  The rcode parameter is never
 * encoded into the frame, vl is never byte-swapped, and
 * smctr_trc_send_packet() is never called — so the response is built
 * but not transmitted.  Confirm against the SDK whether this is
 * intentionally left unimplemented.
 */
static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
	__u16 rcode, __u16 correlator)
{
	MAC_HEADER *tmf;
	MAC_SUB_VECTOR *tsv;
	FCBlock *fcb;

	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
		+ S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L))
	{
		return (0);
	}

	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
	tmf->vc = RSP;
	tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
	tmf->vl = 4;

	smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RSP);

	tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
	smctr_make_corr(dev, tsv, correlator);

	return (0);
}
4972
/* Transmit a Request Initialization (RQ_INIT) MAC frame to the Ring
 * Parameter Server and repeat until either four frames have been sent
 * without the transmit error bit, or the returned AC bits are all set
 * (the frame was recognized and copied).  Each attempt polls up to
 * 10000 x 1 ms for transmit completion.
 *
 * Returns the result of smctr_join_complete_state() on success, 0 if
 * no transmit FCB is available, HARDWARE_FAILED on a transmit that
 * never completes, or the error from smctr_trc_send_packet().
 */
static int smctr_send_rq_init(struct net_device *dev)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	MAC_HEADER *tmf;
	MAC_SUB_VECTOR *tsv;
	FCBlock *fcb;
	unsigned int i, count = 0;
	__u16 fstatus;
	int err;

	do {
		if(((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
			+ S_PRODUCT_INSTANCE_ID + S_UPSTREAM_NEIGHBOR_ADDRESS
			+ S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER))
			== (FCBlock *)(-1L)))
		{
			return (0);
		}

		tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
		tmf->vc = RQ_INIT;
		tmf->dc_sc = DC_RPS | SC_RS;
		tmf->vl = 4;

		smctr_make_8025_hdr(dev, 0L, tmf, AC_FC_RQ_INIT);

		/* Append each subvector, accumulating lengths into vl. */
		tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
		smctr_make_product_id(dev, tsv);

		tmf->vl += tsv->svl;
		tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
		smctr_make_upstream_neighbor_addr(dev, tsv);

		tmf->vl += tsv->svl;
		tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
		smctr_make_ring_station_version(dev, tsv);

		tmf->vl += tsv->svl;
		tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
		smctr_make_addr_mod(dev, tsv);

		tmf->vl += tsv->svl;

		/* Subtract out MVID and MVL which is
		 * include in both vl and MAC_HEADER
		 */
		/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
		fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
		*/
		tmf->vl = SWAP_BYTES(tmf->vl);

		if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
			return (err);

		/* Wait for Transmit to Complete */
		for(i = 0; i < 10000; i++)
		{
			if(fcb->frame_status & FCB_COMMAND_DONE)
				break;
			mdelay(1);
		}

		/* Check if GOOD frame Tx'ed */
		fstatus = fcb->frame_status;

		if(!(fstatus & FCB_COMMAND_DONE))
			return (HARDWARE_FAILED);

		/* Count only transmissions without the error bit. */
		if(!(fstatus & FCB_TX_STATUS_E))
			count++;

		/* De-allocated Tx FCB and Frame Buffer
		 * The FCB must be de-allocated manually if executing with
		 * interrupts disabled, other wise the ISR (LM_Service_Events)
		 * will de-allocate it when the interrupt occurs.
		 */
		tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
		smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
	} while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS));

	return (smctr_join_complete_state(dev));
}
5055
/* Forward the data portion of a received TX Forward MAC frame back onto
 * the ring.  The transmit FCB is pointed directly at the received
 * frame's data (skipping the vector code and vector length), so no copy
 * is made.  One resume retry is attempted if the first transmit does
 * not complete.
 *
 * Returns A_FRAME_WAS_FORWARDED on success with *tx_fstatus set to the
 * final frame status, 0 when the frame is the end of the forward chain
 * (vl <= 18) or no FCB is free, HARDWARE_FAILED if the transmit never
 * completes, or the error from the send/resume commands.
 */
static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
	__u16 *tx_fstatus)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	FCBlock *fcb;
	unsigned int i;
	int err;

	/* Check if this is the END POINT of the Transmit Forward Chain. */
	if(rmf->vl <= 18)
		return (0);

	/* Allocate Transmit FCB only by requesting 0 bytes
	 * of data buffer.
	 */
	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L))
		return (0);

	/* Set pointer to Transmit Frame Buffer to the data
	 * portion of the received TX Forward frame, making
	 * sure to skip over the Vector Code (vc) and Vector
	 * length (vl).
	 */
	fcb->bdb_ptr->trc_data_block_ptr = TRC_POINTER((__u32)rmf
		+ sizeof(MAC_HEADER) + 2);
	fcb->bdb_ptr->data_block_ptr = (__u16 *)((__u32)rmf
		+ sizeof(MAC_HEADER) + 2);

	/* Payload length: vl minus MVID/MVL (4) and the skipped vc/vl (2). */
	fcb->frame_length = rmf->vl - 4 - 2;
	fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2;

	if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
		return (err);

	/* Wait for Transmit to Complete */
	for(i = 0; i < 10000; i++)
	{
		if(fcb->frame_status & FCB_COMMAND_DONE)
			break;
		mdelay(1);
	}

	/* Check if GOOD frame Tx'ed */
	if(!(fcb->frame_status & FCB_COMMAND_DONE))
	{
		/* Not done yet: resume the transmit queue and poll again. */
		if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE)))
			return (err);

		for(i = 0; i < 10000; i++)
		{
			if(fcb->frame_status & FCB_COMMAND_DONE)
				break;
			mdelay(1);
		}

		if(!(fcb->frame_status & FCB_COMMAND_DONE))
			return (HARDWARE_FAILED);
	}

	*tx_fstatus = fcb->frame_status;

	return (A_FRAME_WAS_FORWARDED);
}
5119
/* Store the authorized access priority carried in a received subvector.
 * Returns POSITIVE_ACK, or E_SUB_VECTOR_LENGTH_ERROR on a bad length.
 */
static int smctr_set_auth_access_pri(struct net_device *dev,
	MAC_SUB_VECTOR *rsv)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	__u16 pri;

	if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY)
		return (E_SUB_VECTOR_LENGTH_ERROR);

	/* Value arrives as two big-endian bytes. */
	pri = (rsv->svv[0] << 8) | rsv->svv[1];
	tp->authorized_access_priority = pri;

	return (POSITIVE_ACK);
}
5132
/* Store the authorized function classes carried in a received
 * subvector.  Returns POSITIVE_ACK, or E_SUB_VECTOR_LENGTH_ERROR on a
 * bad length.
 */
static int smctr_set_auth_funct_class(struct net_device *dev,
	MAC_SUB_VECTOR *rsv)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	__u16 classes;

	if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS)
		return (E_SUB_VECTOR_LENGTH_ERROR);

	/* Value arrives as two big-endian bytes. */
	classes = (rsv->svv[0] << 8) | rsv->svv[1];
	tp->authorized_function_classes = classes;

	return (POSITIVE_ACK);
}
5145
/* Extract the 16-bit correlator from a received subvector into
 * *correlator.  dev is unused; the parameter exists for handler
 * signature uniformity.  Returns POSITIVE_ACK, or
 * E_SUB_VECTOR_LENGTH_ERROR on a bad length.
 */
static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
	__u16 *correlator)
{
	if(rsv->svl != S_CORRELATOR)
		return (E_SUB_VECTOR_LENGTH_ERROR);

	/* Two big-endian bytes. */
	*correlator = (rsv->svv[0] << 8) | rsv->svv[1];

	return (POSITIVE_ACK);
}
5156
/* Program the adapter's soft-error report timer from a received
 * subvector; the two big-endian bytes are scaled by 10 before being
 * written to RW_TER_THRESHOLD.  Returns POSITIVE_ACK,
 * E_SUB_VECTOR_LENGTH_ERROR on a bad length, or the error from
 * smctr_wait_cmd().
 */
static int smctr_set_error_timer_value(struct net_device *dev,
	MAC_SUB_VECTOR *rsv)
{
	__u16 tval;
	int rc;

	if(rsv->svl != S_ERROR_TIMER_VALUE)
		return (E_SUB_VECTOR_LENGTH_ERROR);

	tval = ((rsv->svv[0] << 8) | rsv->svv[1]) * 10;

	smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &tval);

	rc = smctr_wait_cmd(dev);
	if(rc)
		return (rc);

	return (POSITIVE_ACK);
}
5175
/* Validate a Frame Forward subvector.  Returns POSITIVE_ACK when
 * acceptable, E_SUB_VECTOR_LENGTH_ERROR on a bad length, or
 * E_TRANSMIT_FORWARD_INVALID when the request does not come from the
 * CRS class and fails the extra checks.
 */
static int smctr_set_frame_forward(struct net_device *dev,
	MAC_SUB_VECTOR *rsv, __u8 dc_sc)
{
	if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD))
		return (E_SUB_VECTOR_LENGTH_ERROR);

	/* Requests from CRS skip the extra validation. */
	if((dc_sc & DC_MASK) == DC_CRS)
		return (POSITIVE_ACK);

	/* svl >= 2 is already guaranteed by the length check above. */
	if(rsv->svl < 20)
		return (E_TRANSMIT_FORWARD_INVALID);

	if((rsv->svv[0] != 0) || (rsv->svv[1] != 0))
		return (E_TRANSMIT_FORWARD_INVALID);

	return (POSITIVE_ACK);
}
5193
/* Store the local ring number from a received subvector, but only when
 * a destination pointer has been registered.  Returns POSITIVE_ACK, or
 * E_SUB_VECTOR_LENGTH_ERROR on a bad length.
 */
static int smctr_set_local_ring_num(struct net_device *dev,
	MAC_SUB_VECTOR *rsv)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	__u16 ring_num;

	if(rsv->svl != S_LOCAL_RING_NUMBER)
		return (E_SUB_VECTOR_LENGTH_ERROR);

	/* Two big-endian bytes. */
	ring_num = (rsv->svv[0] << 8) | rsv->svv[1];

	if(tp->ptr_local_ring_num)
		*(__u16 *)(tp->ptr_local_ring_num) = ring_num;

	return (POSITIVE_ACK);
}
5208
smctr_set_ctrl_attention(struct net_device * dev)5209 static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
5210 {
5211 struct net_local *tp = (struct net_local *)dev->priv;
5212 int ioaddr = dev->base_addr;
5213
5214 if(tp->bic_type == BIC_585_CHIP)
5215 outb((tp->trc_mask | HWR_CA), ioaddr + HWR);
5216 else
5217 {
5218 outb((tp->trc_mask | CSR_CA), ioaddr + CSR);
5219 outb(tp->trc_mask, ioaddr + CSR);
5220 }
5221
5222 return (0);
5223 }
5224
smctr_set_multicast_list(struct net_device * dev)5225 static void smctr_set_multicast_list(struct net_device *dev)
5226 {
5227 if(smctr_debug > 10)
5228 printk("%s: smctr_set_multicast_list\n", dev->name);
5229
5230 return;
5231 }
5232
/* Select the shared-RAM page containing buf by writing the page bits of
 * its offset (relative to ram_access) to the PR register.  Always
 * returns 0.
 */
static int smctr_set_page(struct net_device *dev, __u8 *buf)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	__u32 offset = (__u32)buf - (__u32)tp->ram_access;

	outb((__u8)((offset & PR_PAGE_MASK) >> 8), dev->base_addr + PR);

	return (0);
}
5245
/* Write the physical drop number from a received subvector into the
 * adapter.  Returns POSITIVE_ACK, E_SUB_VECTOR_LENGTH_ERROR on a bad
 * length, or the error from smctr_wait_cmd().
 */
static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
{
	int rc;

	if(rsv->svl != S_PHYSICAL_DROP)
		return (E_SUB_VECTOR_LENGTH_ERROR);

	smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]);

	rc = smctr_wait_cmd(dev);
	if(rc)
		return (rc);

	return (POSITIVE_ACK);
}
5259
/* Reset the ring speed to the opposite of what it was. This auto-pilot
 * mode requires a complete reset and re-init of the adapter.
 *
 * Called from smctr_ring_status_chg() when the monitor FSM goes
 * inactive (wrong-speed insert).  Toggles media_type between UTP 16
 * and UTP 4, then resets and fully re-initializes the card.
 *
 * Returns 0 on success or the error from init / interrupt-enable.
 */
static int smctr_set_ring_speed(struct net_device *dev)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	int err;

	if(tp->media_type == MEDIA_UTP_16)
		tp->media_type = MEDIA_UTP_4;
	else
		tp->media_type = MEDIA_UTP_16;

	smctr_enable_16bit(dev);

	/* Re-Initialize adapter's internal registers */
	smctr_reset_adapter(dev);

	if((err = smctr_init_card_real(dev)))
		return (err);

	smctr_enable_bic_int(dev);

	if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
		return (err);

	smctr_disable_16bit(dev);

	return (0);
}
5290
/* Enable receive look-ahead and probe the bus width behaviour.  On an
 * ISA-16 bus a test word (0x1234) is written to shared RAM in 16-bit
 * mode and read back in 8-bit mode; a mismatch latches the board into
 * FORCED_16BIT_MODE.  The original RAM word is saved and restored.
 * Always returns 0.
 */
static int smctr_set_rx_look_ahead(struct net_device *dev)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	__u16 sword, rword;

	if(smctr_debug > 10)
		printk("%s: smctr_set_rx_look_ahead_flag\n", dev->name);

	tp->adapter_flags &= ~(FORCED_16BIT_MODE);
	tp->adapter_flags |= RX_VALID_LOOKAHEAD;

	if(tp->adapter_bus == BUS_ISA16_TYPE)
	{
		/* Save the current word, then write the probe pattern. */
		sword = *((__u16 *)(tp->ram_access));
		*((__u16 *)(tp->ram_access)) = 0x1234;

		/* Read it back with 16-bit access disabled. */
		smctr_disable_16bit(dev);
		rword = *((__u16 *)(tp->ram_access));
		smctr_enable_16bit(dev);

		if(rword != 0x1234)
			tp->adapter_flags |= FORCED_16BIT_MODE;

		/* Restore the original contents. */
		*((__u16 *)(tp->ram_access)) = sword;
	}

	return (0);
}
5319
smctr_set_trc_reset(int ioaddr)5320 static int smctr_set_trc_reset(int ioaddr)
5321 {
5322 __u8 r;
5323
5324 r = inb(ioaddr + MSR);
5325 outb(MSR_RST | r, ioaddr + MSR);
5326
5327 return (0);
5328 }
5329
5330 /*
5331 * This function can be called if the adapter is busy or not.
5332 */
smctr_setup_single_cmd(struct net_device * dev,__u16 command,__u16 subcommand)5333 static int smctr_setup_single_cmd(struct net_device *dev,
5334 __u16 command, __u16 subcommand)
5335 {
5336 struct net_local *tp = (struct net_local *)dev->priv;
5337 unsigned int err;
5338
5339 if(smctr_debug > 10)
5340 printk("%s: smctr_setup_single_cmd\n", dev->name);
5341
5342 if((err = smctr_wait_while_cbusy(dev)))
5343 return (err);
5344
5345 if((err = (unsigned int)smctr_wait_cmd(dev)))
5346 return (err);
5347
5348 tp->acb_head->cmd_done_status = 0;
5349 tp->acb_head->cmd = command;
5350 tp->acb_head->subcmd = subcommand;
5351
5352 err = smctr_issue_resume_acb_cmd(dev);
5353
5354 return (err);
5355 }
5356
5357 /*
5358 * This function can not be called with the adapter busy.
5359 */
smctr_setup_single_cmd_w_data(struct net_device * dev,__u16 command,__u16 subcommand)5360 static int smctr_setup_single_cmd_w_data(struct net_device *dev,
5361 __u16 command, __u16 subcommand)
5362 {
5363 struct net_local *tp = (struct net_local *)dev->priv;
5364
5365 tp->acb_head->cmd_done_status = ACB_COMMAND_NOT_DONE;
5366 tp->acb_head->cmd = command;
5367 tp->acb_head->subcmd = subcommand;
5368 tp->acb_head->data_offset_lo
5369 = (__u16)TRC_POINTER(tp->misc_command_data);
5370
5371 return(smctr_issue_resume_acb_cmd(dev));
5372 }
5373
/* Bump-allocate `size` bytes from the adapter's shared RAM area and
 * return a pointer to the start of the allocation.  There is no free
 * and no bounds check; callers must stay within the shared-RAM budget.
 */
static char *smctr_malloc(struct net_device *dev, __u16 size)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	char *chunk = (char *)(tp->ram_access + tp->sh_mem_used);

	tp->sh_mem_used += (__u32)size;

	return (chunk);
}
5384
smctr_status_chg(struct net_device * dev)5385 static int smctr_status_chg(struct net_device *dev)
5386 {
5387 struct net_local *tp = (struct net_local *)dev->priv;
5388
5389 if(smctr_debug > 10)
5390 printk("%s: smctr_status_chg\n", dev->name);
5391
5392 switch(tp->status)
5393 {
5394 case OPEN:
5395 break;
5396
5397 case CLOSED:
5398 break;
5399
5400 /* Interrupt driven open() completion. XXX */
5401 case INITIALIZED:
5402 tp->group_address_0 = 0;
5403 tp->group_address[0] = 0;
5404 tp->group_address[1] = 0;
5405 tp->functional_address_0 = 0;
5406 tp->functional_address[0] = 0;
5407 tp->functional_address[1] = 0;
5408 smctr_open_tr(dev);
5409 break;
5410
5411 default:
5412 printk(KERN_INFO "%s: status change unknown %x\n",
5413 dev->name, tp->status);
5414 break;
5415 }
5416
5417 return (0);
5418 }
5419
/* Terminate the transmit FCB chain at `fcb` and, if the transmitter is
 * idle on this queue, issue a resume-transmit command.  Returns 0, or
 * the error from smctr_issue_resume_tx_fcb_cmd().
 */
static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
	__u16 queue)
{
	struct net_local *tp = (struct net_local *)dev->priv;

	if(smctr_debug > 10)
		printk("%s: smctr_trc_send_packet\n", dev->name);

	/* This FCB ends the chain; when more than one FCB exists on the
	 * queue, let the previous one raise a completion interrupt. */
	fcb->info = FCB_CHAIN_END | FCB_ENABLE_TFS;
	if(tp->num_tx_fcbs[queue] != 1)
		fcb->back_ptr->info = FCB_INTERRUPT_ENABLE | FCB_ENABLE_TFS;

	/* Kick the transmitter only when it is not already running. */
	if(tp->tx_queue_status[queue] != NOT_TRANSMITING)
		return (0);

	tp->tx_queue_status[queue] = TRANSMITING;

	return (smctr_issue_resume_tx_fcb_cmd(dev, queue));
}
5441
/* Reap completed transmit FCBs from the tail of a transmit queue,
 * advancing the chain for each completed frame and decoding the
 * completion status.
 *
 * Returns 0 when the chain tail reads SUCCESS, HARDWARE_FAILED when a
 * hard transmit error bit (0x7e00) is set, or the error from
 * smctr_update_tx_chain().
 */
static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	__u16 status, err = 0;
	int cstatus;

	if(smctr_debug > 10)
		printk("%s: smctr_tx_complete\n", dev->name);

	while((status = tp->tx_fcb_end[queue]->frame_status) != SUCCESS)
	{
		/* Any of the hard error bits aborts the reap. */
		if(status & 0x7e00 )
		{
			err = HARDWARE_FAILED;
			break;
		}

		if((err = smctr_update_tx_chain(dev, tp->tx_fcb_end[queue],
			queue)) != SUCCESS)
			break;

		smctr_disable_16bit(dev);

		/* Decode the AR/CR/E status bits into a completion code.
		 * NOTE(review): cstatus is computed here but never used
		 * afterwards — confirm whether it should be reported. */
		if(tp->mode_bits & UMAC)
		{
			if(!(status & (FCB_TX_STATUS_AR1 | FCB_TX_STATUS_AR2)))
				cstatus = NO_SUCH_DESTINATION;
			else
			{
				if(!(status & (FCB_TX_STATUS_CR1 | FCB_TX_STATUS_CR2)))
					cstatus = DEST_OUT_OF_RESOURCES;
				else
				{
					if(status & FCB_TX_STATUS_E)
						cstatus = MAX_COLLISIONS;
					else
						cstatus = SUCCESS;
				}
			}
		}
		else
			cstatus = SUCCESS;

		/* Completions on the bug queue are always treated as OK. */
		if(queue == BUG_QUEUE)
			err = SUCCESS;

		smctr_enable_16bit(dev);
		if(err != SUCCESS)
			break;
	}

	return (err);
}
5495
/* Copy up to `bytes` bytes of skb data into adapter shared RAM starting
 * at pbuff, selecting the correct RAM page before each chunk and
 * splitting the copy at page-window boundaries.  Always returns 0.
 */
static unsigned short smctr_tx_move_frame(struct net_device *dev,
	struct sk_buff *skb, __u8 *pbuff, unsigned int bytes)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	unsigned int ram_usable;
	__u32 flen, len, offset = 0;
	__u8 *frag, *page;

	if(smctr_debug > 10)
		printk("%s: smctr_tx_move_frame\n", dev->name);

	/* Size of the usable RAM window in bytes (ram_usable is in KB). */
	ram_usable = ((unsigned int)tp->ram_usable) << 10;
	frag = skb->data;
	flen = skb->len;

	while(flen > 0 && bytes > 0)
	{
		/* Map the page containing the current destination. */
		smctr_set_page(dev, pbuff);

		offset = SMC_PAGE_OFFSET(pbuff);

		/* Clamp the chunk to the end of the RAM window... */
		if(offset + flen > ram_usable)
			len = ram_usable - offset;
		else
			len = flen;

		/* ...and to the caller's byte budget. */
		if(len > bytes)
			len = bytes;

		page = (char *) (offset + tp->ram_access);
		memcpy(page, frag, len);

		flen -=len;
		bytes -= len;
		frag += len;
		pbuff += len;
	}

	return (0);
}
5536
5537 /* Update the error statistic counters for this adapter. */
smctr_update_err_stats(struct net_device * dev)5538 static int smctr_update_err_stats(struct net_device *dev)
5539 {
5540 struct net_local *tp = (struct net_local *)dev->priv;
5541 struct tr_statistics *tstat = &tp->MacStat;
5542
5543 if(tstat->internal_errors)
5544 tstat->internal_errors
5545 += *(tp->misc_command_data + 0) & 0x00ff;
5546
5547 if(tstat->line_errors)
5548 tstat->line_errors += *(tp->misc_command_data + 0) >> 8;
5549
5550 if(tstat->A_C_errors)
5551 tstat->A_C_errors += *(tp->misc_command_data + 1) & 0x00ff;
5552
5553 if(tstat->burst_errors)
5554 tstat->burst_errors += *(tp->misc_command_data + 1) >> 8;
5555
5556 if(tstat->abort_delimiters)
5557 tstat->abort_delimiters += *(tp->misc_command_data + 2) >> 8;
5558
5559 if(tstat->recv_congest_count)
5560 tstat->recv_congest_count
5561 += *(tp->misc_command_data + 3) & 0x00ff;
5562
5563 if(tstat->lost_frames)
5564 tstat->lost_frames
5565 += *(tp->misc_command_data + 3) >> 8;
5566
5567 if(tstat->frequency_errors)
5568 tstat->frequency_errors += *(tp->misc_command_data + 4) & 0x00ff;
5569
5570 if(tstat->frame_copied_errors)
5571 tstat->frame_copied_errors
5572 += *(tp->misc_command_data + 4) >> 8;
5573
5574 if(tstat->token_errors)
5575 tstat->token_errors += *(tp->misc_command_data + 5) >> 8;
5576
5577 return (0);
5578 }
5579
/* Retire the current receive FCB on 'queue' and advance the FCB and
 * BDB chains past the frame just consumed.
 *
 * NOTE: the pointer arithmetic below goes through __u32 casts, so this
 * only works on 32-bit platforms -- the file refuses to build when
 * BITS_PER_LONG == 64 (see the #error near the top of the file).
 * Always returns 0.
 */
static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
{
	struct net_local *tp = (struct net_local *)dev->priv;
	FCBlock *fcb;
	BDBlock *bdb;
	__u16 size, len;

	fcb = tp->rx_fcb_curr[queue];
	len = fcb->frame_length;

	/* Mark this FCB consumed and hand its predecessor back to the
	 * adapter, then step the current-FCB pointer forward.
	 */
	fcb->frame_status = 0;
	fcb->info = FCB_CHAIN_END;
	fcb->back_ptr->info = FCB_WARNING;

	tp->rx_fcb_curr[queue] = tp->rx_fcb_curr[queue]->next_ptr;

	/* update RX BDBs: compute how many bytes of BDB descriptors the
	 * frame spanned, rounding up a partial data buffer to one more
	 * BDBlock, then masking back to a whole-BDB boundary.
	 */
	size = (len >> RX_BDB_SIZE_SHIFT);
	if(len & RX_DATA_BUFFER_SIZE_MASK)
		size += sizeof(BDBlock);
	size &= (~RX_BDB_SIZE_MASK);

	/* check if wrap around: the BDB ring is a contiguous region
	 * [head, end); advancing past 'end' continues from 'head'.
	 */
	bdb = (BDBlock *)((__u32)(tp->rx_bdb_curr[queue]) + (__u32)(size));
	if((__u32)bdb >= (__u32)tp->rx_bdb_end[queue])
	{
		bdb = (BDBlock *)((__u32)(tp->rx_bdb_head[queue])
			+ (__u32)(bdb) - (__u32)(tp->rx_bdb_end[queue]));
	}

	/* Terminate the chain at the new position and release the old
	 * chain end back to the adapter.
	 */
	bdb->back_ptr->info = BDB_CHAIN_END;
	tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END;
	tp->rx_bdb_curr[queue] = bdb;

	return (0);
}
5616
/* Reclaim the transmit FCB 'fcb' and its buffer space on 'queue'
 * after the adapter has finished with it, then wake the netdev
 * transmit queue.  Returns 0 on success or HARDWARE_FAILED when the
 * bookkeeping is inconsistent (no FCBs outstanding, or the buffer
 * accounting underflows).
 */
static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
	__u16 queue)
{
	struct net_local *tp = (struct net_local *)dev->priv;

	if(smctr_debug > 20)
		printk("smctr_update_tx_chain\n");

	/* Nothing outstanding to reclaim -- bookkeeping error. */
	if(tp->num_tx_fcbs_used[queue] <= 0)
		return (HARDWARE_FAILED);

	/* Buffer accounting would underflow; reset it and report. */
	if(tp->tx_buff_used[queue] < fcb->memory_alloc)
	{
		tp->tx_buff_used[queue] = 0;
		return (HARDWARE_FAILED);
	}

	tp->tx_buff_used[queue] -= fcb->memory_alloc;

	/* Once every transmit buffer has drained, rewind the current
	 * allocation pointer to the head of the region.  Otherwise the
	 * buffer space fragments into (curr - head) and (end - curr)
	 * pieces and cannot satisfy a request larger than either one,
	 * since allocation never wraps around.
	 */
	if(tp->tx_buff_used[queue] == 0)
		tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];

	tp->num_tx_fcbs_used[queue]--;
	fcb->frame_status = 0;
	tp->tx_fcb_end[queue] = fcb->next_ptr;
	netif_wake_queue(dev);

	return (0);
}
5653
smctr_wait_cmd(struct net_device * dev)5654 static int smctr_wait_cmd(struct net_device *dev)
5655 {
5656 struct net_local *tp = (struct net_local *)dev->priv;
5657 unsigned int loop_count = 0x20000;
5658
5659 if(smctr_debug > 10)
5660 printk("%s: smctr_wait_cmd\n", dev->name);
5661
5662 while(loop_count)
5663 {
5664 if(tp->acb_head->cmd_done_status & ACB_COMMAND_DONE)
5665 break;
5666 udelay(1);
5667 loop_count--;
5668 }
5669
5670 if(loop_count == 0)
5671 return(HARDWARE_FAILED);
5672
5673 if(tp->acb_head->cmd_done_status & 0xff)
5674 return(HARDWARE_FAILED);
5675
5676 return (0);
5677 }
5678
smctr_wait_while_cbusy(struct net_device * dev)5679 static int smctr_wait_while_cbusy(struct net_device *dev)
5680 {
5681 struct net_local *tp = (struct net_local *)dev->priv;
5682 unsigned int timeout = 0x20000;
5683 int ioaddr = dev->base_addr;
5684 __u8 r;
5685
5686 if(tp->bic_type == BIC_585_CHIP)
5687 {
5688 while(timeout)
5689 {
5690 r = inb(ioaddr + HWR);
5691 if((r & HWR_CBUSY) == 0)
5692 break;
5693 timeout--;
5694 }
5695 }
5696 else
5697 {
5698 while(timeout)
5699 {
5700 r = inb(ioaddr + CSR);
5701 if((r & CSR_CBUSY) == 0)
5702 break;
5703 timeout--;
5704 }
5705 }
5706
5707 if(timeout)
5708 return (0);
5709 else
5710 return (HARDWARE_FAILED);
5711 }
5712
#ifdef MODULE

/* One slot per supported adapter; filled in by init_module(). */
static struct net_device* dev_smctr[SMCTR_MAX_ADAPTERS];
static int io[SMCTR_MAX_ADAPTERS];   /* I/O base per adapter (module parm) */
static int irq[SMCTR_MAX_ADAPTERS];  /* IRQ line per adapter (module parm) */
static int mem[SMCTR_MAX_ADAPTERS];  /* shared RAM base per adapter (module parm) */

MODULE_LICENSE("GPL");

MODULE_PARM(io, "1-" __MODULE_STRING(SMCTR_MAX_ADAPTERS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(SMCTR_MAX_ADAPTERS) "i");
MODULE_PARM(mem, "1-" __MODULE_STRING(SMCTR_MAX_ADAPTERS) "i");
/* NOTE(review): 'ringspeed' is not declared in this section of the
 * file -- presumably defined earlier; verify it exists.
 */
MODULE_PARM(ringspeed, "1-" __MODULE_STRING(SMCTR_MAX_ADAPTERS) "i");
init_module(void)5727 int init_module(void)
5728 {
5729 int i;
5730
5731 for(i = 0; i < SMCTR_MAX_ADAPTERS; i++)
5732 {
5733 irq[i] = 0;
5734 mem[i] = 0;
5735 dev_smctr[i] = NULL;
5736 dev_smctr[i] = init_trdev(dev_smctr[i], 0);
5737 if(dev_smctr[i] == NULL)
5738 return (-ENOMEM);
5739
5740 dev_smctr[i]->base_addr = io[i];
5741 dev_smctr[i]->irq = irq[i];
5742 dev_smctr[i]->mem_start = mem[i];
5743 dev_smctr[i]->init = &smctr_probe;
5744
5745 if(register_trdev(dev_smctr[i]) != 0)
5746 {
5747 kfree(dev_smctr[i]);
5748 dev_smctr[i] = NULL;
5749 if(i == 0)
5750 {
5751 printk("%s: register_trdev() returned (<0).\n",
5752 cardname);
5753 return (-EIO);
5754 }
5755 else
5756 return (0);
5757 }
5758 }
5759
5760 return (0);
5761 }
5762
/* Module exit: tear down every adapter that init_module() registered,
 * releasing its MCA slot (if any), unregistering the net device, then
 * freeing its I/O region, IRQ, private data and device structure.
 */
void cleanup_module(void)
{
	int i;

	for(i = 0; i < SMCTR_MAX_ADAPTERS; i++)
	{
		if(dev_smctr[i])
		{
#ifdef CONFIG_MCA
			/* Hand the MCA slot back to the bus code. */
			struct net_local *tp
				= (struct net_local *)dev_smctr[i]->priv;
			if(tp->slot_num)
				mca_mark_as_unused(tp->slot_num);
#endif
			unregister_trdev(dev_smctr[i]);
			release_region(dev_smctr[i]->base_addr,
				SMCTR_IO_EXTENT);
			if(dev_smctr[i]->irq)
				free_irq(dev_smctr[i]->irq, dev_smctr[i]);
			/* priv may be an allocation separate from the
			 * device structure itself -- free both.
			 */
			if(dev_smctr[i]->priv)
				kfree(dev_smctr[i]->priv);
			kfree(dev_smctr[i]);
			dev_smctr[i] = NULL;
		}
	}
}
5789 #endif /* MODULE */
5790