1 /**********************************************************************
2  * iph5526.c: IP/SCSI driver for the Interphase 5526 PCI Fibre Channel
3  *			  Card.
4  * Copyright (C) 1999 Vineet M Abraham <vmabraham@hotmail.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation; either version 2, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *********************************************************************/
16 /**********************************************************************
17 Log:
18 Vineet M Abraham
19 02.12.99 Support multiple cards.
20 03.15.99 Added Fabric support.
21 04.04.99 Added N_Port support.
22 04.15.99 Added SCSI support.
23 06.18.99 Added ABTS Protocol.
24 06.24.99 Fixed data corruption when multiple XFER_RDYs are received.
25 07.07.99 Can be loaded as part of the Kernel. Changed semaphores. Added
26          more checks before invalidating SEST entries.
27 07.08.99 Added Broadcast IP stuff and fixed a unicast timeout bug.
28 ***********************************************************************/
29 /* TODO:
30 	R_T_TOV set to 15 msec in Loop topology. Needs to be 100 msec.
31     SMP testing.
32 	Fix ADISC Tx before completing FLOGI.
33 */
34 
35 static const char *version =
36     "iph5526.c:v1.0 07.08.99 Vineet Abraham (vmabraham@hotmail.com)\n";
37 
38 #include <linux/module.h>
39 #include <linux/kernel.h>
40 #include <linux/sched.h>
41 #include <linux/errno.h>
42 #include <linux/pci.h>
43 #include <linux/init.h>
44 #include <linux/mm.h>
45 #include <linux/delay.h>
46 #include <linux/skbuff.h>
47 #include <linux/if_arp.h>
48 #include <linux/timer.h>
49 #include <linux/spinlock.h>
50 #include <asm/system.h>
51 #include <asm/io.h>
52 
53 #include <linux/netdevice.h>
54 #include <linux/fcdevice.h> /* has the declarations for init_fcdev among others and includes if_fcdevice.h */
55 
56 #include <linux/blk.h>
57 #include "../../scsi/sd.h"
58 #include "../../scsi/scsi.h"
59 #include "../../scsi/hosts.h"
60 #include "../../fc4/fcp.h"
61 
62 /* driver specific header files */
63 #include "tach.h"
64 #include "tach_structs.h"
65 #include "iph5526_ip.h"
66 #include "iph5526_scsi.h"
67 #include "iph5526_novram.c"
68 
69 #define RUN_AT(x) (jiffies + (x))
70 
71 #define DEBUG_5526_0 0
72 #define DEBUG_5526_1 0
73 #define DEBUG_5526_2 0
74 
75 #if DEBUG_5526_0
76 #define DPRINTK(format, a...) {printk("%s: ", fi->name); \
77 							   printk(format, ##a); \
78 							   printk("\n");}
79 #define ENTER(x)	{printk("%s: ", fi->name); \
80 					 printk("iph5526.c : entering %s()\n", x);}
81 #define LEAVE(x)	{printk("%s: ", fi->name); \
82 					 printk("iph5526.c : leaving %s()\n",x);}
83 
84 #else
85 #define DPRINTK(format, a...) {}
86 #define ENTER(x)	{}
87 #define LEAVE(x)	{}
88 #endif
89 
90 #if DEBUG_5526_1
91 #define DPRINTK1(format, a...) {printk("%s: ", fi->name); \
92 							   printk(format, ##a); \
93 							   printk("\n");}
94 #else
95 #define DPRINTK1(format, a...) {}
96 #endif
97 
98 #if DEBUG_5526_2
99 #define DPRINTK2(format, a...) {printk("%s: ", fi->name); \
100 							   printk(format, ##a); \
101 							   printk("\n");}
102 #else
103 #define DPRINTK2(format, a...) {}
104 #endif
105 
106 #define T_MSG(format, a...) {printk("%s: ", fi->name); \
107 							 printk(format, ##a);\
108 							 printk("\n");}
109 
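/* These macros return the number of pad bytes needed to round an address
 * up to the next SFS_BUFFER_SIZE (resp. len) boundary; the sizes are
 * assumed to be powers of two.
 */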
110 #define ALIGNED_SFS_ADDR(addr) ((((unsigned long)(addr) + (SFS_BUFFER_SIZE - 1)) & ~(SFS_BUFFER_SIZE - 1)) - (unsigned long)(addr))
111 #define ALIGNED_ADDR(addr, len) ((((unsigned long)(addr) + (len - 1)) & ~(len - 1)) - (unsigned long)(addr))
112 
113 
114 static struct pci_device_id iph5526_pci_tbl[] __initdata = {
115 	{ PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_5526, PCI_ANY_ID, PCI_ANY_ID, },
116 	{ PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_55x6, PCI_ANY_ID, PCI_ANY_ID, },
117 	{ }			/* Terminating entry */
118 };
119 MODULE_DEVICE_TABLE(pci, iph5526_pci_tbl);
120 
121 MODULE_LICENSE("GPL");
122 
123 #define MAX_FC_CARDS 2
124 static struct fc_info *fc[MAX_FC_CARDS+1];
125 static unsigned int pci_irq_line;
126 static struct {
127 	unsigned short vendor_id;
128 	unsigned short device_id;
129 	char *name;
130 }
131 clone_list[] __initdata  = {
132 	{PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_5526, "Interphase Fibre Channel HBA"},
133 	{PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_55x6, "Interphase Fibre Channel HBA"},
134 	{0,}
135 };
136 
137 static void tachyon_interrupt(int irq, void *dev_id, struct pt_regs *regs);
138 static void tachyon_interrupt_handler(int irq, void* dev_id, struct pt_regs* regs);
139 
140 static int initialize_register_pointers(struct fc_info *fi);
141 void clean_up_memory(struct fc_info *fi);
142 
143 static int tachyon_init(struct fc_info *fi);
144 static int build_queues(struct fc_info *fi);
145 static void build_tachyon_header(struct fc_info *fi, u_int my_id, u_int r_ctl, u_int d_id, u_int type, u_char seq_id, u_char df_ctl, u_short ox_id, u_short rx_id, char *data);
146 static int get_free_header(struct fc_info *fi);
147 static void build_EDB(struct fc_info *fi, char *data, u_short flags, u_short len);
148 static int get_free_EDB(struct fc_info *fi);
149 static void build_ODB(struct fc_info *fi, u_char seq_id, u_int d_id, u_int len, u_int cntl, u_short mtu, u_short ox_id, u_short rx_id, int NW_header, int int_required, u_int frame_class);
150 static void write_to_tachyon_registers(struct fc_info *fi);
151 static void reset_latch(struct fc_info *fi);
152 static void reset_tachyon(struct fc_info *fi, u_int value);
153 static void take_tachyon_offline(struct fc_info *fi);
154 static void read_novram(struct fc_info *fi);
155 static void reset_ichip(struct fc_info *fi);
156 static void update_OCQ_indx(struct fc_info *fi);
157 static void update_IMQ_indx(struct fc_info *fi, int count);
158 static void update_SFSBQ_indx(struct fc_info *fi);
159 static void update_MFSBQ_indx(struct fc_info *fi, int count);
160 static void update_tachyon_header_indx(struct fc_info *fi);
161 static void update_EDB_indx(struct fc_info *fi);
162 static void handle_FM_interrupt(struct fc_info *fi);
163 static void handle_MFS_interrupt(struct fc_info *fi);
164 static void handle_OOO_interrupt(struct fc_info *fi);
165 static void handle_SFS_interrupt(struct fc_info *fi);
166 static void handle_OCI_interrupt(struct fc_info *fi);
167 static void handle_SFS_BUF_WARN_interrupt(struct fc_info *fi);
168 static void handle_MFS_BUF_WARN_interrupt(struct fc_info *fi);
169 static void handle_IMQ_BUF_WARN_interrupt(struct fc_info *fi);
170 static void handle_Unknown_Frame_interrupt(struct fc_info *fi);
171 static void handle_Busied_Frame_interrupt(struct fc_info *fi);
172 static void handle_Bad_SCSI_Frame_interrupt(struct fc_info *fi);
173 static void handle_Inbound_SCSI_Status_interrupt(struct fc_info *fi);
174 static void handle_Inbound_SCSI_Command_interrupt(struct fc_info *fi);
175 static void completion_message_handler(struct fc_info *fi, u_int imq_int_type);
176 static void fill_login_frame(struct fc_info *fi, u_int logi);
177 
178 static int tx_exchange(struct fc_info *fi, char *data, u_int len, u_int r_ctl, u_int type, u_int d_id, u_int mtu, int int_required, u_short ox_id, u_int frame_class);
179 static int tx_sequence(struct fc_info *fi, char *data, u_int len, u_int mtu, u_int d_id, u_short ox_id, u_short rx_id, u_char seq_id, int NW_flag, int int_required, u_int frame_class);
180 static int validate_login(struct fc_info *fi, u_int *base_ptr);
181 static void add_to_address_cache(struct fc_info *fi, u_int *base_ptr);
182 static void remove_from_address_cache(struct fc_info *fi, u_int *data, u_int cmnd_code);
183 static int node_logged_in_prev(struct fc_info *fi, u_int *buff_addr);
184 static int sid_logged_in(struct fc_info *fi, u_int s_id);
185 static struct fc_node_info *look_up_cache(struct fc_info *fi, char *data);
186 static int display_cache(struct fc_info *fi);
187 
188 static void tx_logi(struct fc_info *fi, u_int logi, u_int d_id);
189 static void tx_logi_acc(struct fc_info *fi, u_int logi, u_int d_id, u_short received_ox_id);
190 static void tx_prli(struct fc_info *fi, u_int command_code, u_int d_id, u_short received_ox_id);
191 static void tx_logo(struct fc_info *fi, u_int d_id, u_short received_ox_id);
192 static void tx_adisc(struct fc_info *fi, u_int cmnd_code, u_int d_id, u_short received_ox_id);
193 static void tx_ls_rjt(struct fc_info *fi, u_int d_id, u_short received_ox_id, u_short reason_code, u_short expln_code);
194 static u_int plogi_ok(struct fc_info *fi, u_int *buff_addr, int size);
195 static void tx_acc(struct fc_info *fi, u_int d_id, u_short received_ox_id);
196 static void tx_name_server_req(struct fc_info *fi, u_int req);
197 static void rscn_handler(struct fc_info *fi, u_int node_id);
198 static void tx_scr(struct fc_info *fi);
199 static void scr_timer(unsigned long data);
200 static void explore_fabric(struct fc_info *fi, u_int *buff_addr);
201 static void perform_adisc(struct fc_info *fi);
202 static void local_port_discovery(struct fc_info *fi);
203 static void add_to_ox_id_list(struct fc_info *fi, u_int transaction_id, u_int cmnd_code);
204 static u_int remove_from_ox_id_list(struct fc_info *fi, u_short received_ox_id);
205 static void add_display_cache_timer(struct fc_info *fi);
206 
207 /* Timers... */
208 static void nos_ols_timer(unsigned long data);
209 static void loop_timer(unsigned long data);
210 static void fabric_explore_timer(unsigned long data);
211 static void port_discovery_timer(unsigned long data);
212 static void display_cache_timer(unsigned long data);
213 
214 /* SCSI Stuff */
215 static int add_to_sest(struct fc_info *fi, Scsi_Cmnd *Cmnd, struct fc_node_info *ni);
216 static struct fc_node_info *resolve_target(struct fc_info *fi, u_char target);
217 static void update_FCP_CMND_indx(struct fc_info *fi);
218 static int get_free_SDB(struct fc_info *fi);
219 static void update_SDB_indx(struct fc_info *fi);
220 static void mark_scsi_sid(struct fc_info *fi, u_int *buff_addr, u_char action);
221 static void invalidate_SEST_entry(struct fc_info *fi, u_short received_ox_id);
222 static int abort_exchange(struct fc_info *fi, u_short ox_id);
223 static void flush_tachyon_cache(struct fc_info *fi, u_short ox_id);
224 static int get_scsi_oxid(struct fc_info *fi);
225 static void update_scsi_oxid(struct fc_info *fi);
226 
227 static Scsi_Host_Template driver_template = IPH5526_SCSI_FC;
228 
229 static void iph5526_timeout(struct net_device *dev);
230 
231 static int iph5526_probe_pci(struct net_device *dev);
232 
233 int __init iph5526_probe(struct net_device *dev)
234 {
235 	if (pci_present() && (iph5526_probe_pci(dev) == 0))
236 		return 0;
237     return -ENODEV;
238 }
239 
240 static int __init iph5526_probe_pci(struct net_device *dev)
241 {
242 #ifdef MODULE
243 	struct fc_info *fi = (struct fc_info *)dev->priv;
244 #else
245 	struct fc_info *fi;
246 	static int count;
247 
248 	if(fc[count] != NULL) {
249 		if (dev == NULL) {
250 			dev = init_fcdev(NULL, 0);
251 			if (dev == NULL)
252 				return -ENOMEM;
253 		}
254 		fi = fc[count];
255 #endif
256 		fi->dev = dev;
257 		dev->base_addr = fi->base_addr;
258 		dev->irq = fi->irq;
259 		if (dev->priv == NULL)
260 			dev->priv = fi;
261 		fcdev_init(dev);
262 		/* Assign our MAC address, derived from the Port Name (WWN).
263 		 */
264 		dev->dev_addr[0] = (fi->g.my_port_name_high & 0x0000FF00) >> 8;
265 		dev->dev_addr[1] = fi->g.my_port_name_high;
266 		dev->dev_addr[2] = (fi->g.my_port_name_low & 0xFF000000) >> 24;
267 		dev->dev_addr[3] = (fi->g.my_port_name_low & 0x00FF0000) >> 16;
268 		dev->dev_addr[4] = (fi->g.my_port_name_low & 0x0000FF00) >> 8;
269 		dev->dev_addr[5] = fi->g.my_port_name_low;
270 #ifndef MODULE
271 		count++;
272 	}
273 	else
274 		return -ENODEV;
275 #endif
276 	display_cache(fi);
277 	return 0;
278 }
279 
280 static int __init fcdev_init(struct net_device *dev)
281 {
282 	dev->open = iph5526_open;
283 	dev->stop = iph5526_close;
284 	dev->hard_start_xmit = iph5526_send_packet;
285 	dev->get_stats = iph5526_get_stats;
286 	dev->set_multicast_list = NULL;
287 	dev->change_mtu = iph5526_change_mtu;
288 	dev->tx_timeout = iph5526_timeout;
289 	dev->watchdog_timeo = 5*HZ;
290 #ifndef MODULE
291 	fc_setup(dev);
292 #endif
293 	return 0;
294 }
295 
296 /* initialize tachyon and take it OnLine */
297 static int tachyon_init(struct fc_info *fi)
298 {
299 	ENTER("tachyon_init");
300 	if (build_queues(fi) == 0) {
301 		T_MSG("build_queues() failed");
302 		return 0;
303 	}
304 
305 	/* Retrieve your port/node name.
306 	 */
307 	read_novram(fi);
308 
309 	reset_ichip(fi);
310 
311 	reset_tachyon(fi, SOFTWARE_RESET);
312 
313 	LEAVE("tachyon_init");
314 	return 1;
315 }
316 
317 /* Build the 4 Qs - IMQ, OCQ, MFSBQ, SFSBQ */
318 /* Lots of dma_pages needed as Tachyon DMAs almost everything into
319  * host memory.
320  */
321 static int build_queues(struct fc_info *fi)
322 {
323 int i,j;
324 u_char *addr;
325 	ENTER("build_queues");
326 	/* Initializing Queue Variables.
327 	 */
328 	fi->q.ptr_host_ocq_cons_indx = NULL;
329 	fi->q.ptr_host_hpcq_cons_indx = NULL;
330 	fi->q.ptr_host_imq_prod_indx = NULL;
331 
332 	fi->q.ptr_ocq_base = NULL;
333 	fi->q.ocq_len = 0;
334 	fi->q.ocq_end = 0;
335 	fi->q.ocq_prod_indx = 0;
336 
337 	fi->q.ptr_imq_base = NULL;
338 	fi->q.imq_len = 0;
339 	fi->q.imq_end = 0;
340 	fi->q.imq_cons_indx = 0;
341 	fi->q.imq_prod_indx = 0;
342 
343 	fi->q.ptr_mfsbq_base = NULL;
344 	fi->q.mfsbq_len = 0;
345 	fi->q.mfsbq_end = 0;
346 	fi->q.mfsbq_prod_indx = 0;
347 	fi->q.mfsbq_cons_indx = 0;
348 	fi->q.mfsbuff_len = 0;
349 	fi->q.mfsbuff_end = 0;
350 	fi->g.mfs_buffer_count = 0;
351 
352 	fi->q.ptr_sfsbq_base = NULL;
353 	fi->q.sfsbq_len = 0;
354 	fi->q.sfsbq_end = 0;
355 	fi->q.sfsbq_prod_indx = 0;
356 	fi->q.sfsbq_cons_indx = 0;
357 	fi->q.sfsbuff_len = 0;
358 	fi->q.sfsbuff_end = 0;
359 
360 	fi->q.sdb_indx = 0;
361 	fi->q.fcp_cmnd_indx = 0;
362 
363 	fi->q.ptr_edb_base = NULL;
364 	fi->q.edb_buffer_indx = 0;
365 	fi->q.ptr_tachyon_header_base = NULL;
366 	fi->q.tachyon_header_indx = 0;
367 	fi->node_info_list = NULL;
368 	fi->ox_id_list = NULL;
369 	fi->g.loop_up = FALSE;
370 	fi->g.ptp_up = FALSE;
371 	fi->g.link_up = FALSE;
372 	fi->g.fabric_present = FALSE;
373 	fi->g.n_port_try = FALSE;
374 	fi->g.dont_init = FALSE;
375 	fi->g.nport_timer_set = FALSE;
376 	fi->g.lport_timer_set = FALSE;
377 	fi->g.no_of_targets = 0;
378 	fi->g.sem = 0;
379 	fi->g.perform_adisc = FALSE;
380 	fi->g.e_i = 0;
381 
382 	/* build OCQ */
383 	if ( (fi->q.ptr_ocq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
384 		T_MSG("failed to get OCQ page");
385 		return 0;
386 	}
387 	/* set up the OCQ structures */
388 	for (i = 0; i < OCQ_LENGTH; i++)
389 		fi->q.ptr_odb[i] = fi->q.ptr_ocq_base + NO_OF_ENTRIES*i;
390 
391 	/* build IMQ */
392 	if ( (fi->q.ptr_imq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
393 		T_MSG("failed to get IMQ page");
394 		return 0;
395 	}
396 	for (i = 0; i < IMQ_LENGTH; i++)
397 		fi->q.ptr_imqe[i] = fi->q.ptr_imq_base + NO_OF_ENTRIES*i;
398 
399 	/* build MFSBQ */
400 	if ( (fi->q.ptr_mfsbq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
401 		T_MSG("failed to get MFSBQ page");
402 		return 0;
403 	}
404 	memset((char *)fi->q.ptr_mfsbq_base, 0, MFSBQ_LENGTH * 32);
405 	/* Allocate one huge chunk of memory... helps while reassembling
406 	 * frames.
407 	 */
408 	if ( (addr = (u_char *)__get_free_pages(GFP_KERNEL, 5) ) == 0) {
409 		T_MSG("failed to get MFS buffer pages");
410 		return 0;
411 	}
412 	/* fill in addresses of empty buffers */
413 	for (i = 0; i < MFSBQ_LENGTH; i++) {
414 		for (j = 0; j < NO_OF_ENTRIES; j++) {
415 				*(fi->q.ptr_mfsbq_base + i*NO_OF_ENTRIES + j) = htonl(virt_to_bus(addr));
416 				addr += MFS_BUFFER_SIZE;
417 		}
418 	}
419 
420 	/* Each MFSBQ entry holds 8 buffer pointers and there are 8 MFSBQ
421 	 * entries, so the queue itself uses only 256 bytes of the page.
422 	 * The remaining 4096-256 bytes are used as temporary space for
423 	 * ELS frames. This also keeps the addresses aligned.
424 	 */
425 	fi->g.els_buffer[0] = fi->q.ptr_mfsbq_base + MFSBQ_LENGTH*NO_OF_ENTRIES;
426 	for (i = 1; i < MAX_PENDING_FRAMES; i++)
427 		fi->g.els_buffer[i] = fi->g.els_buffer[i-1] + 64;
428 
429 	/* build SFSBQ */
430 	if ( (fi->q.ptr_sfsbq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
431 		T_MSG("failed to get SFSBQ page");
432 		return 0;
433 	}
434 	memset((char *)fi->q.ptr_sfsbq_base, 0, SFSBQ_LENGTH * 32);
435 	/* fill in addresses of empty buffers */
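	/* Each SFS buffer is kmalloc'ed at twice SFS_BUFFER_SIZE so that the
	 * address handed to Tachyon can be rounded up to an SFS_BUFFER_SIZE
	 * boundary; the unaligned kmalloc pointer is kept in ptr_sfs_buffers[]
	 * so the block can still be freed later.
	 */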
436 	for (i = 0; i < SFSBQ_LENGTH; i++)
437 		for (j = 0; j < NO_OF_ENTRIES; j++){
438 			addr = kmalloc(SFS_BUFFER_SIZE*2, GFP_KERNEL);
439 			if (addr == NULL){
440 				T_MSG("ptr_sfs_buffer : memory not allocated");
441 				return 0;
442 			}
443 			else {
444 			int offset = ALIGNED_SFS_ADDR(addr);
445 				memset((char *)addr, 0, SFS_BUFFER_SIZE);
446 				fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES +j] = (u_int *)addr;
447 				addr += offset;
448 				*(fi->q.ptr_sfsbq_base + i*NO_OF_ENTRIES + j) = htonl(virt_to_bus(addr));
449 			}
450 		}
451 
452 	/* Likewise, the SFSBQ (8 entries of 8 buffer pointers) uses only 256
453 	 * bytes of its page. The remaining 4096-256 bytes are used as
454 	 * temporary space for ARP frames. This is done in order to
455 	 * support HW_Types of 0x1 and 0x6.
456 	 */
457 	fi->g.arp_buffer = (char *)fi->q.ptr_sfsbq_base + SFSBQ_LENGTH*NO_OF_ENTRIES*4;
458 
459 	/* build EDB */
460 	if ((fi->q.ptr_edb_base = (u_int *)__get_free_pages(GFP_KERNEL, 5) ) == 0) {
461 		T_MSG("failed to get EDB page");
462 		return 0;
463 	}
464 	for (i = 0; i < EDB_LEN; i++)
465 		fi->q.ptr_edb[i] = fi->q.ptr_edb_base + 2*i;
466 
467 	/* build SEST */
468 
469 	/* OX_IDs range from 0x0 - 0x4FFF.
470 	 */
471 	if ((fi->q.ptr_sest_base = (u_int *)__get_free_pages(GFP_KERNEL, 5)) == 0) {
472 		T_MSG("failed to get SEST page");
473 		return 0;
474 	}
475 	for (i = 0; i < SEST_LENGTH; i++)
476 		fi->q.ptr_sest[i] = fi->q.ptr_sest_base + NO_OF_ENTRIES*i;
477 
478 	if ((fi->q.ptr_sdb_base = (u_int *)__get_free_pages(GFP_KERNEL, 5)) == 0) {
479 		T_MSG("failed to get SDB page");
480 		return 0;
481 	}
482 	for (i = 0 ; i < NO_OF_SDB_ENTRIES; i++)
483 		fi->q.ptr_sdb_slot[i] = fi->q.ptr_sdb_base + (SDB_SIZE/4)*i;
484 
485 	if ((fi->q.ptr_fcp_cmnd_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
486 		T_MSG("failed to get FCP_CMND page");
487 		return 0;
488 	}
489 	for (i = 0; i < NO_OF_FCP_CMNDS; i++)
490 		fi->q.ptr_fcp_cmnd[i] = fi->q.ptr_fcp_cmnd_base + NO_OF_ENTRIES*i;
491 
492 	/* Allocate space for Tachyon Header as well...
493 	 */
494 	if ((fi->q.ptr_tachyon_header_base = (u_int *)__get_free_pages(GFP_KERNEL, 0) ) == 0) {
495 		T_MSG("failed to get tachyon_header page");
496 		return 0;
497 	}
498 	for (i = 0; i < NO_OF_TACH_HEADERS; i++)
499 		fi->q.ptr_tachyon_header[i] = fi->q.ptr_tachyon_header_base + 16*i;
500 
501 	/* Allocate memory for indices.
502 	 * Indices should be aligned on 32 byte boundaries.
503 	 */
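	/* Each index is allocated as 64 (2*32) bytes so that, if kmalloc hands
	 * back a block that is not 32-byte aligned, there is room to nudge the
	 * pointer given to Tachyon towards an aligned address.
	 */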
504 	fi->q.host_ocq_cons_indx = kmalloc(2*32, GFP_KERNEL);
505 	if (fi->q.host_ocq_cons_indx == NULL){
506 		T_MSG("fi->q.host_ocq_cons_indx : memory not allocated");
507 		return 0;
508 	}
509 	fi->q.ptr_host_ocq_cons_indx = fi->q.host_ocq_cons_indx;
510 	if ((u_long)(fi->q.host_ocq_cons_indx) % 32)
511 		fi->q.host_ocq_cons_indx++;
512 
513 	fi->q.host_hpcq_cons_indx = kmalloc(2*32, GFP_KERNEL);
514 	if (fi->q.host_hpcq_cons_indx == NULL){
515 		T_MSG("fi->q.host_hpcq_cons_indx : memory not allocated");
516 		return 0;
517 	}
518 	fi->q.ptr_host_hpcq_cons_indx= fi->q.host_hpcq_cons_indx;
519 	if ((u_long)(fi->q.host_hpcq_cons_indx) % 32)
520 		fi->q.host_hpcq_cons_indx++;
521 
522 	fi->q.host_imq_prod_indx = kmalloc(2*32, GFP_KERNEL);
523 	if (fi->q.host_imq_prod_indx == NULL){
524 		T_MSG("fi->q.host_imq_prod_indx : memory not allocated");
525 		return 0;
526 	}
527 	fi->q.ptr_host_imq_prod_indx = fi->q.host_imq_prod_indx;
528 	if ((u_long)(fi->q.host_imq_prod_indx) % 32)
529 		fi->q.host_imq_prod_indx++;
530 
531 	LEAVE("build_queues");
532 	return 1;
533 }
534 
535 
536 static void write_to_tachyon_registers(struct fc_info *fi)
537 {
538 u_int bus_addr, bus_indx_addr, i;
539 
540 	ENTER("write_to_tachyon_registers");
541 
542 	/* Clear Queues each time Tachyon is reset */
543 	memset((char *)fi->q.ptr_ocq_base, 0, OCQ_LENGTH * 32);
544 	memset((char *)fi->q.ptr_imq_base, 0, IMQ_LENGTH * 32);
545 	memset((char *)fi->q.ptr_edb_base, 0, EDB_LEN * 8);
546 	memset((char *)fi->q.ptr_sest_base, 0, SEST_LENGTH * 32);
547 	memset((char *)fi->q.ptr_sdb_base, 0, NO_OF_SDB_ENTRIES * SDB_SIZE);
548 	memset((char *)fi->q.ptr_tachyon_header_base, 0xFF, NO_OF_TACH_HEADERS * TACH_HEADER_SIZE);
549 	for (i = 0; i < SEST_LENGTH; i++)
550 		fi->q.free_scsi_oxid[i] = OXID_AVAILABLE;
551 	for (i = 0; i < NO_OF_SDB_ENTRIES; i++)
552 		fi->q.sdb_slot_status[i] = SDB_FREE;
553 
554 	take_tachyon_offline(fi);
555 	writel(readl(fi->t_r.ptr_tach_config_reg) | SCSI_ENABLE | WRITE_STREAM_SIZE | READ_STREAM_SIZE | PARITY_EVEN | OOO_REASSEMBLY_DISABLE, fi->t_r.ptr_tach_config_reg);
556 
557 	/* Write OCQ registers */
558 	fi->q.ocq_prod_indx = 0;
559 	*(fi->q.host_ocq_cons_indx) = 0;
560 
561 	/* The Tachyon needs to be passed the "real" address */
562 	bus_addr = virt_to_bus(fi->q.ptr_ocq_base);
563 	writel(bus_addr, fi->t_r.ptr_ocq_base_reg);
564 	writel(OCQ_LENGTH - 1, fi->t_r.ptr_ocq_len_reg);
565 	bus_indx_addr = virt_to_bus(fi->q.host_ocq_cons_indx);
566 	writel(bus_indx_addr, fi->t_r.ptr_ocq_cons_indx_reg);
567 
568 	/* Write IMQ registers */
569 	fi->q.imq_cons_indx = 0;
570 	*(fi->q.host_imq_prod_indx) = 0;
571 	bus_addr = virt_to_bus(fi->q.ptr_imq_base);
572 	writel(bus_addr, fi->t_r.ptr_imq_base_reg);
573 	writel(IMQ_LENGTH - 1, fi->t_r.ptr_imq_len_reg);
574 	bus_indx_addr = virt_to_bus(fi->q.host_imq_prod_indx);
575 	writel(bus_indx_addr, fi->t_r.ptr_imq_prod_indx_reg);
576 
577 	/* Write MFSBQ registers */
578 	fi->q.mfsbq_prod_indx = MFSBQ_LENGTH - 1;
579 	fi->q.mfsbuff_end = MFS_BUFFER_SIZE - 1;
580 	fi->q.mfsbq_cons_indx = 0;
581 	bus_addr = virt_to_bus(fi->q.ptr_mfsbq_base);
582 	writel(bus_addr, fi->t_r.ptr_mfsbq_base_reg);
583 	writel(MFSBQ_LENGTH - 1, fi->t_r.ptr_mfsbq_len_reg);
584 	writel(fi->q.mfsbuff_end, fi->t_r.ptr_mfsbuff_len_reg);
585 	/* Do this last as tachyon will prefetch the
586 	 * first entry as soon as we write to it.
587 	 */
588 	writel(fi->q.mfsbq_prod_indx, fi->t_r.ptr_mfsbq_prod_reg);
589 
590 	/* Write SFSBQ registers */
591 	fi->q.sfsbq_prod_indx = SFSBQ_LENGTH - 1;
592 	fi->q.sfsbuff_end = SFS_BUFFER_SIZE - 1;
593 	fi->q.sfsbq_cons_indx = 0;
594 	bus_addr = virt_to_bus(fi->q.ptr_sfsbq_base);
595 	writel(bus_addr, fi->t_r.ptr_sfsbq_base_reg);
596 	writel(SFSBQ_LENGTH - 1, fi->t_r.ptr_sfsbq_len_reg);
597 	writel(fi->q.sfsbuff_end, fi->t_r.ptr_sfsbuff_len_reg);
598 	/* Do this last as tachyon will prefetch the first
599 	 * entry as soon as we write to it.
600 	 */
601 	writel(fi->q.sfsbq_prod_indx, fi->t_r.ptr_sfsbq_prod_reg);
602 
603 	/* Write SEST registers */
604 	bus_addr = virt_to_bus(fi->q.ptr_sest_base);
605 	writel(bus_addr, fi->t_r.ptr_sest_base_reg);
606 	writel(SEST_LENGTH - 1, fi->t_r.ptr_sest_len_reg);
607 	/* the last 2 bits _should_ be 1 */
608 	writel(SEST_BUFFER_SIZE - 1, fi->t_r.ptr_scsibuff_len_reg);
609 
610 	/* write AL_TIME & E_D_TOV into the registers */
611 	writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
612 	/* Tell Tachyon to pick a Soft Assigned AL_PA */
613 	writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);
614 
615 	/* Read the WWN from EEPROM. But for now we assign it here. */
616 	writel(WORLD_WIDE_NAME_LOW, fi->t_r.ptr_fm_wwn_low_reg);
617 	writel(WORLD_WIDE_NAME_HIGH, fi->t_r.ptr_fm_wwn_hi_reg);
618 
619 	DPRINTK1("TACHYON initializing as L_Port...\n");
620 	writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
621 
622 	LEAVE("write_to_tachyon_registers");
623 }
624 
625 
626 static void tachyon_interrupt(int irq, void* dev_id, struct pt_regs* regs)
627 {
628 struct Scsi_Host *host = dev_id;
629 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
630 struct fc_info *fi = hostdata->fi;
631 u_long flags;
632 	spin_lock_irqsave(&fi->fc_lock, flags);
633 	tachyon_interrupt_handler(irq, dev_id, regs);
634 	spin_unlock_irqrestore(&fi->fc_lock, flags);
635 }
636 
637 static void tachyon_interrupt_handler(int irq, void* dev_id, struct pt_regs* regs)
638 {
639 struct Scsi_Host *host = dev_id;
640 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
641 struct fc_info *fi = hostdata->fi;
642 u_int *ptr_imq_entry;
643 u_int imq_int_type, current_IMQ_index = 0, prev_IMQ_index;
644 int index, no_of_entries = 0;
645 
646 	DPRINTK("\n");
647 	ENTER("tachyon_interrupt");
648 	if (fi->q.host_imq_prod_indx != NULL) {
649 		current_IMQ_index =  ntohl(*(fi->q.host_imq_prod_indx));
650 	}
651 	else {
652 		/* _Should not_ happen */
653 		T_MSG("IMQ_indx NULL. DISABLING INTERRUPTS!!!\n");
654 		writel(0x0, fi->i_r.ptr_ichip_hw_control_reg);
655 	}
656 
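	/* The producer index is written by Tachyon and may have wrapped past
	 * our consumer index, so the number of new IMQ entries is the
	 * difference taken modulo IMQ_LENGTH.
	 */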
657 	if (current_IMQ_index > fi->q.imq_cons_indx)
658 		no_of_entries = current_IMQ_index - fi->q.imq_cons_indx;
659 	else
660 	if (current_IMQ_index < fi->q.imq_cons_indx)
661 		no_of_entries = IMQ_LENGTH - (fi->q.imq_cons_indx - current_IMQ_index);
662 
663 	if (no_of_entries == 0) {
664 	u_int ichip_status;
665 		ichip_status = readl(fi->i_r.ptr_ichip_hw_status_reg);
666 		if (ichip_status & 0x20) {
667 			/* Should _never_ happen. Might require a hard reset */
668 			T_MSG("Too bad... PCI Bus Error. Resetting (i)chip");
669 			reset_ichip(fi);
670 			T_MSG("DISABLING INTERRUPTS!!!\n");
671 			writel(0x0, fi->i_r.ptr_ichip_hw_control_reg);
672 		}
673 	}
674 
675 	prev_IMQ_index = current_IMQ_index;
676 	for (index = 0; index < no_of_entries; index++) {
677 		ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
678 		imq_int_type = ntohl(*ptr_imq_entry);
679 
680 		completion_message_handler(fi, imq_int_type);
681 		if ((fi->g.link_up == FALSE) && ((imq_int_type == MFS_BUF_WARN) || (imq_int_type == SFS_BUF_WARN) || (imq_int_type == IMQ_BUF_WARN)))
682 			break;
683 		update_IMQ_indx(fi, 1);
684 
685 		/* Check for more entries */
686 		current_IMQ_index =  ntohl(*(fi->q.host_imq_prod_indx));
687 		if (current_IMQ_index != prev_IMQ_index) {
688 			no_of_entries++;
689 			prev_IMQ_index = current_IMQ_index;
690 		}
691 	} /*end of for loop*/
692 	LEAVE("tachyon_interrupt");
693 	return;
694 }
695 
696 
697 static void handle_SFS_BUF_WARN_interrupt(struct fc_info *fi)
698 {
699 int i;
700 	ENTER("handle_SFS_BUF_WARN_interrupt");
701 	if (fi->g.link_up == FALSE) {
702 		reset_tachyon(fi, SOFTWARE_RESET);
703 		return;
704 	}
705 	/* Free up all but one entry in the Q.
706 	 */
707 	for (i = 0; i < ((SFSBQ_LENGTH - 1) * NO_OF_ENTRIES); i++) {
708 		handle_SFS_interrupt(fi);
709 		update_IMQ_indx(fi, 1);
710 	}
711 	LEAVE("handle_SFS_BUF_WARN_interrupt");
712 }
713 
714 /* Untested_Code_Begin */
715 static void handle_MFS_BUF_WARN_interrupt(struct fc_info *fi)
716 {
717 int i;
718 	ENTER("handle_MFS_BUF_WARN_interrupt");
719 	if (fi->g.link_up == FALSE) {
720 		reset_tachyon(fi, SOFTWARE_RESET);
721 		return;
722 	}
723 	/* FIXME: freeing up 8 entries.
724 	 */
725 	for (i = 0; i < NO_OF_ENTRIES; i++) {
726 		handle_MFS_interrupt(fi);
727 		update_IMQ_indx(fi, 1);
728 	}
729 	LEAVE("handle_MFS_BUF_WARN_interrupt");
730 }
731 /*Untested_Code_End */
732 
733 static void handle_IMQ_BUF_WARN_interrupt(struct fc_info *fi)
734 {
735 u_int *ptr_imq_entry;
736 u_int imq_int_type, current_IMQ_index = 0, temp_imq_cons_indx;
737 int index, no_of_entries = 0;
738 
739 	ENTER("handle_IMQ_BUF_WARN_interrupt");
740 	if (fi->g.link_up == FALSE) {
741 		reset_tachyon(fi, SOFTWARE_RESET);
742 		return;
743 	}
744 	current_IMQ_index =  ntohl(*(fi->q.host_imq_prod_indx));
745 
746 	if (current_IMQ_index > fi->q.imq_cons_indx)
747  		no_of_entries = current_IMQ_index - fi->q.imq_cons_indx;
748 	else
749 		if (current_IMQ_index < fi->q.imq_cons_indx)
750 			no_of_entries = IMQ_LENGTH - (fi->q.imq_cons_indx - current_IMQ_index);
751 	/* We don't want to look at the same IMQ entry again.
752 	 */
753 	temp_imq_cons_indx = fi->q.imq_cons_indx + 1;
754 	if (no_of_entries != 0)
755 		no_of_entries -= 1;
756 	for (index = 0; index < no_of_entries; index++) {
757 		ptr_imq_entry = fi->q.ptr_imqe[temp_imq_cons_indx];
758 		imq_int_type = ntohl(*ptr_imq_entry);
759 		if (imq_int_type != IMQ_BUF_WARN)
760 			completion_message_handler(fi, imq_int_type);
761 		temp_imq_cons_indx++;
762 		if (temp_imq_cons_indx == IMQ_LENGTH)
763 			temp_imq_cons_indx = 0;
764 	} /*end of for loop*/
765 	if (no_of_entries != 0)
766 		update_IMQ_indx(fi, no_of_entries);
767 	LEAVE("handle_IMQ_BUF_WARN_interrupt");
768 }
769 
770 static void completion_message_handler(struct fc_info *fi, u_int imq_int_type)
771 {
772 	switch(imq_int_type) {
773 		case OUTBOUND_COMPLETION:
774 			DPRINTK("OUTBOUND_COMPLETION message received");
775 			break;
776 		case OUTBOUND_COMPLETION_I:
777 			DPRINTK("OUTBOUND_COMPLETION_I message received");
778 			handle_OCI_interrupt(fi);
779 			break;
780 		case OUT_HI_PRI_COMPLETION:
781 			DPRINTK("OUT_HI_PRI_COMPLETION message received");
782 			break;
783 		case OUT_HI_PRI_COMPLETION_I:
784 			DPRINTK("OUT_HI_PRI_COMPLETION_I message received");
785 			break;
786 		case INBOUND_MFS_COMPLETION:
787 			DPRINTK("INBOUND_MFS_COMPLETION message received");
788 			handle_MFS_interrupt(fi);
789 			break;
790 		case INBOUND_OOO_COMPLETION:
791 			DPRINTK("INBOUND_OOO_COMPLETION message received");
792 			handle_OOO_interrupt(fi);
793 			break;
794 		case INBOUND_SFS_COMPLETION:
795 			DPRINTK("INBOUND_SFS_COMPLETION message received");
796 			handle_SFS_interrupt(fi);
797 			break;
798 		case INBOUND_UNKNOWN_FRAME_I:
799 			DPRINTK("INBOUND_UNKNOWN_FRAME message received");
800 			handle_Unknown_Frame_interrupt(fi);
801 			break;
802 		case INBOUND_BUSIED_FRAME:
803 			DPRINTK("INBOUND_BUSIED_FRAME message received");
804 			handle_Busied_Frame_interrupt(fi);
805 			break;
806 		case FRAME_MGR_INTERRUPT:
807 			DPRINTK("FRAME_MGR_INTERRUPT message received");
808 			handle_FM_interrupt(fi);
809 			break;
810 		case READ_STATUS:
811 			DPRINTK("READ_STATUS message received");
812 			break;
813 		case SFS_BUF_WARN:
814 			DPRINTK("SFS_BUF_WARN message received");
815 			handle_SFS_BUF_WARN_interrupt(fi);
816 			break;
817 		case MFS_BUF_WARN:
818 			DPRINTK("MFS_BUF_WARN message received");
819 			handle_MFS_BUF_WARN_interrupt(fi);
820 			break;
821 		case IMQ_BUF_WARN:
822 			DPRINTK("IMQ_BUF_WARN message received");
823 			handle_IMQ_BUF_WARN_interrupt(fi);
824 			break;
825 		case INBOUND_C1_TIMEOUT:
826 			DPRINTK("INBOUND_C1_TIMEOUT message received");
827 			break;
828 		case BAD_SCSI_FRAME:
829 			DPRINTK("BAD_SCSI_FRAME message received");
830 			handle_Bad_SCSI_Frame_interrupt(fi);
831 			break;
832 		case INB_SCSI_STATUS_COMPLETION:
833 			DPRINTK("INB_SCSI_STATUS_COMPL message received");
834 			handle_Inbound_SCSI_Status_interrupt(fi);
835 			break;
836 		case INBOUND_SCSI_COMMAND:
837 			DPRINTK("INBOUND_SCSI_COMMAND message received");
838 			handle_Inbound_SCSI_Command_interrupt(fi);
839 			break;
840 		case INBOUND_SCSI_DATA_COMPLETION:
841 			DPRINTK("INBOUND_SCSI_DATA message received");
842 			/* Only for targets */
843 			break;
844 		default:
845 			T_MSG("DEFAULT message received, type = %x", imq_int_type);
846 			return;
847 	}
848 	reset_latch(fi);
849 }
850 
851 static void handle_OCI_interrupt(struct fc_info *fi)
852 {
853 u_int *ptr_imq_entry;
854 u_long transaction_id = 0;
855 unsigned short status, seq_count, transmitted_ox_id;
856 struct Scsi_Host *host = fi->host;
857 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
858 Scsi_Cmnd *Cmnd;
859 u_int tag;
860 
861 	ENTER("handle_OCI_interrupt");
862 	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
863 	transaction_id = ntohl(*(ptr_imq_entry + 1));
864 	status = ntohl(*(ptr_imq_entry + 2)) >> 16;
865 	seq_count = ntohl(*(ptr_imq_entry + 3));
866 	DPRINTK("transaction_id= %x", (u_int)transaction_id);
867 	tag = transaction_id & 0xFFFF0000;
868 	transmitted_ox_id = transaction_id;
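	/* For ELS and SCSI frames the transaction ID carries a tag in its upper
	 * 16 bits (an ELS code, a SCSI target id or FC_SCSI_BAD_TARGET) and the
	 * transmitted OX_ID in its lower 16 bits; for IP frames it holds the
	 * bus address of the skb that was transmitted.
	 */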
869 
870 	/* The interrupt could be due to either a TIME_OUT or a BAD_ALPA.
871 	 * But we check only for TimeOuts. A Bad AL_PA will be
872 	 * caught by the FM interrupt handler.
873 	 */
874 
875 	if ((status == OCM_TIMEOUT_OR_BAD_ALPA) && (!fi->g.port_discovery) && (!fi->g.perform_adisc)){
876 		DPRINTK("Frame TimeOut on OX_ID = %x", (u_int)transaction_id);
877 
878 		/* Is it a SCSI frame that is timing out ? Not a very good check...
879 		 */
880 		if ((transmitted_ox_id <= MAX_SCSI_OXID) && ((tag == FC_SCSI_BAD_TARGET) || (tag < 0x00FF0000))) {
881 			/* If it is a Bad AL_PA, we report it as BAD_TARGET.
882 			 * Else, we allow the command to time-out. A Link
883 			 * re-initialization could be taking place.
884 			 */
885 			if (tag == FC_SCSI_BAD_TARGET) {
886 				Cmnd = hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID];
887 				hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID] = NULL;
888 				if (Cmnd != NULL) {
889 					Cmnd->result = DID_BAD_TARGET << 16;
890 					(*Cmnd->scsi_done) (Cmnd);
891 				}
892 				else
893 					T_MSG("NULL Command out of handler!");
894 			} /* if Bad Target */
895 			else {
896 			u_char missing_target = tag >> 16;
897 			struct fc_node_info *q = fi->node_info_list;
898 				/* A Node that we thought was logged in has gone
899 				 * away. We are the optimistic kind and we keep
900 				 * hoping that our dear little Target will come back
901 				 * to us. For now we log him out.
902 				 */
903 				DPRINTK2("Missing Target = %d", missing_target);
904 				while (q != NULL) {
905 					if (q->target_id == missing_target) {
906 						T_MSG("Target %d Logged out", q->target_id);
907 						q->login = LOGIN_ATTEMPTED;
908 						if (fi->num_nodes > 0)
909 							fi->num_nodes--;
910 						tx_logi(fi, ELS_PLOGI, q->d_id);
911 						break;
912 					}
913 					else
914 						q = q->next;
915 				}
916 			}
917 		} /* End of SCSI frame timing out. */
918 		else {
919 			if (seq_count > 1) {
920 				/* An IP frame was transmitted to a Bad AL_PA. Free up
921 			 	 * the skb used.
922 			 	 */
923 				dev_kfree_skb_irq((struct sk_buff *)(bus_to_virt(transaction_id)));
924 				netif_wake_queue(fi->dev);
925 			}
926 		} /* End of IP frame timing out. */
927 	} /* End of frame timing out. */
928 	else {
929 		/* Frame was transmitted successfully. Check if it was an ELS
930 		 * frame or an IP frame or a Bad_Target_Notification frame (in
931 		 * case of a ptp_link). Ugly!
932 		 */
933 		if ((status == 0) && (seq_count == 0)) {
934 		u_int tag = transaction_id & 0xFFFF0000;
935 		/* Continue with port discovery after an ELS is successfully
936 		 * transmitted. (status == 0).
937 		 */
938 			DPRINTK("tag = %x", tag);
939 			switch(tag) {
940 				case ELS_FLOGI:
941 					/* Let's use the Name Server instead */
942 					fi->g.explore_fabric = TRUE;
943 					fi->g.port_discovery = FALSE;
944 					fi->g.alpa_list_index = MAX_NODES;
945 					add_to_ox_id_list(fi, transaction_id, tag);
946 					break;
947 				case ELS_PLOGI:
948 					if (fi->g.fabric_present && (fi->g.name_server == FALSE))
949 						add_to_ox_id_list(fi,transaction_id,ELS_NS_PLOGI);
950 					else
951 						add_to_ox_id_list(fi, transaction_id, tag);
952 					break;
953 				case FC_SCSI_BAD_TARGET:
954 					Cmnd = hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID];
955 					hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID] = NULL;
956 					if (Cmnd != NULL) {
957 						Cmnd->result = DID_BAD_TARGET << 16;
958 						(*Cmnd->scsi_done) (Cmnd);
959 					}
960 					else
961 						T_MSG("NULL Command out of handler!");
962 					break;
963 				default:
964 					add_to_ox_id_list(fi, transaction_id, tag);
965 			}
966 
967 			if (fi->g.alpa_list_index >= MAX_NODES) {
968 				if (fi->g.port_discovery == TRUE) {
969 					fi->g.port_discovery = FALSE;
970 					add_display_cache_timer(fi);
971 				}
972 				fi->g.alpa_list_index = MAX_NODES;
973 			}
974 			if (fi->g.port_discovery == TRUE)
975 				local_port_discovery(fi);
976 		}
977 		else {
978 			/* An IP frame has been successfully transmitted.
979 			 * Free the skb that was used for this IP frame.
980 			 */
981 			if ((status == 0) && (seq_count > 1)) {
982 				dev_kfree_skb_irq((struct sk_buff *)(bus_to_virt(transaction_id)));
983 				netif_wake_queue(fi->dev);
984 			}
985 		}
986 	}
987 	LEAVE("handle_OCI_interrupt");
988 }
989 
990 /* Right now we discard OOO frames */
991 static void handle_OOO_interrupt(struct fc_info *fi)
992 {
993 u_int *ptr_imq_entry;
994 int queue_indx, offset, payload_size;
995 int no_of_buffers = 1; /* header is in a separate buffer */
996 	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
997 	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
998 	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
999 	queue_indx = queue_indx >> 16;
1000 	payload_size = ntohl(*(ptr_imq_entry + 2)) - TACHYON_HEADER_LEN;
1001 	/* Calculate total number of buffers */
1002 	no_of_buffers += payload_size / MFS_BUFFER_SIZE;
1003 	if (payload_size % MFS_BUFFER_SIZE)
1004 		no_of_buffers++;
1005 
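	/* MFS buffers are returned to Tachyon one whole MFSBQ entry
	 * (NO_OF_ENTRIES buffers) at a time, so keep a running count and only
	 * advance the producer index once a full entry's worth has been freed.
	 */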
1006 	/* provide Tachyon with another set of buffers */
1007 	fi->g.mfs_buffer_count += no_of_buffers;
1008 	if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
1009 	int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
1010 		fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
1011 		update_MFSBQ_indx(fi, count);
1012 	}
1013 }
1014 
1015 static void handle_MFS_interrupt(struct fc_info *fi)
1016 {
1017 u_int *ptr_imq_entry, *buff_addr;
1018 u_int type_of_frame, s_id;
1019 int queue_indx, offset, payload_size, starting_indx, starting_offset;
1020 u_short received_ox_id;
1021 int no_of_buffers = 1; /* header is in a separate buffer */
1022 struct sk_buff *skb;
1023 int wrap_around = FALSE, no_of_wrap_buffs = NO_OF_ENTRIES - 1;
1024 	ENTER("handle_MFS_interrupt");
1025 	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1026 	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1027 	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1028 	queue_indx = queue_indx >> 16;
1029 	DPRINTK("queue_indx = %d, offset  = %d\n", queue_indx, offset);
1030 	payload_size = ntohl(*(ptr_imq_entry + 2)) - TACHYON_HEADER_LEN;
1031 	DPRINTK("payload_size = %d", payload_size);
1032 	/* Calculate total number of buffers */
1033 	no_of_buffers += payload_size / MFS_BUFFER_SIZE;
1034 	if (payload_size % MFS_BUFFER_SIZE)
1035 		no_of_buffers++;
1036 	DPRINTK("no_of_buffers = %d", no_of_buffers);
1037 
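	/* The completion points at the last MFSBQ buffer the frame consumed,
	 * while the Tachyon header sits in the first one; walk back
	 * (no_of_buffers - 1) buffers, wrapping around the MFSBQ if necessary,
	 * to find it.
	 */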
1038 	if ((no_of_buffers - 1) <= offset) {
1039 		starting_offset = offset - (no_of_buffers - 1);
1040 		starting_indx = queue_indx;
1041 	}
1042 	else {
1043 	int temp = no_of_buffers - (offset + 1);
1044 	int no_of_queues = temp / NO_OF_ENTRIES;
1045 		starting_offset = temp % NO_OF_ENTRIES;
1046 		if (starting_offset != 0) {
1047 			no_of_wrap_buffs = starting_offset - 1; //exclude header
1048 			starting_offset = NO_OF_ENTRIES - starting_offset;
1049 			no_of_queues++;
1050 		}
1051 		starting_indx = queue_indx - no_of_queues;
1052 		if (starting_indx < 0) {
1053 			no_of_wrap_buffs -= (starting_indx + 1) * NO_OF_ENTRIES;
1054 			starting_indx = MFSBQ_LENGTH + starting_indx;
1055 			wrap_around = TRUE;
1056 		}
1057 	}
1058 
1059 	DPRINTK("starting_indx = %d, starting offset = %d no_of_wrap_buffs = %d\n", starting_indx, starting_offset, no_of_wrap_buffs);
1060 	/* Get Tachyon Header from first buffer */
1061 	buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base + starting_indx*NO_OF_ENTRIES + starting_offset)));
1062 
1063 
1064 	/* extract Type of Frame */
1065 	type_of_frame = (u_int)ntohl(*(buff_addr + 4)) & 0xFF000000;
1066 	s_id = (u_int)ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1067 	received_ox_id = ntohl(*(buff_addr + 6)) >> 16;
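	/* The MFS buffers were carved out of one contiguous allocation, so
	 * stepping one buffer past the header lands on the start of the payload.
	 */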
1068 	buff_addr += MFS_BUFFER_SIZE/4;
1069 	DPRINTK("type_of_frame = %x, s_id = %x, ox_id = %x", type_of_frame, s_id, received_ox_id);
1070 
1071  	switch(type_of_frame) {
1072 	  case TYPE_LLC_SNAP:
1073 		skb = dev_alloc_skb(payload_size);
1074 		if (skb == NULL) {
1075 			printk(KERN_NOTICE "%s: In handle_MFS_interrupt() Memory squeeze, dropping packet.\n", fi->name);
1076 			fi->fc_stats.rx_dropped++;
1077 			fi->g.mfs_buffer_count += no_of_buffers;
1078 			if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
1079 				int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
1080 				fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
1081 				update_MFSBQ_indx(fi, count);
1082 			}
1083 			return;
1084 		}
1085 		if (wrap_around) {
1086 		int wrap_size = no_of_wrap_buffs * MFS_BUFFER_SIZE;
1087 		int tail_size = payload_size - wrap_size;
1088 			DPRINTK("wrap_size = %d, tail_size = %d\n", wrap_size, tail_size);
1089 			if (no_of_wrap_buffs)
1090 				memcpy(skb_put(skb, wrap_size), buff_addr, wrap_size);
1091 			buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base)));
1092 			memcpy(skb_put(skb, tail_size), buff_addr, tail_size);
1093 		}
1094 		else
1095 			memcpy(skb_put(skb, payload_size), buff_addr, payload_size);
1096 		rx_net_mfs_packet(fi, skb);
1097 	  	break;
1098 	default:
1099 		T_MSG("Unknown Frame Type received. Type = %x", type_of_frame);
1100 	}
1101 
1102 	/* provide Tachyon with another set of buffers */
1103 	fi->g.mfs_buffer_count += no_of_buffers;
1104 	if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
1105 	int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
1106 		fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
1107 		update_MFSBQ_indx(fi, count);
1108 	}
1109 	LEAVE("handle_MFS_interrupt");
1110 }
1111 
1112 static void handle_Unknown_Frame_interrupt(struct fc_info *fi)
1113 {
1114 u_int *ptr_imq_entry;
1115 int queue_indx, offset;
1116 	ENTER("handle_Unknown_Frame_interrupt");
1117 	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1118 	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1119 	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1120 	queue_indx = queue_indx >> 16;
1121 	/* We discard the "unknown" frame */
1122 	/* provide Tachyon with another set of buffers */
1123 	if (offset == (NO_OF_ENTRIES - 1))
1124 		update_SFSBQ_indx(fi);
1125 	LEAVE("handle_Unknown_Frame_interrupt");
1126 }
1127 
1128 static void handle_Busied_Frame_interrupt(struct fc_info *fi)
1129 {
1130 u_int *ptr_imq_entry;
1131 int queue_indx, offset;
1132 	ENTER("handle_Busied_Frame_interrupt");
1133 	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1134 	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1135 	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1136 	queue_indx = queue_indx >> 16;
1137 	/* We discard the "busied" frame */
1138 	/* provide Tachyon with another set of buffers */
1139 	if (offset == (NO_OF_ENTRIES - 1))
1140 		update_SFSBQ_indx(fi);
1141 	LEAVE("handle_Busied_Frame_interrupt");
1142 }
1143 
1144 static void handle_Bad_SCSI_Frame_interrupt(struct fc_info *fi)
1145 {
1146 u_int *ptr_imq_entry, *buff_addr, *tach_header, *ptr_edb;
1147 u_int s_id, rctl, frame_class, burst_len, transfered_len, len = 0;
1148 int queue_indx, offset, payload_size, i;
1149 u_short ox_id, rx_id, x_id, mtu = 512;
1150 u_char target_id = 0xFF;
1151 
1152 	ENTER("handle_Bad_SCSI_Frame_interrupt");
1153 	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1154 	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1155 	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1156 	queue_indx = queue_indx >> 16;
1157 	payload_size = ntohl(*(ptr_imq_entry + 2));
1158 
1159 	buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1160 
1161 	rctl = ntohl(*(buff_addr + 2)) & 0xFF000000;
1162 	s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1163 	ox_id = ntohl(*(buff_addr + 6)) >> 16;
1164 	rx_id = ntohl(*(buff_addr + 6));
1165 	x_id = ox_id & MAX_SCSI_XID;
1166 
1167 	/* Any frame that comes in with OX_ID that matches an OX_ID
1168 	 * that has been allocated for SCSI, will be called a Bad
1169 	 * SCSI frame if the Exchange is not valid any more.
1170 	 *
1171 	 * We will also get a Bad SCSI frame interrupt if we receive
1172 	 * a XFER_RDY with offset != 0. Tachyon washes its hands of
1173 	 * this Exchange. We have to take care of it ourselves. Grrr...
1174 	 */
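	/* A DATA_DESCRIPTOR here is such an XFER_RDY: rebuild the ODB, the
	 * Tachyon header and the EDB chain so that the remaining write data is
	 * retransmitted from the relative offset the target asked for.
	 */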
1175 	if (rctl == DATA_DESCRIPTOR) {
1176 	struct fc_node_info *q = fi->node_info_list;
1177 		while (q != NULL) {
1178 			if (q->d_id == s_id) {
1179 				target_id = q->target_id;
1180 				mtu = q->mtu;
1181 				break;
1182 			}
1183 			else
1184 				q = q->next;
1185 		}
1186 		frame_class = target_id;
1187 		transfered_len = ntohl(*(buff_addr + 8));
1188 		burst_len = ntohl(*(buff_addr + 9));
1189 
1190 		build_ODB(fi, fi->g.seq_id, s_id, burst_len, 0, mtu, ox_id, rx_id, 0, 0, frame_class << 16);
1191 		/* Update the SEQ_ID and Relative Offset in the
1192 		 * Tachyon Header Structure.
1193 		 */
1194 		tach_header = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 5)));
1195 		*(tach_header + 5) = htonl(fi->g.seq_id << 24);
1196 		*(tach_header + 7) = htonl(transfered_len);
1197 		fi->g.odb.hdr_addr = *(fi->q.ptr_sest[x_id] + 5);
1198 
1199 		/* Invalidate the EDBs used
1200 		 */
1201 		ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
1202 
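		/* Locate this exchange's first EDB in our table, then free EDBs
		 * until the accumulated byte count reaches the relative offset from
		 * the XFER_RDY; transmission resumes from the EDB where that offset
		 * falls.
		 */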
1203 		for (i = 0; i < EDB_LEN; i++)
1204 			if (fi->q.ptr_edb[i] == ptr_edb)
1205 				break;
1206 		ptr_edb--;
1207 
1208 		if (i < EDB_LEN) {
1209 		int j;
1210 			do {
1211 				ptr_edb += 2;
1212 				len += (htonl(*ptr_edb) & 0xFFFF);
1213 				j = i;
1214 				fi->q.free_edb_list[i++] = EDB_FREE;
1215 				if (i == EDB_LEN) {
1216 					i = 0;
1217 					ptr_edb = fi->q.ptr_edb_base - 1;
1218 				}
1219 			} while (len < transfered_len);
1220 			if (len > transfered_len) {
1221 				ptr_edb--;
1222 				fi->q.free_edb_list[j] = EDB_BUSY;
1223 			}
1224 			else
1225 				ptr_edb++;
1226 		}
1227 		else {
1228 			T_MSG("EDB not found while freeing");
1229 			if (offset == (NO_OF_ENTRIES - 1))
1230 				update_SFSBQ_indx(fi);
1231 			return;
1232 		}
1233 
1234 		/* Update the EDB pointer in the ODB.
1235 		 */
1236 		fi->g.odb.edb_addr = htonl(virt_to_bus(ptr_edb));
1237 		memcpy(fi->q.ptr_odb[fi->q.ocq_prod_indx], &(fi->g.odb), sizeof(ODB));
1238 		/* Update the EDB pointer in the SEST entry. We might need
1239 		 * this if we get another XFER_RDY for the same Exchange.
1240 		 */
1241 		*(fi->q.ptr_sest[x_id] + 7) = htonl(virt_to_bus(ptr_edb));
1242 
1243 		update_OCQ_indx(fi);
1244 		if (fi->g.seq_id == MAX_SEQ_ID)
1245 			fi->g.seq_id = 0;
1246 		else
1247 			fi->g.seq_id++;
1248 	}
1249 	else
1250 	/* Could be a BA_ACC or a BA_RJT.
1251 	 */
1252 	if (rctl == RCTL_BASIC_ACC) {
1253 	u_int bls_type = remove_from_ox_id_list(fi, ox_id);
1254 		DPRINTK1("BA_ACC received from S_ID 0x%x with OX_ID = %x in response to %x", s_id, ox_id, bls_type);
1255 		if (bls_type == RCTL_BASIC_ABTS) {
1256 		u_int STE_bit;
1257 			/* Invalidate resources for that Exchange.
1258 			 */
1259 			STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
1260 			if (STE_bit & SEST_V) {
1261 				*(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
1262 				invalidate_SEST_entry(fi, ox_id);
1263 			}
1264 		}
1265 	}
1266 	else
1267 	if (rctl == RCTL_BASIC_RJT) {
1268 	u_int bls_type = remove_from_ox_id_list(fi, ox_id);
1269 		DPRINTK1("BA_RJT received from S_ID 0x%x with OX_ID = %x in response to %x", s_id, ox_id, bls_type);
1270 		if (bls_type == RCTL_BASIC_ABTS) {
1271 		u_int STE_bit;
1272 			/* Invalidate resources for that Exchange.
1273 			 */
1274 			STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
1275 			if (STE_bit & SEST_V) {
1276 				*(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
1277 				invalidate_SEST_entry(fi, ox_id);
1278 			}
1279 		}
1280 	}
1281 	else
1282 		DPRINTK1("Frame with R_CTL = %x received from S_ID 0x%x with OX_ID %x", rctl, s_id, ox_id);
1283 
1284 	/* Else, discard the "Bad" SCSI frame.
1285 	 */
1286 
1287 	/* provide Tachyon with another set of buffers
1288 	 */
1289 	if (offset == (NO_OF_ENTRIES - 1))
1290 		update_SFSBQ_indx(fi);
1291 	LEAVE("handle_Bad_SCSI_Frame_interrupt");
1292 }
1293 
1294 static void handle_Inbound_SCSI_Status_interrupt(struct fc_info *fi)
1295 {
1296 struct Scsi_Host *host = fi->host;
1297 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
1298 u_int *ptr_imq_entry, *buff_addr, *ptr_rsp_info, *ptr_sense_info = NULL;
1299 int queue_indx, offset, payload_size;
1300 u_short received_ox_id, x_id;
1301 Scsi_Cmnd *Cmnd;
1302 u_int fcp_status, fcp_rsp_info_len = 0, fcp_sense_info_len = 0, s_id;
1303 	ENTER("handle_SCSI_status_interrupt");
1304 
1305 	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1306 	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1307 	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1308 	queue_indx = queue_indx >> 16;
1309 	buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1310 	payload_size = ntohl(*(ptr_imq_entry + 2));
1311 	received_ox_id = ntohl(*(buff_addr + 6)) >> 16;
1312 
1313 	buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1314 
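	/* The word offsets below index into the received frame as Tachyon laid
	 * it out in the SFS buffer: the FCP_RSP status, sense length and
	 * response-info length fields follow the frame headers.
	 */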
1315 	fcp_status = ntohl(*(buff_addr + 10));
1316 	ptr_rsp_info = buff_addr + 14;
1317 	if (fcp_status & FCP_STATUS_RSP_LEN)
1318 		fcp_rsp_info_len = ntohl(*(buff_addr + 13));
1319 
1320 	if (fcp_status & FCP_STATUS_SENSE_LEN) {
1321 		ptr_sense_info = ptr_rsp_info + fcp_rsp_info_len / 4;
1322 		fcp_sense_info_len = ntohl(*(buff_addr + 12));
1323 		DPRINTK("sense_info = %x", (u_int)ntohl(*ptr_sense_info));
1324 	}
1325 	DPRINTK("fcp_status = %x, fcp_rsp_len = %x", fcp_status, fcp_rsp_info_len);
1326 	x_id = received_ox_id & MAX_SCSI_XID;
1327 	Cmnd = hostdata->cmnd_handler[x_id];
1328 	hostdata->cmnd_handler[x_id] = NULL;
1329 	if (Cmnd != NULL) {
1330 		memset(Cmnd->sense_buffer, 0, sizeof(Cmnd->sense_buffer));
1331 		/* Check if there is a Sense field */
1332 		if (fcp_status & FCP_STATUS_SENSE_LEN) {
1333 		int size = sizeof(Cmnd->sense_buffer);
1334 			if (fcp_sense_info_len < size)
1335 				size = fcp_sense_info_len;
1336 			memcpy(Cmnd->sense_buffer, (char *)ptr_sense_info, size);
1337 		}
1338 		Cmnd->result = fcp_status & FCP_STATUS_MASK;
1339 		(*Cmnd->scsi_done) (Cmnd);
1340 	}
1341 	else
1342 		T_MSG("NULL Command out of handler!");
1343 
1344 	invalidate_SEST_entry(fi, received_ox_id);
1345 	s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1346 	fi->q.free_scsi_oxid[x_id] = OXID_AVAILABLE;
1347 
1348 	/* provide Tachyon with another set of buffers */
1349 	if (offset == (NO_OF_ENTRIES - 1))
1350 		update_SFSBQ_indx(fi);
1351 	LEAVE("handle_SCSI_status_interrupt");
1352 }
1353 
1354 static void invalidate_SEST_entry(struct fc_info *fi, u_short received_ox_id)
1355 {
1356 u_short x_id = received_ox_id & MAX_SCSI_XID;
1357 	/* Invalidate SEST entry if it is an OutBound SEST Entry
1358 	 */
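	/* Write (outbound) exchanges own a Tachyon header and a chain of EDBs;
	 * read (inbound) exchanges, marked by SCSI_READ_BIT in the OX_ID, own an
	 * SDB slot instead. Free whichever set of resources this exchange used.
	 */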
1359 	if (!(received_ox_id & SCSI_READ_BIT)) {
1360 	u_int *ptr_tach_header, *ptr_edb;
1361 	u_short temp_ox_id = NOT_SCSI_XID;
1362 	int i;
1363 		*(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
1364 
1365 		/* Invalidate the Tachyon Header structure
1366 		 */
1367 		ptr_tach_header = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 5)));
1368 		for (i = 0; i < NO_OF_TACH_HEADERS; i++)
1369 			if(fi->q.ptr_tachyon_header[i] == ptr_tach_header)
1370 				break;
1371 		if (i < NO_OF_TACH_HEADERS)
1372 			memset(ptr_tach_header, 0xFF, 32);
1373 		else
1374 			T_MSG("Tachyon Header not found while freeing in invalidate_SEST_entry()");
1375 
1376 		/* Invalidate the EDB used
1377 		 */
1378 		ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
1379 		for (i = 0; i < EDB_LEN; i++)
1380 			if (fi->q.ptr_edb[i] == ptr_edb)
1381 				break;
1382 		ptr_edb--;
1383 		if (i < EDB_LEN) {
1384 			do {
1385 				ptr_edb += 2;
1386 				fi->q.free_edb_list[i++] = EDB_FREE;
1387 				if (i == EDB_LEN) {
1388 					i = 0;
1389 					ptr_edb = fi->q.ptr_edb_base - 1;
1390 				}
1391 			} while ((htonl(*ptr_edb) & 0x80000000) != 0x80000000);
1392 		}
1393 		else
1394 			T_MSG("EDB not found while freeing in invalidate_SEST_entry()");
1395 
1396 		/* Search for its other header structure and destroy it!
1397 		 */
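		/* An outbound SCSI exchange appears to use a second Tachyon header
		 * besides the one referenced by the SEST entry, so scan forward
		 * through the header table for another header carrying this OX_ID.
		 */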
1398 		if ((ptr_tach_header + 16) < (fi->q.ptr_tachyon_header_base + (MY_PAGE_SIZE/4)))
1399 			ptr_tach_header += 16;
1400 		else
1401 			ptr_tach_header = fi->q.ptr_tachyon_header_base;
1402 		while (temp_ox_id != x_id) {
1403 			temp_ox_id = ntohl(*(ptr_tach_header + 6)) >> 16;
1404 			if (temp_ox_id == x_id) {
1405 				/* Paranoid checking...
1406 				 */
1407 				for (i = 0; i < NO_OF_TACH_HEADERS; i++)
1408 					if(fi->q.ptr_tachyon_header[i] == ptr_tach_header)
1409 						break;
1410 				if (i < NO_OF_TACH_HEADERS)
1411 					memset(ptr_tach_header, 0xFF, 32);
1412 				else
1413 					T_MSG("Tachyon Header not found while freeing in invalidate_SEST_entry()");
1414 				break;
1415 			}
1416 			else {
1417 				if ((ptr_tach_header + 16) < (fi->q.ptr_tachyon_header_base + (MY_PAGE_SIZE/4)))
1418 					ptr_tach_header += 16;
1419 				else
1420 					ptr_tach_header = fi->q.ptr_tachyon_header_base;
1421 			}
1422 		}
1423 	}
1424 	else {
1425 	u_short sdb_table_indx;
1426 		/* An Inbound Command has completed or needs to be Aborted.
1427 	 	 * Clear up the SDB buffers.
1428 		 */
1429 		sdb_table_indx = *(fi->q.ptr_sest[x_id] + 5);
1430 		fi->q.sdb_slot_status[sdb_table_indx] = SDB_FREE;
1431 	}
1432 }
1433 
1434 static void handle_Inbound_SCSI_Command_interrupt(struct fc_info *fi)
1435 {
1436 u_int *ptr_imq_entry;
1437 int queue_indx, offset;
1438 	ENTER("handle_Inbound_SCSI_Command_interrupt");
1439 	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1440 	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1441 	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1442 	queue_indx = queue_indx >> 16;
1443 	/* We discard the SCSI frame as we shouldn't be receiving
1444 	 * a SCSI Command in the first place
1445 	 */
1446 	/* provide Tachyon with another set of buffers */
1447 	if (offset == (NO_OF_ENTRIES - 1))
1448 		update_SFSBQ_indx(fi);
1449 	LEAVE("handle_Inbound_SCSI_Command_interrupt");
1450 }
1451 
1452 static void handle_SFS_interrupt(struct fc_info *fi)
1453 {
1454 u_int *ptr_imq_entry, *buff_addr;
1455 u_int class_of_frame, type_of_frame, s_id, els_type = 0, rctl;
1456 int queue_indx, offset, payload_size, login_state;
1457 u_short received_ox_id, fs_cmnd_code;
1458 	ENTER("handle_SFS_interrupt");
1459 	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1460 	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1461 	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1462 	queue_indx = queue_indx >> 16;
1463 	DPRINTK("queue_indx = %d, offset  = %d\n", queue_indx, offset);
1464 	payload_size = ntohl(*(ptr_imq_entry + 2));
1465 	DPRINTK("payload_size = %d", payload_size);
1466 
1467 	buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1468 
1469 	/* extract Type of Frame */
1470 	type_of_frame = ntohl(*(buff_addr + 4)) & 0xFF000000;
1471 	s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1472 	received_ox_id = ntohl(*(buff_addr + 6)) >> 16;
1473 	switch(type_of_frame) {
1474 		case TYPE_BLS:
1475 			rctl = ntohl(*(buff_addr + 2)) & 0xFF000000;
1476 			switch(rctl) {
1477 				case RCTL_BASIC_ABTS:
1478 					/* As an Initiator, we should never be receiving
1479 					 * this.
1480 		 			 */
1481 					DPRINTK1("ABTS received from S_ID 0x%x with OX_ID = %x", s_id, received_ox_id);
1482 					break;
1483 			}
1484 			break;
1485 		case TYPE_ELS:
1486 			class_of_frame = ntohl(*(buff_addr + 8));
1487 			login_state = sid_logged_in(fi, s_id);
1488 			switch(class_of_frame & 0xFF000000) {
1489 				case ELS_PLOGI:
1490 					if (s_id != fi->g.my_id) {
1491 						u_int ret_code;
1492 						DPRINTK1("PLOGI received from D_ID 0x%x with OX_ID = %x", s_id, received_ox_id);
1493 						if ((ret_code = plogi_ok(fi, buff_addr, payload_size)) == 0){
1494 							tx_logi_acc(fi, ELS_ACC, s_id, received_ox_id);
1495 							add_to_address_cache(fi, buff_addr);
1496 						}
1497 						else {
1498 							u_short cmnd_code = ret_code >> 16;
1499 							u_short expln_code =  ret_code;
1500 							tx_ls_rjt(fi, s_id, received_ox_id, cmnd_code, expln_code);
1501 						}
1502 					}
1503 					break;
1504 				case ELS_ACC:
1505 					els_type = remove_from_ox_id_list(fi, received_ox_id);
1506 					DPRINTK1("ELS_ACC received from D_ID 0x%x in response to ELS %x", s_id, els_type);
1507 					switch(els_type) {
1508 						case ELS_PLOGI:
1509 							add_to_address_cache(fi, buff_addr);
1510 							tx_prli(fi, ELS_PRLI, s_id, OX_ID_FIRST_SEQUENCE);
1511 							break;
1512 						case ELS_FLOGI:
1513 							add_to_address_cache(fi, buff_addr);
1514 							fi->g.my_id = ntohl(*(buff_addr + 2)) & 0x00FFFFFF;
1515 							fi->g.fabric_present = TRUE;
1516 							fi->g.my_ddaa = fi->g.my_id & 0xFFFF00;
							/* Log in to the Name Server.
1518 							 */
1519 							tx_logi(fi, ELS_PLOGI, DIRECTORY_SERVER);
1520 							break;
1521 						case ELS_NS_PLOGI:
1522 							fi->g.name_server = TRUE;
1523 							add_to_address_cache(fi, buff_addr);
1524 							tx_name_server_req(fi, FCS_RFC_4);
1525 							tx_scr(fi);
1526 							/* Some devices have a delay before
1527 							 * registering with the Name Server
1528 							 */
1529 							udelay(500);
1530 							tx_name_server_req(fi, FCS_GP_ID4);
1531 							break;
1532 						case ELS_PRLI:
1533 							mark_scsi_sid(fi, buff_addr, ADD_ENTRY);
1534 							break;
1535 						case ELS_ADISC:
1536 							if (!(validate_login(fi, buff_addr)))
1537 								tx_logo(fi, s_id, OX_ID_FIRST_SEQUENCE);
1538 							break;
1539 					}
1540 					break;
1541 				case ELS_PDISC:
1542 					DPRINTK1("ELS_PDISC received from D_ID 0x%x", s_id);
1543 					tx_logo(fi, s_id, received_ox_id);
1544 					break;
1545 				case ELS_ADISC:
1546 					DPRINTK1("ELS_ADISC received from D_ID 0x%x", s_id);
1547 					if (node_logged_in_prev(fi, buff_addr))
1548 						tx_adisc(fi, ELS_ACC, s_id, received_ox_id);
1549 					else
1550 						tx_logo(fi, s_id, received_ox_id);
1551 					break;
1552 				case ELS_PRLI:
1553 					DPRINTK1("ELS_PRLI received from D_ID 0x%x", s_id);
1554 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN)) {
1555 						tx_prli(fi, ELS_ACC, s_id, received_ox_id);
1556 						mark_scsi_sid(fi, buff_addr, ADD_ENTRY);
1557 					}
1558 					else
1559 						tx_logo(fi, s_id, received_ox_id);
1560 					break;
1561 				case ELS_PRLO:
1562 					DPRINTK1("ELS_PRLO received from D_ID 0x%x", s_id);
1563 					if ((login_state == NODE_LOGGED_OUT) || (login_state == NODE_NOT_PRESENT))
1564 						tx_logo(fi, s_id, received_ox_id);
1565 					else
					if (login_state == NODE_LOGGED_IN)
						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1569 					else
1570 					if (login_state == NODE_PROCESS_LOGGED_IN) {
1571 						tx_prli(fi, ELS_ACC, s_id, received_ox_id);
1572 						mark_scsi_sid(fi, buff_addr, DELETE_ENTRY);
1573 					}
1574 					break;
1575 				case ELS_LS_RJT:
1576 					els_type = remove_from_ox_id_list(fi, received_ox_id);
1577 					DPRINTK1("ELS_LS_RJT received from D_ID 0x%x in response to %x", s_id, els_type);
					/* We should be checking the reason code.
1579 					 */
1580 					switch (els_type) {
1581 						case ELS_ADISC:
1582 							tx_logi(fi, ELS_PLOGI, s_id);
1583 							break;
1584 					}
1585 					break;
1586 				case ELS_LOGO:
1587 					els_type = remove_from_ox_id_list(fi, received_ox_id);
1588 					DPRINTK1("ELS_LOGO received from D_ID 0x%x in response to %x", s_id, els_type);
1589 					remove_from_address_cache(fi, buff_addr, ELS_LOGO);
1590 					tx_acc(fi, s_id, received_ox_id);
1591 					if (els_type == ELS_ADISC)
1592 						tx_logi(fi, ELS_PLOGI, s_id);
1593 					break;
1594 				case ELS_RSCN:
1595 					DPRINTK1("ELS_RSCN received from D_ID 0x%x", s_id);
1596 					tx_acc(fi, s_id, received_ox_id);
1597 					remove_from_address_cache(fi, buff_addr, ELS_RSCN);
1598 					break;
1599 				case ELS_FARP_REQ:
1600 					/* We do not support FARP.
1601 					   So, silently discard it */
1602 					DPRINTK1("ELS_FARP_REQ received from D_ID 0x%x", s_id);
1603 					break;
1604 				case ELS_ABTX:
1605 					DPRINTK1("ELS_ABTX received from D_ID 0x%x", s_id);
1606 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1607 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1608 					else
1609 						tx_logo(fi, s_id, received_ox_id);
1610 					break;
1611 				case ELS_FLOGI:
1612 					DPRINTK1("ELS_FLOGI received from D_ID 0x%x", s_id);
1613 					if (fi->g.ptp_up == TRUE) {
1614 						/* The node could have come up as an N_Port
						 * in a Loop! So, try initializing as an NL_Port.
1616 						 */
1617 						take_tachyon_offline(fi);
1618 						/* write AL_TIME & E_D_TOV into the registers */
1619 						writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
1620 						writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);
1621 						DPRINTK1("FLOGI received, TACHYON initializing as L_Port...\n");
1622 						writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
1623 					}
1624 					else {
1625 						if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1626 							tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1627 						else
1628 							tx_logo(fi, s_id, received_ox_id);
1629 					}
1630 					break;
1631 				case ELS_ADVC:
1632 					DPRINTK1("ELS_ADVC received from D_ID 0x%x", s_id);
1633 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1634 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1635 					else
1636 						tx_logo(fi, s_id, received_ox_id);
1637 					break;
1638 				case ELS_ECHO:
1639 					DPRINTK1("ELS_ECHO received from D_ID 0x%x", s_id);
1640 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1641 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1642 					else
1643 						tx_logo(fi, s_id, received_ox_id);
1644 					break;
1645 				case ELS_ESTC:
1646 					DPRINTK1("ELS_ESTC received from D_ID 0x%x", s_id);
1647 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1648 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1649 					else
1650 						tx_logo(fi, s_id, received_ox_id);
1651 					break;
1652 				case ELS_ESTS:
1653 					DPRINTK1("ELS_ESTS received from D_ID 0x%x", s_id);
1654 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1655 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1656 					else
1657 						tx_logo(fi, s_id, received_ox_id);
1658 					break;
1659 				case ELS_RCS:
1660 					DPRINTK1("ELS_RCS received from D_ID 0x%x", s_id);
1661 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1662 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1663 					else
1664 						tx_logo(fi, s_id, received_ox_id);
1665 					break;
1666 				case ELS_RES:
1667 					DPRINTK1("ELS_RES received from D_ID 0x%x", s_id);
1668 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1669 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1670 					else
1671 						tx_logo(fi, s_id, received_ox_id);
1672 					break;
1673 				case ELS_RLS:
1674 					DPRINTK1("ELS_RLS received from D_ID 0x%x", s_id);
1675 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1676 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1677 					else
1678 						tx_logo(fi, s_id, received_ox_id);
1679 					break;
1680 				case ELS_RRQ:
1681 					DPRINTK1("ELS_RRQ received from D_ID 0x%x", s_id);
1682 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1683 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1684 					else
1685 						tx_logo(fi, s_id, received_ox_id);
1686 					break;
1687 				case ELS_RSS:
1688 					DPRINTK1("ELS_RSS received from D_ID 0x%x", s_id);
1689 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1690 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1691 					else
1692 						tx_logo(fi, s_id, received_ox_id);
1693 					break;
1694 				case ELS_RTV:
1695 					DPRINTK1("ELS_RTV received from D_ID 0x%x", s_id);
1696 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1697 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1698 					else
1699 						tx_logo(fi, s_id, received_ox_id);
1700 					break;
1701 				case ELS_RSI:
1702 					DPRINTK1("ELS_RSI received from D_ID 0x%x", s_id);
1703 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1704 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1705 					else
1706 						tx_logo(fi, s_id, received_ox_id);
1707 					break;
1708 				case ELS_TEST:
1709 					/* No reply sequence */
1710 					DPRINTK1("ELS_TEST received from D_ID 0x%x", s_id);
1711 					break;
1712 				case ELS_RNC:
1713 					DPRINTK1("ELS_RNC received from D_ID 0x%x", s_id);
1714 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1715 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1716 					else
1717 						tx_logo(fi, s_id, received_ox_id);
1718 					break;
1719 				case ELS_RVCS:
1720 					DPRINTK1("ELS_RVCS received from D_ID 0x%x", s_id);
1721 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1722 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1723 					else
1724 						tx_logo(fi, s_id, received_ox_id);
1725 					break;
1726 				case ELS_TPLS:
1727 					DPRINTK1("ELS_TPLS received from D_ID 0x%x", s_id);
1728 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1729 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1730 					else
1731 						tx_logo(fi, s_id, received_ox_id);
1732 					break;
1733 				case ELS_GAID:
1734 					DPRINTK1("ELS_GAID received from D_ID 0x%x", s_id);
1735 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1736 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1737 					else
1738 						tx_logo(fi, s_id, received_ox_id);
1739 					break;
1740 				case ELS_FACT:
1741 					DPRINTK1("ELS_FACT received from D_ID 0x%x", s_id);
1742 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1743 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1744 					else
1745 						tx_logo(fi, s_id, received_ox_id);
1746 					break;
1747 				case ELS_FAN:
					/* Hmmm... we don't support FAN. */
1749 					DPRINTK1("ELS_FAN received from D_ID 0x%x", s_id);
1750 					tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1751 					break;
1752 				case ELS_FDACT:
1753 					DPRINTK1("ELS_FDACT received from D_ID 0x%x", s_id);
1754 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1755 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1756 					else
1757 						tx_logo(fi, s_id, received_ox_id);
1758 					break;
1759 				case ELS_NACT:
1760 					DPRINTK1("ELS_NACT received from D_ID 0x%x", s_id);
1761 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1762 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1763 					else
1764 						tx_logo(fi, s_id, received_ox_id);
1765 					break;
1766 				case ELS_NDACT:
1767 					DPRINTK1("ELS_NDACT received from D_ID 0x%x", s_id);
1768 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1769 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1770 					else
1771 						tx_logo(fi, s_id, received_ox_id);
1772 					break;
1773 				case ELS_QoSR:
1774 					DPRINTK1("ELS_QoSR received from D_ID 0x%x", s_id);
1775 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1776 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1777 					else
1778 						tx_logo(fi, s_id, received_ox_id);
1779 					break;
1780 				case ELS_FDISC:
1781 					DPRINTK1("ELS_FDISC received from D_ID 0x%x", s_id);
1782 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1783 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1784 					else
1785 						tx_logo(fi, s_id, received_ox_id);
1786 					break;
1787 				default:
1788 					DPRINTK1("ELS Frame %x received from D_ID 0x%x", class_of_frame, s_id);
1789 					if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1790 						tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1791 					else
1792 						tx_logo(fi, s_id, received_ox_id);
1793 					break;
1794 			}
1795 			break;
1796 		case TYPE_FC_SERVICES:
1797 			fs_cmnd_code = (ntohl(*(buff_addr + 10)) & 0xFFFF0000) >>16;
1798 			switch(fs_cmnd_code) {
1799 				case FCS_ACC:
1800 					els_type = remove_from_ox_id_list(fi, received_ox_id);
1801 					DPRINTK1("FCS_ACC received from D_ID 0x%x in response to %x", s_id, els_type);
1802 					if (els_type == FCS_GP_ID4)
1803 						explore_fabric(fi, buff_addr);
1804 					break;
1805 				case FCS_REJECT:
1806 					DPRINTK1("FCS_REJECT received from D_ID 0x%x in response to %x", s_id, els_type);
1807 					break;
1808 			}
1809 			break;
1810 		case TYPE_LLC_SNAP:
1811 			rx_net_packet(fi, (u_char *)buff_addr, payload_size);
1812 			break;
1813 		default:
1814 			T_MSG("Frame Type %x received from %x", type_of_frame, s_id);
1815 	}
1816 
	/* provide Tachyon with another set of buffers */
1818 	if (offset == (NO_OF_ENTRIES - 1))
1819 		update_SFSBQ_indx(fi);
1820 	LEAVE("handle_SFS_interrupt");
1821 }
1822 
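/* Handle a Frame Manager status interrupt: Link Up/Down, Loop Up,
 * NOS/OLS, Elastic Store errors and the other link conditions. Brings
 * the link back up and (re)starts port discovery/fabric login as
 * needed.
 */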
1823 static void handle_FM_interrupt(struct fc_info *fi)
1824 {
1825 u_int fm_status;
1826 u_int tachyon_status;
1827 
1828 	ENTER("handle_FM_interrupt");
1829 	fm_status = readl(fi->t_r.ptr_fm_status_reg);
1830 	tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
1831 	DPRINTK("FM_status = %x, Tachyon_status = %x", fm_status, tachyon_status);
1832 	if (fm_status & LINK_DOWN) {
1833 		T_MSG("Fibre Channel Link DOWN");
1834 		fm_status = readl(fi->t_r.ptr_fm_status_reg);
1835 
1836 		del_timer(&fi->explore_timer);
1837 		del_timer(&fi->nport_timer);
1838 		del_timer(&fi->lport_timer);
1839 		del_timer(&fi->display_cache_timer);
1840 		fi->g.link_up = FALSE;
1841 		if (fi->g.ptp_up == TRUE)
1842 			fi->g.n_port_try = FALSE;
1843 		fi->g.ptp_up = FALSE;
1844 		fi->g.port_discovery = FALSE;
1845 		fi->g.explore_fabric = FALSE;
1846 		fi->g.perform_adisc = FALSE;
1847 
		/* Implicitly log out all nodes */
1849 		if (fi->node_info_list) {
1850 			struct fc_node_info *temp_list = fi->node_info_list;
1851 				while(temp_list) {
1852 					temp_list->login = LOGIN_ATTEMPTED;
1853 					temp_list = temp_list->next;
1854 				}
1855 				fi->num_nodes = 0;
1856 		}
1857 
1858 		if ((fi->g.n_port_try == FALSE) && (fi->g.dont_init == FALSE)){
1859 			take_tachyon_offline(fi);
1860 			/* write AL_TIME & E_D_TOV into the registers */
1861 			writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
1862 
1863 			if ((fi->g.fabric_present == TRUE) && (fi->g.loop_up == TRUE)) {
1864 			u_int al_pa = fi->g.my_id & 0xFF;
1865 				writel((al_pa << 24) | LOOP_INIT_FABRIC_ADDRESS | LOOP_INIT_PREVIOUS_ADDRESS, fi->t_r.ptr_fm_config_reg);
1866 			}
1867 			else
1868 			if (fi->g.loop_up == TRUE) {
1869 			u_int al_pa = fi->g.my_id & 0xFF;
1870 				writel((al_pa << 24) | LOOP_INIT_PREVIOUS_ADDRESS, fi->t_r.ptr_fm_config_reg);
1871 			}
1872 			else
1873 				writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);
1874 			fi->g.loop_up = FALSE;
1875 			DPRINTK1("In LDWN TACHYON initializing as L_Port...\n");
1876 			writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
1877 		}
1878 	}
1879 
1880     if (fm_status & NON_PARTICIPATING) {
1881 	  	T_MSG("Did not acquire an AL_PA. I am not participating");
1882     }
1883 	else
1884 	if ((fm_status & LINK_UP) && ((fm_status & LINK_DOWN) == 0)) {
1885 	  T_MSG("Fibre Channel Link UP");
1886 	  if ((fm_status & NON_PARTICIPATING) != TRUE) {
1887 		fi->g.link_up = TRUE;
1888 		if (tachyon_status & OSM_FROZEN) {
1889 			reset_tachyon(fi, ERROR_RELEASE);
1890 			reset_tachyon(fi, OCQ_RESET);
1891 		}
1892 		init_timer(&fi->explore_timer);
1893 		init_timer(&fi->nport_timer);
1894 		init_timer(&fi->lport_timer);
1895 		init_timer(&fi->display_cache_timer);
1896 		if ((fm_status & OLD_PORT) == 0) {
1897 			fi->g.loop_up = TRUE;
1898 			fi->g.ptp_up = FALSE;
1899 			fi->g.my_id = readl(fi->t_r.ptr_fm_config_reg) >> 24;
1900 			DPRINTK1("My AL_PA = %x", fi->g.my_id);
1901 			fi->g.port_discovery = TRUE;
1902 			fi->g.explore_fabric = FALSE;
1903 		}
1904 		else
1905 		if (((fm_status & 0xF0) == OLD_PORT) && ((fm_status & 0x0F) == PORT_STATE_ACTIVE)) {
1906 			fi->g.loop_up = FALSE;
1907 			fi->g.my_id = 0x0;
1908 			/* In a point-to-point configuration, we expect to be
1909 			 * connected to an F_Port. This driver does not yet support
1910 			 * a configuration where it is connected to another N_Port
1911 			 * directly.
1912 			 */
1913 			fi->g.explore_fabric = TRUE;
1914 			fi->g.port_discovery = FALSE;
1915 			if (fi->g.n_port_try == FALSE) {
1916 				take_tachyon_offline(fi);
1917 				/* write R_T_TOV & E_D_TOV into the registers */
1918 				writel(PTP_TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
1919 				writel(BB_CREDIT | NPORT, fi->t_r.ptr_fm_config_reg);
1920 				fi->g.n_port_try = TRUE;
1921 				DPRINTK1("In LUP TACHYON initializing as N_Port...\n");
1922 				writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
1923 			}
1924 			else {
1925 				fi->g.ptp_up = TRUE;
1926 				tx_logi(fi, ELS_FLOGI, F_PORT);
1927 			}
1928 		}
1929 		fi->g.my_ddaa = 0x0;
1930 		fi->g.fabric_present = FALSE;
		/* We haven't sent out any Name Server Reqs */
1932 		fi->g.name_server = FALSE;
1933 		fi->g.alpa_list_index = 0;
1934 		fi->g.ox_id = NOT_SCSI_XID;
1935 		fi->g.my_mtu = TACH_FRAME_SIZE;
1936 
1937 		/* Implicitly LOGO with all logged-in nodes.
1938 		 */
1939 		if (fi->node_info_list) {
1940 		struct fc_node_info *temp_list = fi->node_info_list;
1941 			while(temp_list) {
1942 				temp_list->login = LOGIN_ATTEMPTED;
1943 				temp_list = temp_list->next;
1944 			}
1945 			fi->num_nodes = 0;
1946 			fi->g.perform_adisc = TRUE;
1947 			//fi->g.perform_adisc = FALSE;
1948 			fi->g.port_discovery = FALSE;
1949 			tx_logi(fi, ELS_FLOGI, F_PORT);
1950 		}
1951 		else {
			/* If the Link is coming up for the _first_ time or no nodes
			 * were logged in before...
1954 			 */
1955 			fi->g.scsi_oxid = 0;
1956 			fi->g.seq_id = 0x00;
1957 			fi->g.perform_adisc = FALSE;
1958 		}
1959 
1960 		/* reset OX_ID table */
1961 		while (fi->ox_id_list) {
1962 		struct ox_id_els_map *temp = fi->ox_id_list;
1963 			fi->ox_id_list = fi->ox_id_list->next;
1964 			kfree(temp);
1965 		}
1966 		fi->ox_id_list = NULL;
	  } /* End of if participating */
1968 	}
1969 
1970 	if (fm_status & ELASTIC_STORE_ERROR) {
		/* Too much junk on the Link.
		 * Try to clear it up by Tx'ing a PLOGI to ourselves.
		 */
1974 		if (fi->g.link_up == TRUE)
1975 			tx_logi(fi, ELS_PLOGI, fi->g.my_id);
1976 	}
1977 
1978 	if (fm_status & LOOP_UP) {
1979 		if (tachyon_status & OSM_FROZEN) {
1980 			reset_tachyon(fi, ERROR_RELEASE);
1981 			reset_tachyon(fi, OCQ_RESET);
1982 		}
1983 	}
1984 
1985 	if (fm_status & NOS_OLS_RECEIVED){
1986 		if (fi->g.nport_timer_set == FALSE) {
1987 			DPRINTK("NOS/OLS Received");
1988 			DPRINTK("FM_status = %x", fm_status);
1989 			fi->nport_timer.function = nos_ols_timer;
1990 			fi->nport_timer.data = (unsigned long)fi;
1991 			fi->nport_timer.expires = RUN_AT((3*HZ)/100); /* 30 msec */
1992 			init_timer(&fi->nport_timer);
1993 			add_timer(&fi->nport_timer);
1994 			fi->g.nport_timer_set = TRUE;
1995 		}
1996 	}
1997 
1998 	if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_LF1) || ((fm_status & 0x0F) == PORT_STATE_LF2))) {
1999 		DPRINTK1("Link Fail-I in OLD-PORT.");
2000 		take_tachyon_offline(fi);
2001 		reset_tachyon(fi, SOFTWARE_RESET);
2002 	}
2003 
2004 	if (fm_status & LOOP_STATE_TIMEOUT){
		if ((fm_status & 0xF0) == ARBITRATING)
			DPRINTK1("E_D_TOV timed out in ARBITRATING state...");
		if ((fm_status & 0xF0) == ARB_WON)
			DPRINTK1("E_D_TOV timed out in ARBITRATION WON state...");
		if ((fm_status & 0xF0) == OPEN)
			DPRINTK1("E_D_TOV timed out in OPEN state...");
		if ((fm_status & 0xF0) == OPENED)
			DPRINTK1("E_D_TOV timed out in OPENED state...");
		if ((fm_status & 0xF0) == TX_CLS)
			DPRINTK1("E_D_TOV timed out in XMITTED CLOSE state...");
		if ((fm_status & 0xF0) == RX_CLS)
			DPRINTK1("E_D_TOV timed out in RECEIVED CLOSE state...");
		if ((fm_status & 0xF0) == INITIALIZING)
			DPRINTK1("E_D_TOV timed out in INITIALIZING state...");
2019 		DPRINTK1("Initializing Loop...");
2020 		writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
2021 	}
2022 
2023 	if ((fm_status & BAD_ALPA) && (fi->g.loop_up == TRUE)) {
2024 	u_char bad_alpa = (readl(fi->t_r.ptr_fm_rx_al_pa_reg) & 0xFF00) >> 8;
2025 		if (tachyon_status & OSM_FROZEN) {
2026 			reset_tachyon(fi, ERROR_RELEASE);
2027 			reset_tachyon(fi, OCQ_RESET);
2028 		}
2029 		/* Fix for B34 */
2030 		tx_logi(fi, ELS_PLOGI, fi->g.my_id);
2031 
2032 		if (!fi->g.port_discovery && !fi->g.perform_adisc) {
2033 			if (bad_alpa != 0xFE)
2034 				DPRINTK("Bad AL_PA = %x", bad_alpa);
2035 		}
2036 		else {
2037 			if ((fi->g.perform_adisc == TRUE) && (bad_alpa == 0x00)) {
2038 				DPRINTK1("Performing ADISC...");
2039 				fi->g.fabric_present = FALSE;
2040 				perform_adisc(fi);
2041 			}
2042 		}
2043 	}
2044 
2045 	if (fm_status & LIPF_RECEIVED){
2046 		DPRINTK("LIP(F8) Received");
2047 	}
2048 
2049 	if (fm_status & LINK_FAILURE) {
2050 		if (fm_status & LOSS_OF_SIGNAL)
2051 			DPRINTK1("Detected Loss of Signal.");
2052 		if (fm_status & OUT_OF_SYNC)
2053 			DPRINTK1("Detected Loss of Synchronization.");
2054 	}
2055 
2056 	if (fm_status & TRANSMIT_PARITY_ERROR) {
2057 		/* Bad! Should not happen. Solution-> Hard Reset.
2058 		 */
2059 		T_MSG("Parity Error. Perform Hard Reset!");
2060 	}
2061 
2062 	if (fi->g.alpa_list_index >= MAX_NODES){
2063 		if (fi->g.port_discovery == TRUE) {
2064 			fi->g.port_discovery = FALSE;
2065 			add_display_cache_timer(fi);
2066 		}
2067 		fi->g.alpa_list_index = MAX_NODES;
2068 	}
2069 
2070 	if (fi->g.port_discovery == TRUE)
2071 		local_port_discovery(fi);
2072 
2073 	LEAVE("handle_FM_interrupt");
2074 	return;
2075 }
2076 
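/* PLOGI to the next AL_PA in the list that is not already logged in
 * (and is not our own address); on the very first pass, with no fabric
 * present, an FLOGI is attempted instead. One address per invocation.
 */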
2077 static void local_port_discovery(struct fc_info *fi)
2078 {
2079 	if (fi->g.loop_up == TRUE) {
2080 		/* If this is not here, some of the Bad AL_PAs are missed.
2081 		 */
2082 		udelay(20);
2083 		if ((fi->g.alpa_list_index == 0) && (fi->g.fabric_present == FALSE)){
2084 			tx_logi(fi, ELS_FLOGI, F_PORT);
2085 		}
2086 		else {
2087 		int login_state = sid_logged_in(fi, fi->g.my_ddaa | alpa_list[fi->g.alpa_list_index]);
2088 			while ((fi->g.alpa_list_index == 0) || ((fi->g.alpa_list_index < MAX_NODES) && ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN) || (alpa_list[fi->g.alpa_list_index] == (fi->g.my_id & 0xFF)))))
2089 				fi->g.alpa_list_index++;
2090 			if (fi->g.alpa_list_index < MAX_NODES)
2091 				tx_logi(fi, ELS_PLOGI, alpa_list[fi->g.alpa_list_index]);
2092 		}
2093 		fi->g.alpa_list_index++;
2094 		if (fi->g.alpa_list_index >= MAX_NODES){
2095 			if (fi->g.port_discovery == TRUE) {
2096 				fi->g.port_discovery = FALSE;
2097 				add_display_cache_timer(fi);
2098 			}
2099 			fi->g.alpa_list_index = MAX_NODES;
2100 		}
2101 	}
2102 }
2103 
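/* Runs ~30 msec after NOS/OLS was seen. If the link is still not up,
 * retry initialization as an N_Port from the old-port state, or reset
 * Tachyon on a Loop/Link failure.
 */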
2104 static void nos_ols_timer(unsigned long data)
2105 {
2106 struct fc_info *fi = (struct fc_info*)data;
2107 u_int fm_status;
2108 	fm_status = readl(fi->t_r.ptr_fm_status_reg);
2109 	DPRINTK1("FM_status in timer= %x", fm_status);
2110 	fi->g.nport_timer_set = FALSE;
2111 	del_timer(&fi->nport_timer);
2112 	if ((fi->g.ptp_up == TRUE) || (fi->g.loop_up == TRUE))
2113 		return;
2114 	if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_ACTIVE) || ((fm_status & 0x0F) == PORT_STATE_OFFLINE))) {
2115 		DPRINTK1("In OLD-PORT after E_D_TOV.");
2116 		take_tachyon_offline(fi);
2117 		/* write R_T_TOV & E_D_TOV into the registers */
2118 		writel(PTP_TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
2119 		writel(BB_CREDIT | NPORT, fi->t_r.ptr_fm_config_reg);
2120 		fi->g.n_port_try = TRUE;
2121 		DPRINTK1("In timer, TACHYON initializing as N_Port...\n");
2122 		writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
2123 	}
2124 	else
2125 	if ((fi->g.lport_timer_set == FALSE) && ((fm_status & 0xF0) == LOOP_FAIL)) {
2126 		DPRINTK1("Loop Fail after E_D_TOV.");
2127 		fi->lport_timer.function = loop_timer;
2128 		fi->lport_timer.data = (unsigned long)fi;
2129 		fi->lport_timer.expires = RUN_AT((8*HZ)/100);
2130 		init_timer(&fi->lport_timer);
2131 		add_timer(&fi->lport_timer);
2132 		fi->g.lport_timer_set = TRUE;
2133 		take_tachyon_offline(fi);
2134 		reset_tachyon(fi, SOFTWARE_RESET);
2135 	}
2136 	else
2137 	if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_LF1) || ((fm_status & 0x0F) == PORT_STATE_LF2))) {
2138 		DPRINTK1("Link Fail-II in OLD-PORT.");
2139 		take_tachyon_offline(fi);
2140 		reset_tachyon(fi, SOFTWARE_RESET);
2141 	}
2142 }
2143 
2144 static void loop_timer(unsigned long data)
2145 {
2146 struct fc_info *fi = (struct fc_info*)data;
2147 	fi->g.lport_timer_set = FALSE;
2148 	del_timer(&fi->lport_timer);
2149 	if ((fi->g.ptp_up == TRUE) || (fi->g.loop_up == TRUE))
2150 		return;
2151 }
2152 
2153 static void add_display_cache_timer(struct fc_info *fi)
2154 {
2155 	fi->display_cache_timer.function = display_cache_timer;
2156 	fi->display_cache_timer.data = (unsigned long)fi;
2157 	fi->display_cache_timer.expires = RUN_AT(fi->num_nodes * HZ);
2158 	init_timer(&fi->display_cache_timer);
2159 	add_timer(&fi->display_cache_timer);
2160 }
2161 
2162 static void display_cache_timer(unsigned long data)
2163 {
2164 struct fc_info *fi = (struct fc_info*)data;
2165 	del_timer(&fi->display_cache_timer);
2166 	display_cache(fi);
2167 	return;
2168 }
2169 
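/* Write a reset value into the Tachyon control register. Anything
 * other than a software reset is polled for completion in the status
 * register; a software reset is followed by re-writing the Tachyon
 * registers.
 */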
2170 static void reset_tachyon(struct fc_info *fi, u_int value)
2171 {
2172 u_int tachyon_status, reset_done = OCQ_RESET_STATUS | SCSI_FREEZE_STATUS;
2173 int not_done = 1, i = 0;
2174 	writel(value, fi->t_r.ptr_tach_control_reg);
2175 	if (value == OCQ_RESET)
2176 		fi->q.ocq_prod_indx = 0;
2177 	tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
2178 
	/* Software resets are done immediately, whereas the others aren't.
	 * It takes about 30 clocks to do the reset.
	 */
2181 	if (value != SOFTWARE_RESET) {
2182 		while(not_done) {
2183 			if (i++ > 100000) {
2184 				T_MSG("Reset was unsuccessful! Tachyon Status = %x", tachyon_status);
2185 				break;
2186 			}
2187 			tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
2188 			if ((tachyon_status & reset_done) == 0)
2189 				not_done = 0;
2190 		}
2191 	}
2192 	else {
2193 		write_to_tachyon_registers(fi);
2194 	}
2195 }
2196 
2197 static void take_tachyon_offline(struct fc_info *fi)
2198 {
2199 u_int fm_status = readl(fi->t_r.ptr_fm_status_reg);
2200 
	/* According to the Manual and the errata, the first two conditions
	 * should never be true. The current implementation is nevertheless
	 * decently stable.
	 */
2205 	//if ((fm_status & 0xF0) == LOOP_FAIL) {
2206 	if (fm_status == LOOP_FAIL) {
2207 		// workaround as in P. 89
2208 		writel(HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
2209 		if (fi->g.loop_up == TRUE)
2210 			writel(SOFTWARE_RESET, fi->t_r.ptr_tach_control_reg);
2211 		else {
2212 			writel(OFFLINE, fi->t_r.ptr_fm_control_reg);
2213 			writel(EXIT_HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
2214 		}
2215 	}
2216 	else
2217 	//if ((fm_status & LOOP_UP) == LOOP_UP) {
2218 	if (fm_status == LOOP_UP) {
2219 		writel(SOFTWARE_RESET, fi->t_r.ptr_tach_control_reg);
2220 	}
2221 	else
2222 		writel(OFFLINE, fi->t_r.ptr_fm_control_reg);
2223 }
2224 
2225 
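/* Read the Node Name and Port Name (WWNN/WWPN) out of the NOVRAM.
 * On Interphase cards they start at an offset of 32 words.
 */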
2226 static void read_novram(struct fc_info *fi)
2227 {
2228 int off = 0;
2229 	fi->n_r.ptr_novram_hw_control_reg = fi->i_r.ptr_ichip_hw_control_reg;
2230 	fi->n_r.ptr_novram_hw_status_reg = fi->i_r.ptr_ichip_hw_status_reg;
2231 	iph5526_nr_do_init(fi);
2232 	if (fi->clone_id == PCI_VENDOR_ID_INTERPHASE)
2233 		off = 32;
2234 
2235 	fi->g.my_node_name_high = (fi->n_r.data[off] << 16) | fi->n_r.data[off+1];
2236 	fi->g.my_node_name_low = (fi->n_r.data[off+2] << 16) | fi->n_r.data[off+3];
2237 	fi->g.my_port_name_high = (fi->n_r.data[off+4] << 16) | fi->n_r.data[off+5];
2238 	fi->g.my_port_name_low = (fi->n_r.data[off+6] << 16) | fi->n_r.data[off+7];
2239 	DPRINTK("node_name = %x %x", fi->g.my_node_name_high, fi->g.my_node_name_low);
2240 	DPRINTK("port_name = %x %x", fi->g.my_port_name_high, fi->g.my_port_name_low);
2241 }
2242 
2243 static void reset_ichip(struct fc_info *fi)
2244 {
2245 	/* (i)chip reset */
2246 	writel(ICHIP_HCR_RESET, fi->i_r.ptr_ichip_hw_control_reg);
	/* wait for the chip to get reset */
	mdelay(10);
	/* de-assert reset */
2250 	writel(ICHIP_HCR_DERESET, fi->i_r.ptr_ichip_hw_control_reg);
2251 
2252 	/* enable INT lines on the (i)chip */
2253 	writel(ICHIP_HCR_ENABLE_INTA , fi->i_r.ptr_ichip_hw_control_reg);
2254 	/* enable byte swap */
2255 	writel(ICHIP_HAMR_BYTE_SWAP_ADDR_TR, fi->i_r.ptr_ichip_hw_addr_mask_reg);
2256 }
2257 
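/* Build and transmit a login ELS (FLOGI/PLOGI) to d_id as a new
 * exchange. No completion interrupt is requested when we PLOGI to
 * ourselves, as that would disturb port discovery.
 */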
2258 static void tx_logi(struct fc_info *fi, u_int logi, u_int d_id)
2259 {
2260 int int_required = 1;
2261 u_short ox_id = OX_ID_FIRST_SEQUENCE;
2262 u_int r_ctl = RCTL_ELS_UCTL;
2263 u_int type  = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2264 u_int my_mtu = fi->g.my_mtu;
2265 	ENTER("tx_logi");
	/* We don't want to be interrupted for our own login.
2267 	 * It screws up the port discovery process.
2268 	 */
2269 	if (d_id == fi->g.my_id)
2270 		int_required = 0;
2271 	fill_login_frame(fi, logi);
2272 	fi->g.type_of_frame = FC_ELS;
2273 	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.login, sizeof(LOGIN));
2274 	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),sizeof(LOGIN), r_ctl, type, d_id, my_mtu, int_required, ox_id, logi);
2275 	fi->g.e_i++;
2276 	if (fi->g.e_i == MAX_PENDING_FRAMES)
2277 		fi->g.e_i = 0;
2278 	LEAVE("tx_logi");
2279 	return;
2280 }
2281 
2282 static void tx_logi_acc(struct fc_info *fi, u_int logi, u_int d_id, u_short received_ox_id)
2283 {
2284 int int_required = 0;
2285 u_int r_ctl = RCTL_ELS_SCTL;
2286 u_int type  = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2287 u_int my_mtu = fi->g.my_mtu;
2288 	ENTER("tx_logi_acc");
2289 	fill_login_frame(fi, logi);
2290 	fi->g.type_of_frame = FC_ELS;
2291 	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.login, sizeof(LOGIN));
2292 	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),sizeof(LOGIN), r_ctl, type, d_id, my_mtu, int_required, received_ox_id, logi);
2293 	fi->g.e_i++;
2294 	if (fi->g.e_i == MAX_PENDING_FRAMES)
2295 		fi->g.e_i = 0;
2296 	LEAVE("tx_logi_acc");
2297 	return;
2298 }
2299 
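/* Transmit a PRLI: as a request on a new exchange when command_code is
 * ELS_PRLI, otherwise as an ACC reply on the received exchange.
 */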
2300 static void tx_prli(struct fc_info *fi, u_int command_code, u_int d_id, u_short received_ox_id)
2301 {
2302 int int_required = 1;
2303 u_int r_ctl = RCTL_ELS_UCTL;
2304 u_int type  = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2305 u_int my_mtu = fi->g.my_mtu;
2306 	ENTER("tx_prli");
2307 	if (command_code == ELS_PRLI)
2308 		fi->g.prli.cmnd_code = htons((ELS_PRLI | PAGE_LEN) >> 16);
2309 	else {
2310 		fi->g.prli.cmnd_code = htons((ELS_ACC | PAGE_LEN) >> 16);
2311 		int_required = 0;
2312 		type  = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2313 		r_ctl = RCTL_ELS_SCTL;
2314 	}
2315 	fi->g.prli.payload_length = htons(PRLI_LEN);
2316 	fi->g.prli.type_code = htons(FCP_TYPE_CODE);
2317 	fi->g.prli.est_image_pair = htons(IMAGE_PAIR);
2318 	fi->g.prli.responder_pa = 0;
2319 	fi->g.prli.originator_pa = 0;
2320 	fi->g.prli.service_params = htonl(INITIATOR_FUNC | READ_XFER_RDY_DISABLED);
2321 	fi->g.type_of_frame = FC_ELS;
2322 	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.prli, sizeof(PRLI));
2323 	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]), sizeof(PRLI), r_ctl, type, d_id, my_mtu, int_required, received_ox_id, command_code);
2324 	fi->g.e_i++;
2325 	if (fi->g.e_i == MAX_PENDING_FRAMES)
2326 		fi->g.e_i = 0;
2327 	LEAVE("tx_prli");
2328 	return;
2329 }
2330 
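/* Transmit a LOGO carrying our N_Port ID and Port Name to d_id. */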
2331 static void tx_logo(struct fc_info *fi, u_int d_id, u_short received_ox_id)
2332 {
2333 int int_required = 1;
2334 u_int r_ctl = RCTL_ELS_UCTL;
2335 u_int type  = TYPE_ELS | EXCHANGE_RESPONDER | SEQUENCE_RESPONDER | FIRST_SEQUENCE | END_SEQUENCE | SEQUENCE_INITIATIVE;
2336 int size = sizeof(LOGO);
2337 char fc_id[3];
2338 u_int my_mtu = fi->g.my_mtu;
2339 	ENTER("tx_logo");
2340 	fi->g.logo.logo_cmnd = htonl(ELS_LOGO);
2341 	fi->g.logo.reserved = 0;
2342 	memcpy(fc_id, &(fi->g.my_id), 3);
2343 	fi->g.logo.n_port_id_0 = fc_id[0];
2344 	fi->g.logo.n_port_id_1 = fc_id[1];
2345 	fi->g.logo.n_port_id_2 = fc_id[2];
2346 	fi->g.logo.port_name_up = htonl(N_PORT_NAME_HIGH);
2347 	fi->g.logo.port_name_low = htonl(N_PORT_NAME_LOW);
2348 	fi->g.type_of_frame = FC_ELS;
2349 	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.logo, sizeof(LOGO));
2350 	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, ELS_LOGO);
2351 	fi->g.e_i++;
2352 	if (fi->g.e_i == MAX_PENDING_FRAMES)
2353 		fi->g.e_i = 0;
2354 	LEAVE("tx_logo");
2355 }
2356 
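/* Transmit an ADISC with our addresses and names: as a request on a
 * new exchange (ELS_ADISC) or as an ACC reply on the received one.
 */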
2357 static void tx_adisc(struct fc_info *fi, u_int cmnd_code, u_int d_id, u_short received_ox_id)
2358 {
2359 int int_required = 0;
2360 u_int r_ctl = RCTL_ELS_SCTL;
2361 u_int type  = TYPE_ELS | EXCHANGE_RESPONDER | SEQUENCE_RESPONDER | FIRST_SEQUENCE | END_SEQUENCE;
2362 int size = sizeof(ADISC);
2363 u_int my_mtu = fi->g.my_mtu;
2364 	fi->g.adisc.ls_cmnd_code = htonl(cmnd_code);
2365 	fi->g.adisc.hard_address = htonl(0);
2366 	fi->g.adisc.port_name_high = htonl(N_PORT_NAME_HIGH);
2367 	fi->g.adisc.port_name_low = htonl(N_PORT_NAME_LOW);
2368 	fi->g.adisc.node_name_high = htonl(NODE_NAME_HIGH);
2369 	fi->g.adisc.node_name_low = htonl(NODE_NAME_LOW);
2370 	fi->g.adisc.n_port_id = htonl(fi->g.my_id);
2371 	if (cmnd_code == ELS_ADISC) {
2372 		int_required = 1;
2373 		r_ctl = RCTL_ELS_UCTL;
2374 		type  = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2375 	}
2376 	fi->g.type_of_frame = FC_ELS;
2377 	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.adisc, size);
2378 	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, cmnd_code);
2379 	fi->g.e_i++;
2380 	if (fi->g.e_i == MAX_PENDING_FRAMES)
2381 		fi->g.e_i = 0;
2382 }
2383 
2384 static void tx_ls_rjt(struct fc_info *fi, u_int d_id, u_short received_ox_id, u_short reason_code, u_short expln_code)
2385 {
2386 int int_required = 0;
2387 u_int r_ctl = RCTL_ELS_SCTL;
2388 u_int type  = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2389 int size = sizeof(LS_RJT);
2390 u_int my_mtu = fi->g.my_mtu;
2391 	ENTER("tx_ls_rjt");
2392 	fi->g.ls_rjt.cmnd_code = htonl(ELS_LS_RJT);
2393 	fi->g.ls_rjt.reason_code = htonl((reason_code << 16) | expln_code);
2394 	fi->g.type_of_frame = FC_ELS;
2395 	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.ls_rjt, size);
2396 	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, ELS_LS_RJT);
2397 	fi->g.e_i++;
2398 	if (fi->g.e_i == MAX_PENDING_FRAMES)
2399 		fi->g.e_i = 0;
2400 	LEAVE("tx_ls_rjt");
2401 }
2402 
2403 static void tx_abts(struct fc_info *fi, u_int d_id, u_short ox_id)
2404 {
2405 int int_required = 1;
2406 u_int r_ctl = RCTL_BASIC_ABTS;
2407 u_int type  = TYPE_BLS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2408 int size = 0;
2409 u_int my_mtu = fi->g.my_mtu;
2410 	ENTER("tx_abts");
2411 	fi->g.type_of_frame = FC_BLS;
2412 	tx_exchange(fi, NULL, size, r_ctl, type, d_id, my_mtu, int_required, ox_id, RCTL_BASIC_ABTS);
2413 	LEAVE("tx_abts");
2414 }
2415 
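/* Validate a received PLOGI payload (Class 3 support, receive data
 * field size, payload length, concurrent and open sequence counts).
 * Returns 0 if acceptable, else (reason_code << 16) | explanation
 * to be used in the LS_RJT.
 */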
2416 static u_int plogi_ok(struct fc_info *fi, u_int *buff_addr, int size)
2417 {
2418 int ret_code = 0;
2419 u_short mtu = ntohl(*(buff_addr + 10)) & 0x00000FFF;
2420 u_short class3 = ntohl(*(buff_addr + 25)) >> 16;
2421 u_short class3_conc_seq = ntohl(*(buff_addr + 27)) >> 16;
2422 u_short open_seq = ntohl(*(buff_addr + 28)) >> 16;
2423 	DPRINTK1("mtu = %x class3 = %x conc_seq = %x open_seq = %x", mtu, class3, class3_conc_seq, open_seq);
2424 	size -= TACHYON_HEADER_LEN;
2425 	if (!(class3 & 0x8000)) {
2426 		DPRINTK1("Received PLOGI with class3 = %x", class3);
2427 		ret_code = (LOGICAL_ERR << 16) | NO_EXPLN;
2428 		return ret_code;
2429 	}
2430 	if (mtu < 256) {
2431 		DPRINTK1("Received PLOGI with MTU set to %x", mtu);
2432 		ret_code = (LOGICAL_ERR << 16) | RECV_FIELD_SIZE;
2433 		return ret_code;
2434 	}
2435 	if (size != PLOGI_LEN) {
2436 		DPRINTK1("Received PLOGI of size %x", size);
2437 		ret_code = (LOGICAL_ERR << 16) | INV_PAYLOAD_LEN;
2438 		return ret_code;
2439 	}
2440 	if (class3_conc_seq == 0) {
2441 		DPRINTK1("Received PLOGI with conc_seq == 0");
2442 		ret_code = (LOGICAL_ERR << 16) | CONC_SEQ;
2443 		return ret_code;
2444 	}
2445 	if (open_seq == 0) {
2446 		DPRINTK1("Received PLOGI with open_seq == 0");
2447 		ret_code = (LOGICAL_ERR << 16) | NO_EXPLN;
2448 		return ret_code;
2449 	}
2450 
2451 	/* Could potentially check for more fields, but might end up
2452 	   not talking to most of the devices. ;-) */
2453 	/* Things that could get checked are:
2454 	   common_features = 0x8800
2455 	   total_concurrent_seq = at least 1
2456 	*/
2457 	return ret_code;
2458 }
2459 
2460 static void tx_acc(struct fc_info *fi, u_int d_id, u_short received_ox_id)
2461 {
2462 int int_required = 0;
2463 u_int r_ctl = RCTL_ELS_SCTL;
2464 u_int type  = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2465 int size = sizeof(ACC);
2466 u_int my_mtu = fi->g.my_mtu;
2467 	ENTER("tx_acc");
2468 	fi->g.acc.cmnd_code = htonl(ELS_ACC);
2469 	fi->g.type_of_frame = FC_ELS;
2470 	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.acc, size);
2471 	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, ELS_ACC);
2472 	fi->g.e_i++;
2473 	if (fi->g.e_i == MAX_PENDING_FRAMES)
2474 		fi->g.e_i = 0;
2475 	LEAVE("tx_acc");
2476 }
2477 
2478 
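/* Send a Name Server request to the Directory Server: RFC_4 registers
 * our FC-4 types (IP and SCSI), GP_ID4 asks for the IDs of the ports
 * registered with the fabric.
 */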
2479 static void tx_name_server_req(struct fc_info *fi, u_int req)
2480 {
2481 int int_required = 1, i, size = 0;
2482 u_short ox_id = OX_ID_FIRST_SEQUENCE;
2483 u_int type  = TYPE_FC_SERVICES | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2484 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_CONTROL;
2485 u_int my_mtu = fi->g.my_mtu, d_id = DIRECTORY_SERVER;
2486 CT_HDR ct_hdr;
2487 	ENTER("tx_name_server_req");
2488 	/* Fill up CT_Header */
2489 	ct_hdr.rev_in_id = htonl(FC_CT_REV);
2490 	ct_hdr.fs_type = DIRECTORY_SERVER_APP;
2491 	ct_hdr.fs_subtype = NAME_SERVICE;
2492 	ct_hdr.options = 0;
2493 	ct_hdr.resv1 = 0;
2494 	ct_hdr.cmnd_resp_code = htons(req >> 16);
2495 	ct_hdr.max_res_size = 0;
2496 	ct_hdr.resv2 = 0;
2497 	ct_hdr.reason_code = 0;
2498 	ct_hdr.expln_code = 0;
2499 	ct_hdr.vendor_unique = 0;
2500 
2501 	fi->g.type_of_frame = FC_ELS;
2502 	switch(req) {
2503 		case FCS_RFC_4:
2504 			memcpy(&(fi->g.rfc_4.ct_hdr), &ct_hdr, sizeof(CT_HDR));
2505 			fi->g.rfc_4.s_id = htonl(fi->g.my_id);
2506 			for (i = 0; i < 32; i++)
2507 				fi->g.rfc_4.bit_map[i] = 0;
2508 			/* We support IP & SCSI */
2509 			fi->g.rfc_4.bit_map[2] = 0x01;
2510 			fi->g.rfc_4.bit_map[3] = 0x20;
2511 			size = sizeof(RFC_4);
2512 			memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.rfc_4, size);
2513 			tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, ox_id, req);
2514 			break;
2515 		case FCS_GP_ID4:
2516 			memcpy(&(fi->g.gp_id4.ct_hdr), &ct_hdr, sizeof(CT_HDR));
2517 			fi->g.gp_id4.port_type = htonl(PORT_TYPE_NX_PORTS);
2518 			size = sizeof(GP_ID4);
2519 			memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.gp_id4, size);
2520 			tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, ox_id, req);
2521 			break;
2522 	}
2523 	fi->g.e_i++;
2524 	if (fi->g.e_i == MAX_PENDING_FRAMES)
2525 		fi->g.e_i = 0;
2526 	LEAVE("tx_name_server_req");
2527 }
2528 
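/* Send a State Change Registration to the Fabric Controller so that
 * we are notified (via RSCN) of changes in the fabric.
 */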
2529 static void tx_scr(struct fc_info *fi)
2530 {
2531 int int_required = 1, size = sizeof(SCR);
2532 u_short ox_id = OX_ID_FIRST_SEQUENCE;
2533 u_int type  = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2534 u_int r_ctl = RCTL_ELS_UCTL;
2535 u_int my_mtu = fi->g.my_mtu, d_id = FABRIC_CONTROLLER;
2536 	ENTER("tx_scr");
2537 	fi->g.scr.cmnd_code = htonl(ELS_SCR);
2538 	fi->g.scr.reg_function = htonl(FULL_REGISTRATION);
2539 	fi->g.type_of_frame = FC_ELS;
2540 	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.scr, size);
2541 	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, ox_id, ELS_SCR);
2542 	fi->g.e_i++;
2543 	if (fi->g.e_i == MAX_PENDING_FRAMES)
2544 		fi->g.e_i = 0;
2545 	LEAVE("tx_scr");
2546 }
2547 
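/* Revalidate old logins after a link-up: ADISC every non-fabric node
 * on our list, then start port discovery from a timer so the ADISC
 * replies have time to come back.
 */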
2548 static void perform_adisc(struct fc_info *fi)
2549 {
2550 int count = 0;
	/* port_discovery will be set back to TRUE when the timer expires
	 * in a PLDA environment.
	 */
2553 	fi->g.port_discovery = FALSE;
2554 
2555 	if (fi->node_info_list) {
2556 		struct fc_node_info *temp_list = fi->node_info_list;
2557 		while(temp_list) {
2558 			/* Tx ADISC to all non-fabric based
2559 	 	 	 * entities.
2560 	 	 	 */
2561 			if ((temp_list->d_id & 0xFF0000) != 0xFF0000)
2562 				tx_adisc(fi, ELS_ADISC, temp_list->d_id, OX_ID_FIRST_SEQUENCE);
2563 			temp_list = temp_list->next;
2564 			udelay(20);
2565 			count++;
2566 		}
2567 	}
	/* Perform Port Discovery after the timer expires.
	 * We are giving the ADISCed nodes time to respond
	 * so that we don't have to perform PLOGI to those whose
	 * logins are _still_ valid.
2572 	 */
2573 	fi->explore_timer.function = port_discovery_timer;
2574 	fi->explore_timer.data = (unsigned long)fi;
2575 	fi->explore_timer.expires = RUN_AT((count*3*HZ)/100);
2576 	init_timer(&fi->explore_timer);
2577 	add_timer(&fi->explore_timer);
2578 }
2579 
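/* Walk the list of D_IDs returned by the Name Server (GP_ID4 ACC),
 * PLOGIing to new ports and ADISCing logged-out ones. The last entry
 * carries a control code of 0x80.
 */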
2580 static void explore_fabric(struct fc_info *fi, u_int *buff_addr)
2581 {
2582 u_int *addr = buff_addr + 12; /* index into payload */
2583 u_char control_code;
2584 u_int d_id;
2585 int count = 0;
2586 	ENTER("explore_fabric");
2587 	DPRINTK1("entering explore_fabric");
2588 
2589 	/*fi->g.perform_adisc = TRUE;
2590 	fi->g.explore_fabric = TRUE;
2591 	perform_adisc(fi);*/
2592 
2593 	do {
2594 		d_id = ntohl(*addr) & 0x00FFFFFF;
2595 		if (d_id != fi->g.my_id) {
2596 			if (sid_logged_in(fi, d_id) == NODE_NOT_PRESENT)
2597 				tx_logi(fi, ELS_PLOGI, d_id);
2598 			else
2599 			if (sid_logged_in(fi, d_id) == NODE_LOGGED_OUT)
2600 				tx_adisc(fi, ELS_ADISC, d_id, OX_ID_FIRST_SEQUENCE);
2601 			count++;
2602 		}
2603 		control_code = (ntohl(*addr) & 0xFF000000) >> 24;
2604 		addr++;
2605 		DPRINTK1("cc = %x, d_id = %x", control_code, d_id);
2606 	} while (control_code != 0x80);
2607 
2608 	fi->explore_timer.function = fabric_explore_timer;
2609 	fi->explore_timer.data = (unsigned long)fi;
2610 	/* We give 30 msec for each device to respond and then send out
2611 	 * our SCSI enquiries.
2612 	 */
2613 	fi->explore_timer.expires = RUN_AT((count*3*HZ)/100);
2614 	init_timer(&fi->explore_timer);
2615 	add_timer(&fi->explore_timer);
2616 
2617 	DPRINTK1("leaving explore_fabric");
2618 	LEAVE("explore_fabric");
2619 }
2620 
2621 static void fabric_explore_timer(unsigned long data)
2622 {
2623 struct fc_info *fi = (struct fc_info*)data;
2624 	del_timer(&fi->explore_timer);
2625 
2626 	if ((fi->g.loop_up == TRUE) && (fi->g.ptp_up == FALSE)) {
2627 		/* Initiate Local Port Discovery on the Local Loop.
2628 		 */
2629 		fi->g.port_discovery = TRUE;
2630 		fi->g.alpa_list_index = 1;
2631 		local_port_discovery(fi);
2632 	}
2633 	fi->g.explore_fabric = FALSE;
2634 	return;
2635 }
2636 
2637 static void port_discovery_timer(unsigned long data)
2638 {
2639 struct fc_info *fi = (struct fc_info*)data;
2640 	del_timer(&fi->explore_timer);
2641 
2642 	if ((fi->g.loop_up == TRUE) && (fi->g.explore_fabric != TRUE)) {
2643 		fi->g.port_discovery = TRUE;
2644 		fi->g.alpa_list_index = 1;
2645 		local_port_discovery(fi);
2646 	}
2647 	fi->g.perform_adisc = FALSE;
2648 	return;
2649 }
2650 
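/* Remember which ELS/FCS command was sent on a given OX_ID so that the
 * reply (ACC/LS_RJT) can later be matched back to it.
 */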
2651 static void add_to_ox_id_list(struct fc_info *fi, u_int transaction_id, u_int cmnd_code)
2652 {
2653 struct ox_id_els_map *p, *q = fi->ox_id_list, *r = NULL;
2654 int size = sizeof(struct ox_id_els_map);
2655 	while (q != NULL) {
2656 		r = q;
2657 		q = q->next;
2658 	}
2659 	p = (struct ox_id_els_map *)kmalloc(size, GFP_ATOMIC);
2660 	if (p == NULL) {
2661 		T_MSG("kmalloc failed in add_to_ox_id_list()");
2662 		return;
2663 	}
2664 	p->ox_id = transaction_id;
2665 	p->els = cmnd_code;
2666 	p->next = NULL;
2667 	if (fi->ox_id_list == NULL)
2668 		fi->ox_id_list = p;
2669 	else
2670 		r->next = p;
2671 	return;
2672 }
2673 
2674 static u_int remove_from_ox_id_list(struct fc_info *fi, u_short received_ox_id)
2675 {
2676 struct ox_id_els_map *p = fi->ox_id_list, *q = fi->ox_id_list;
2677 u_int els_type;
2678 	while (q != NULL) {
2679 		if (q->ox_id == received_ox_id) {
2680 
			if (q == fi->ox_id_list)
				fi->ox_id_list = fi->ox_id_list->next;
			else
			if (q->next == NULL)
				p->next = NULL;
			else
				p->next = q->next;
2688 
2689 			els_type = q->els;
2690 			kfree(q);
2691 			return els_type;
2692 		}
2693 		p = q;
2694 		q = q->next;
2695 	}
2696 	if (q == NULL)
2697 		DPRINTK2("Could not find ox_id %x in ox_id_els_map", received_ox_id);
2698 	return 0;
2699 }
2700 
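/* Fill in the Tachyon header (SOF/EOF, destination AL_PA, R_CTL/D_ID,
 * S_ID, TYPE/F_CTL, SEQ_ID, OX_ID/RX_ID) for the next outbound frame.
 * If 'data' is non-NULL, a network header with the FC MAC addresses is
 * filled in as well (for IP frames).
 */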
2701 static void build_tachyon_header(struct fc_info *fi, u_int my_id, u_int r_ctl, u_int d_id, u_int type, u_char seq_id, u_char df_ctl, u_short ox_id, u_short rx_id, char *data)
2702 {
2703 u_char alpa = d_id & 0x0000FF;
2704 u_int dest_ddaa = d_id &0xFFFF00;
2705 
2706 	ENTER("build_tachyon_header");
2707 	DPRINTK("d_id = %x, my_ddaa = %x", d_id, fi->g.my_ddaa);
2708 	/* Does it have to go to/thru a Fabric? */
2709 	if ((dest_ddaa != 0) && ((d_id == F_PORT) || (fi->g.fabric_present && (dest_ddaa != fi->g.my_ddaa))))
2710 		alpa = 0x00;
2711 	fi->g.tach_header.resv = 0x00000000;
2712 	fi->g.tach_header.sof_and_eof = SOFI3 | EOFN;
2713 	fi->g.tach_header.dest_alpa = alpa;
	/* Set LCr properly to have enough credit */
2715 	if (alpa == REPLICATE)
2716 		fi->g.tach_header.lcr_and_time_stamp = htons(0xC00);/* LCr=3 */
2717 	else
2718 		fi->g.tach_header.lcr_and_time_stamp = 0;
2719 	fi->g.tach_header.r_ctl_and_d_id = htonl(r_ctl | d_id);
2720 	fi->g.tach_header.vc_id_and_s_id = htonl(my_id);
2721 	fi->g.tach_header.type_and_f_cntl = htonl(type);
2722 	fi->g.tach_header.seq_id = seq_id;
2723 	fi->g.tach_header.df_cntl = df_ctl;
2724 	fi->g.tach_header.seq_cnt = 0;
2725 	fi->g.tach_header.ox_id = htons(ox_id);
2726 	fi->g.tach_header.rx_id = htons(rx_id);
2727 	fi->g.tach_header.ro = 0;
2728 	if (data) {
2729 		/* We use the Seq_Count to keep track of IP frames in the
2730 		 * OCI_interrupt handler. Initial Seq_Count of IP frames is 1.
2731 		 */
2732 		if (fi->g.type_of_frame == FC_BROADCAST)
2733 			fi->g.tach_header.seq_cnt = htons(0x1);
2734 		else
2735 			fi->g.tach_header.seq_cnt = htons(0x2);
2736 		fi->g.tach_header.nw_header.d_naa = htons(0x1000);
2737 		fi->g.tach_header.nw_header.s_naa = htons(0x1000);
2738 		memcpy(&(fi->g.tach_header.nw_header.dest_high), data, 2);
2739 		memcpy(&(fi->g.tach_header.nw_header.dest_low), data + 2, 4);
2740 		memcpy(&(fi->g.tach_header.nw_header.source_high), data + 6, 2);
2741 		memcpy(&(fi->g.tach_header.nw_header.source_low), data + 8, 4);
2742 	}
2743 	LEAVE("build_tachyon_header");
2744 }
2745 
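/* Fill in an Extended Descriptor Block pointing at the payload buffer,
 * rounding the length up to a multiple of 4 bytes.
 */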
2746 static void build_EDB(struct fc_info *fi, char *data, u_short flags, u_short len)
2747 {
2748 	fi->g.edb.buf_addr = ntohl((u_int)virt_to_bus(data));
2749 	fi->g.edb.ehf = ntohs(flags);
2750 	if (len % 4)
2751 		len += (4 - (len % 4));
2752 	fi->g.edb.buf_len = ntohs(len);
2753 }
2754 
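/* Fill in the Outbound Descriptor Block: total length, completion and
 * interrupt control bits, header and EDB bus addresses, and a
 * transaction ID that identifies the frame when its outbound
 * completion message arrives.
 */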
2755 static void build_ODB(struct fc_info *fi, u_char seq_id, u_int d_id, u_int len, u_int cntl, u_short mtu, u_short ox_id, u_short rx_id, int NW_header, int int_required, u_int frame_class)
2756 {
2757 	fi->g.odb.seq_d_id = htonl(seq_id << 24 | d_id);
2758 	fi->g.odb.tot_len = len;
2759 	if (NW_header)
2760 		fi->g.odb.tot_len += NW_HEADER_LEN;
2761 	if (fi->g.odb.tot_len % 4)
2762 		fi->g.odb.tot_len += (4 - (fi->g.odb.tot_len % 4));
2763 	fi->g.odb.tot_len = htonl(fi->g.odb.tot_len);
2764 	switch(int_required) {
2765 		case NO_COMP_AND_INT:
2766 			fi->g.odb.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | ODB_NO_INT | ODB_NO_COMP | cntl);
2767 			break;
2768 		case INT_AND_COMP_REQ:
2769 			fi->g.odb.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | cntl);
2770 			break;
2771 		case NO_INT_COMP_REQ:
2772 			fi->g.odb.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | ODB_NO_INT | cntl);
2773 			break;
2774 	}
2775 	fi->g.odb.rx_id = htons(rx_id);
2776 	fi->g.odb.cs_enable = 0;
2777 	fi->g.odb.cs_seed = htons(1);
2778 
2779 	fi->g.odb.hdr_addr = htonl(virt_to_bus(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx]));
2780 	fi->g.odb.frame_len = htons(mtu);
2781 
2782 	if (NW_header) {
2783 		/* The pointer to the sk_buff is in here. Freed up when the
2784 		 * OCI_interrupt is received.
2785 		 */
2786 		fi->g.odb.trans_id = htonl(frame_class);
2787 		fi->g.odb.hdr_len = TACHYON_HEADER_LEN + NW_HEADER_LEN;
2788 	}
2789 	else {
2790 		/* helps in tracking transmitted OX_IDs */
2791 		fi->g.odb.trans_id = htonl((frame_class & 0xFFFF0000) | ox_id);
2792 		fi->g.odb.hdr_len = TACHYON_HEADER_LEN;
2793 	}
2794 	fi->g.odb.hdr_len = htons(fi->g.odb.hdr_len);
2795 
2796 	fi->g.odb.edb_addr = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
2797 }
2798 
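/* Fill in the common PLOGI/FLOGI/ACC payload: the common service
 * parameters plus the Class 1/2/3 service parameter pages (only
 * Class 3 is marked valid).
 */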
2799 static void fill_login_frame(struct fc_info *fi, u_int logi)
2800 {
2801 int i;
2802 	fi->g.login.ls_cmnd_code= htonl(logi);
2803 	fi->g.login.fc_ph_version = htons(PH_VERSION);
2804 	if (fi->g.loop_up)
2805 		fi->g.login.buff_to_buff_credit = htons(LOOP_BB_CREDIT);
2806 	else
2807 	if (fi->g.ptp_up)
2808 		fi->g.login.buff_to_buff_credit = htons(PT2PT_BB_CREDIT);
2809 	if ((logi != ELS_FLOGI) || (logi == ELS_ACC))
2810 		fi->g.login.common_features = htons(PLOGI_C_F);
2811 	else
2812 	if (logi == ELS_FLOGI)
2813 		fi->g.login.common_features = htons(FLOGI_C_F);
2814 	fi->g.login.recv_data_field_size = htons(TACH_FRAME_SIZE);
2815 	fi->g.login.n_port_total_conc_seq = htons(CONCURRENT_SEQUENCES);
2816 	fi->g.login.rel_off_by_info_cat = htons(RO_INFO_CATEGORY);
2817 	fi->g.login.ED_TOV = htonl(E_D_TOV);
2818 	fi->g.login.n_port_name_high = htonl(N_PORT_NAME_HIGH);
2819 	fi->g.login.n_port_name_low = htonl(N_PORT_NAME_LOW);
2820 	fi->g.login.node_name_high = htonl(NODE_NAME_HIGH);
2821 	fi->g.login.node_name_low = htonl(NODE_NAME_LOW);
2822 
2823 	/* Fill Class 1 parameters */
2824 	fi->g.login.c_of_s[0].service_options = htons(0);
2825 	fi->g.login.c_of_s[0].initiator_ctl = htons(0);
2826 	fi->g.login.c_of_s[0].recipient_ctl = htons(0);
2827 	fi->g.login.c_of_s[0].recv_data_field_size = htons(0);
2828 	fi->g.login.c_of_s[0].concurrent_sequences = htons(0);
2829 	fi->g.login.c_of_s[0].n_port_end_to_end_credit = htons(0);
2830 	fi->g.login.c_of_s[0].open_seq_per_exchange = htons(0);
2831 	fi->g.login.c_of_s[0].resv = htons(0);
2832 
2833 	/* Fill Class 2 parameters */
2834 	fi->g.login.c_of_s[1].service_options = htons(0);
2835 	fi->g.login.c_of_s[1].initiator_ctl = htons(0);
2836 	fi->g.login.c_of_s[1].recipient_ctl = htons(0);
2837 	fi->g.login.c_of_s[1].recv_data_field_size = htons(0);
2838 	fi->g.login.c_of_s[1].concurrent_sequences = htons(0);
2839 	fi->g.login.c_of_s[1].n_port_end_to_end_credit = htons(0);
2840 	fi->g.login.c_of_s[1].open_seq_per_exchange = htons(0);
2841 	fi->g.login.c_of_s[1].resv = htons(0);
2842 
2843 	/* Fill Class 3 parameters */
2844 	if (logi == ELS_FLOGI)
2845 		fi->g.login.c_of_s[2].service_options  = htons(SERVICE_VALID | SEQUENCE_DELIVERY);
2846 	else
2847 		fi->g.login.c_of_s[2].service_options  = htons(SERVICE_VALID);
2848 	fi->g.login.c_of_s[2].initiator_ctl = htons(0);
2849 	fi->g.login.c_of_s[2].recipient_ctl = htons(0);
2850 	fi->g.login.c_of_s[2].recv_data_field_size = htons(TACH_FRAME_SIZE);
2851 	fi->g.login.c_of_s[2].concurrent_sequences = htons(CLASS3_CONCURRENT_SEQUENCE);
2852 	fi->g.login.c_of_s[2].n_port_end_to_end_credit = htons(0);
2853 	fi->g.login.c_of_s[2].open_seq_per_exchange = htons(CLASS3_OPEN_SEQUENCE);
2854 	fi->g.login.c_of_s[2].resv = htons(0);
2855 
2856 	for(i = 0; i < 4; i++) {
2857 		fi->g.login.resv[i] = 0;
2858 		fi->g.login.vendor_version_level[i] = 0;
2859 	}
2860 }
2861 
2862 
2863 /* clear the Interrupt Latch on the (i)chip, so that you can receive
 * Interrupts from Tachyon in the future
2865  */
2866 static void reset_latch(struct fc_info *fi)
2867 {
2868 	writel(readl(fi->i_r.ptr_ichip_hw_status_reg) | ICHIP_HSR_INT_LATCH, fi->i_r.ptr_ichip_hw_status_reg);
2869 }
2870 
2871 static void update_OCQ_indx(struct fc_info *fi)
2872 {
2873 	fi->q.ocq_prod_indx++;
2874 	if (fi->q.ocq_prod_indx == OCQ_LENGTH)
2875 		fi->q.ocq_prod_indx = 0;
2876 	writel(fi->q.ocq_prod_indx, fi->t_r.ptr_ocq_prod_indx_reg);
2877 }
2878 
2879 static void update_IMQ_indx(struct fc_info *fi, int count)
2880 {
2881 	fi->q.imq_cons_indx += count;
2882 	if (fi->q.imq_cons_indx >= IMQ_LENGTH)
2883 		fi->q.imq_cons_indx -= IMQ_LENGTH;
2884 	writel(fi->q.imq_cons_indx, fi->t_r.ptr_imq_cons_indx_reg);
2885 }
2886 
2887 static void update_SFSBQ_indx(struct fc_info *fi)
2888 {
2889 	fi->q.sfsbq_prod_indx++;
2890 	if (fi->q.sfsbq_prod_indx == SFSBQ_LENGTH)
2891 		fi->q.sfsbq_prod_indx = 0;
2892 	writel(fi->q.sfsbq_prod_indx, fi->t_r.ptr_sfsbq_prod_reg);
2893 }
2894 
2895 static void update_MFSBQ_indx(struct fc_info *fi, int count)
2896 {
2897 	fi->q.mfsbq_prod_indx += count;
2898 	if (fi->q.mfsbq_prod_indx >= MFSBQ_LENGTH)
2899 		fi->q.mfsbq_prod_indx -= MFSBQ_LENGTH;
2900 	writel(fi->q.mfsbq_prod_indx, fi->t_r.ptr_mfsbq_prod_reg);
2901 }
2902 
2903 
2904 static void update_tachyon_header_indx(struct fc_info *fi)
2905 {
2906 	fi->q.tachyon_header_indx++;
2907 	if (fi->q.tachyon_header_indx == NO_OF_TACH_HEADERS)
2908 		fi->q.tachyon_header_indx = 0;
2909 }
2910 
2911 static void update_EDB_indx(struct fc_info *fi)
2912 {
2913 	fi->q.edb_buffer_indx++;
2914 	if (fi->q.edb_buffer_indx == EDB_LEN)
2915 		fi->q.edb_buffer_indx = 0;
2916 }
2917 
2918 static int iph5526_open(struct net_device *dev)
2919 {
2920 	netif_start_queue(dev);
2921 	MOD_INC_USE_COUNT;
2922 	return 0;
2923 }
2924 
2925 static int iph5526_close(struct net_device *dev)
2926 {
2927 	netif_stop_queue(dev);
2928 	MOD_DEC_USE_COUNT;
2929 	return 0;
2930 }
2931 
2932 static void iph5526_timeout(struct net_device *dev)
2933 {
2934 	struct fc_info *fi = (struct fc_info*)dev->priv;
2935 	printk(KERN_WARNING "%s: timed out on send.\n", dev->name);
2936 	fi->fc_stats.tx_dropped++;
2937 	dev->trans_start = jiffies;
2938 	netif_wake_queue(dev);
2939 }
2940 
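/* hard_start_xmit routine: strip the FC pseudo header, hand IP and ARP
 * frames to the Fibre Channel transmit paths and update the stats.
 * IP skbs are freed on the outbound completion interrupt; ARP and
 * failed frames are freed here.
 */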
2941 static int iph5526_send_packet(struct sk_buff *skb, struct net_device *dev)
2942 {
2943 	struct fc_info *fi = (struct fc_info*)dev->priv;
2944 	int status = 0;
2945 	short type = 0;
2946 	u_long flags;
2947 	struct fcllc *fcllc;
2948 
2949 	ENTER("iph5526_send_packet");
2950 
2951 	netif_stop_queue(dev);
2952 	/* Strip off the pseudo header.
2953 	 */
2954 	skb->data = skb->data + 2*FC_ALEN;
2955 	skb->len = skb->len - 2*FC_ALEN;
2956 	fcllc = (struct fcllc *)skb->data;
2957 	type = ntohs(fcllc->ethertype);
2958 
2959 	spin_lock_irqsave(&fi->fc_lock, flags);
2960 	switch(type) {
2961 		case ETH_P_IP:
2962 			status = tx_ip_packet(skb, skb->len, fi);
2963 			break;
2964 		case ETH_P_ARP:
2965 			status = tx_arp_packet(skb->data, skb->len, fi);
2966 			break;
2967 		default:
2968 			T_MSG("WARNING!!! Received Unknown Packet Type... Discarding...");
2969 			fi->fc_stats.rx_dropped++;
2970 			break;
2971 	}
2972 	spin_unlock_irqrestore(&fi->fc_lock, flags);
2973 
2974 	if (status) {
2975 		fi->fc_stats.tx_bytes += skb->len;
2976 		fi->fc_stats.tx_packets++;
2977 	}
2978 	else
2979 		fi->fc_stats.tx_dropped++;
2980 	dev->trans_start = jiffies;
2981 	/* We free up the IP buffers in the OCI_interrupt handler.
2982 	 * status == 0 implies that the frame was not transmitted. So the
2983 	 * skb is freed here.
2984 	 */
2985 	if ((type == ETH_P_ARP) || (status == 0))
2986 		dev_kfree_skb(skb);
2987 	netif_wake_queue(dev);
2988 	LEAVE("iph5526_send_packet");
2989 	return 0;
2990 }
2991 
2992 static int iph5526_change_mtu(struct net_device *dev, int mtu)
2993 {
2994 	return 0;
2995 }
2996 
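/* Transmit an IP packet: look up the destination in the address cache
 * and, if the node is logged in, send it as a single exchange. A
 * BROADCAST destination is unicast to every node on the local loop.
 * If the node is not logged in, the frame is dropped and a PLOGI is
 * sent instead.
 */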
2997 static int tx_ip_packet(struct sk_buff *skb, unsigned long len, struct fc_info *fi)
2998 {
2999 u_int d_id;
3000 int int_required = 1;
3001 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_DATA;
3002 u_int type = TYPE_LLC_SNAP;
3003 u_short ox_id = OX_ID_FIRST_SEQUENCE;
3004 u_int mtu;
3005 struct fc_node_info *q;
3006 
3007 	ENTER("tx_ip_packet");
3008 	q = look_up_cache(fi, skb->data - 2*FC_ALEN);
3009 	if (q != NULL) {
3010 		d_id = q->d_id;
3011 		DPRINTK("Look-Up Cache Succeeded for d_id = %x", d_id);
3012 		mtu = q->mtu;
3013 		if (q->login == LOGIN_COMPLETED){
3014 			fi->g.type_of_frame = FC_IP;
3015 			return tx_exchange(fi, skb->data, len, r_ctl, type, d_id, mtu, int_required, ox_id, virt_to_bus(skb));
3016 		}
3017 
3018 		if (q->d_id == BROADCAST) {
3019 		struct fc_node_info *p = fi->node_info_list;
3020 		int return_value = FALSE;
3021 			fi->g.type_of_frame = FC_BROADCAST;
3022 			/* Do unicast to local nodes.
3023 			 */
3024 			int_required = 0;
3025 			while(p != NULL) {
3026 				d_id = p->d_id;
3027 				if ((d_id & 0xFFFF00) == fi->g.my_ddaa)
3028 					return_value |= tx_exchange(fi, skb->data, len, r_ctl, type, d_id, fi->g.my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3029 				p = p->next;
3030 			}
3031 			kfree(q);
3032 			return return_value;
3033 		}
3034 
3035 		if (q->login != LOGIN_COMPLETED) {
3036 			DPRINTK1("Node not logged in... Txing PLOGI to %x", d_id);
3037 			/* FIXME: we are dumping the frame here */
3038 			tx_logi(fi, ELS_PLOGI, d_id);
3039 		}
3040 	}
3041 	DPRINTK2("Look-Up Cache Failed");
3042 	LEAVE("tx_ip_packet");
3043 	return 0;
3044 }
3045 
3046 static int tx_arp_packet(char *data, unsigned long len, struct fc_info *fi)
3047 {
3048 u_int opcode = data[ARP_OPCODE_0];
3049 u_int d_id;
3050 int int_required = 0, return_value = FALSE;
3051 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_DATA;
3052 u_int type = TYPE_LLC_SNAP;
3053 u_short ox_id = OX_ID_FIRST_SEQUENCE;
3054 u_int my_mtu = fi->g.my_mtu;
3055 	ENTER("tx_arp_packet");
3056 
3057 	opcode = opcode << 8 | data[ARP_OPCODE_1];
3058 	fi->g.type_of_frame = FC_IP;
3059 
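	/* An ARP request is broadcast twice: once as handed down by the
	 * stack and once with the hardware type byte patched to 0x01,
	 * since some devices apparently only answer that encoding (see
	 * the HW_TYPE notes below).
	 */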
3060 	if (opcode == ARPOP_REQUEST) {
3061 	struct fc_node_info *q = fi->node_info_list;
3062 		d_id = BROADCAST;
3063 		return_value |= tx_exchange(fi, data, len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3064 		/* Some devices support HW_TYPE 0x01 */
3065 		memcpy(fi->g.arp_buffer, data - 2*FC_ALEN, len + 2*FC_ALEN);
3066 		fi->g.arp_buffer[9 + 2*FC_ALEN] = 0x01;
3067 		return_value |= tx_exchange(fi, (char *)(fi->g.arp_buffer + 2*FC_ALEN), len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3068 
3069 		/* Do unicast to local nodes.
3070 		 */
3071 		while(q != NULL) {
3072 			fi->g.type_of_frame = FC_BROADCAST;
3073 			d_id = q->d_id;
3074 			if ((d_id & 0xFFFF00) == fi->g.my_ddaa) {
3075 				return_value |= tx_exchange(fi, data, len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
				/* Some devices support HW_TYPE 0x01 */
3077 				memcpy(fi->g.arp_buffer, data - 2*FC_ALEN, len + 2*FC_ALEN);
3078 				fi->g.arp_buffer[9 + 2*FC_ALEN] = 0x01;
3079 				return_value |= tx_exchange(fi, (char *)(fi->g.arp_buffer + 2*FC_ALEN), len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3080 			}
3081 			q = q->next;
3082 		}
3083 		return return_value;
3084 	}
3085 	else
3086 	if (opcode == ARPOP_REPLY) {
3087 	struct fc_node_info *q; u_int mtu;
3088 		DPRINTK("We are sending out an ARP reply");
3089 		q = look_up_cache(fi, data - 2*FC_ALEN);
3090 		if (q != NULL) {
3091 			d_id = q->d_id;
3092 			DPRINTK("Look-Up Cache Succeeded for d_id = %x", d_id);
3093 			mtu = q->mtu;
3094 			if (q->login == LOGIN_COMPLETED){
3095 				tx_exchange(fi, data, len, r_ctl, type, d_id, mtu, int_required, ox_id, TYPE_LLC_SNAP);
3096 				/* Some devices support HW_TYPE 0x01 */
3097 				memcpy(fi->g.arp_buffer, data - 2*FC_ALEN, len + 2*FC_ALEN);
3098 				fi->g.arp_buffer[9 + 2*FC_ALEN] = 0x01;
3099 				return tx_exchange(fi, (char *)(fi->g.arp_buffer + 2*FC_ALEN), len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3100 			}
3101 			else {
3102 				DPRINTK1("Node not logged in... Txing PLOGI to %x", d_id);
3103 				tx_logi(fi, ELS_PLOGI, d_id); /* FIXME: we are dumping the frame here */
3104 			}
3105 		}
3106 		DPRINTK2("Look-Up Cache Failed");
3107 	}
3108 	else {
3109 		T_MSG("Warning!!! Invalid Opcode in ARP Packet!");
3110 	}
3111 	LEAVE("tx_arp_packet");
3112 	return 0;
3113 }
3114 
3115 
3116 static void rx_net_packet(struct fc_info *fi, u_char *buff_addr, int payload_size)
3117 {
3118 struct net_device *dev = fi->dev;
3119 struct sk_buff *skb;
3120 u_int skb_size = 0;
3121 struct fch_hdr fch;
3122 	ENTER("rx_net_packet");
3123 	skb_size = payload_size - TACHYON_HEADER_LEN;
3124 	DPRINTK("skb_size = %d", skb_size);
3125 	fi->fc_stats.rx_bytes += skb_size - 2;
3126 	skb = dev_alloc_skb(skb_size);
3127 	if (skb == NULL) {
3128 		printk(KERN_NOTICE "%s: In rx_net_packet() Memory squeeze, dropping packet.\n", dev->name);
3129 		fi->fc_stats.rx_dropped++;
3130 		return;
3131 	}
3132 	/* Skip over the Tachyon Frame Header.
3133 	 */
3134 	buff_addr += TACHYON_HEADER_LEN;
3135 
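	/* The 6-byte destination and source addresses sit at offsets 2 and
	 * 10 of the network header; copy them back-to-back at the front so
	 * that fc_type_trans() sees the usual address pair.
	 */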
3136 	memcpy(fch.daddr, buff_addr + 2, FC_ALEN);
3137 	memcpy(fch.saddr, buff_addr + 10, FC_ALEN);
3138 	buff_addr += 2;
3139 	memcpy(buff_addr, fch.daddr, FC_ALEN);
3140 	memcpy(buff_addr + 6, fch.saddr, FC_ALEN);
3141 	skb_reserve(skb, 2);
3142 	memcpy(skb_put(skb, skb_size - 2), buff_addr, skb_size - 2);
3143 	skb->dev = dev;
3144 	skb->protocol = fc_type_trans(skb, dev);
3145 	DPRINTK("protocol = %x", skb->protocol);
3146 
3147 	/* Hmmm... to accept HW Type 0x01 as well...
3148 	 */
3149 	if (skb->protocol == ntohs(ETH_P_ARP))
3150 		skb->data[1] = 0x06;
3151 	netif_rx(skb);
3152 	dev->last_rx = jiffies;
3153 	fi->fc_stats.rx_packets++;
3154 	LEAVE("rx_net_packet");
3155 }
3156 
3157 
3158 static void rx_net_mfs_packet(struct fc_info *fi, struct sk_buff *skb)
3159 {
3160 struct net_device *dev = fi->dev;
3161 struct fch_hdr fch;
3162 	ENTER("rx_net_mfs_packet");
3163 	/* Construct your Hard Header */
3164 	memcpy(fch.daddr, skb->data + 2, FC_ALEN);
3165 	memcpy(fch.saddr, skb->data + 10, FC_ALEN);
3166 	skb_pull(skb, 2);
3167 	memcpy(skb->data, fch.daddr, FC_ALEN);
3168 	memcpy(skb->data + 6, fch.saddr, FC_ALEN);
3169 	skb->dev = dev;
3170 	skb->protocol = fc_type_trans(skb, dev);
3171 	DPRINTK("protocol = %x", skb->protocol);
3172 	netif_rx(skb);
3173 	dev->last_rx = jiffies;
3174 	LEAVE("rx_net_mfs_packet");
3175 }
3176 
3177 static int tx_exchange(struct fc_info *fi, char *data, u_int len, u_int r_ctl, u_int type, u_int d_id, u_int mtu, int int_required, u_short tx_ox_id, u_int frame_class)
3178 {
3179 u_char df_ctl;
3180 int NW_flag = 0, h_size, return_value;
3181 u_short rx_id = RX_ID_FIRST_SEQUENCE;
3182 u_int tachyon_status;
3183 u_int my_id = fi->g.my_id;
3184 	ENTER("tx_exchange");
3185 
3186 	tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
3187 	DPRINTK("Tachyon Status = %x len = %d MTU = %d", tachyon_status, len, mtu);
3188 	if (tachyon_status & OSM_FROZEN) {
3189 		reset_tachyon(fi, ERROR_RELEASE);
3190 		reset_tachyon(fi, OCQ_RESET);
3191 		DPRINTK("Tachyon Status = %x len = %d MTU = %d", tachyon_status, len, mtu);
3192 	}
3193 	if (tx_ox_id == OX_ID_FIRST_SEQUENCE) {
3194 		switch(fi->g.type_of_frame) {
3195 			case FC_SCSI_READ:
3196 				tx_ox_id = fi->g.scsi_oxid | SCSI_READ_BIT;
3197 				break;
3198 			case FC_SCSI_WRITE:
3199 				tx_ox_id = fi->g.scsi_oxid;
3200 				break;
3201 			default:
3202 				tx_ox_id = fi->g.ox_id;
3203 				break;
3204 		}
3205 	}
3206 	else {
3207 		switch(fi->g.type_of_frame) {
3208 			case FC_SCSI_READ:
3209 				rx_id = fi->g.scsi_oxid | SCSI_READ_BIT;
3210 				break;
3211 			case FC_SCSI_WRITE:
3212 				rx_id = fi->g.scsi_oxid;
3213 				break;
3214 			case FC_BLS:
3215 				rx_id = RX_ID_FIRST_SEQUENCE;
3216 				break;
3217 			default:
3218 				rx_id = fi->g.ox_id;
3219 				break;
3220 		}
3221 	}
3222 
3223 	if (type == TYPE_LLC_SNAP) {
3224 		df_ctl = 0x20;
3225 		NW_flag = 1;
3226 		/* Multi Frame Sequence ? If yes, set RO bit */
3227 		if (len > mtu)
3228 			type |= RELATIVE_OFF_PRESENT;
3229 		build_tachyon_header(fi, my_id, r_ctl, d_id, type, fi->g.seq_id, df_ctl, tx_ox_id, rx_id, data - 2*FC_ALEN);
3230 	}
3231 	else {
3232 		df_ctl = 0;
3233 		/* Multi Frame Sequence ? If yes, set RO bit */
3234 		if (len > mtu)
3235 			type |= RELATIVE_OFF_PRESENT;
3236 		build_tachyon_header(fi, my_id, r_ctl, d_id, type, fi->g.seq_id, df_ctl, tx_ox_id, rx_id, NULL);
3237 	}
3238 
3239 	/* Get free Tachyon Headers and EDBs */
3240 	if (get_free_header(fi) || get_free_EDB(fi))
3241 		return 0;
3242 
3243 	if ((type & 0xFF000000) == TYPE_LLC_SNAP) {
3244 		h_size =  TACHYON_HEADER_LEN + NW_HEADER_LEN;
3245 		memcpy(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx], &(fi->g.tach_header), h_size);
3246 	}
3247 	else
3248 		memcpy(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx], &(fi->g.tach_header), TACHYON_HEADER_LEN);
3249 
3250 	return_value = tx_sequence(fi, data, len, mtu, d_id, tx_ox_id, rx_id, fi->g.seq_id, NW_flag, int_required, frame_class);
3251 
3252 	switch(fi->g.type_of_frame) {
3253 		case FC_SCSI_READ:
3254 		case FC_SCSI_WRITE:
3255 			update_scsi_oxid(fi);
3256 			break;
3257 		case FC_BLS:
3258 			break;
3259 		default:
3260 			fi->g.ox_id++;
3261 			if (fi->g.ox_id == 0xFFFF)
3262 				fi->g.ox_id = NOT_SCSI_XID;
3263 			break;
3264 	}
3265 
3266 	if (fi->g.seq_id == MAX_SEQ_ID)
3267 		fi->g.seq_id = 0;
3268 	else
3269 		fi->g.seq_id++;
3270 	LEAVE("tx_exchange");
3271 	return return_value;
3272 }
3273 
3274 static int tx_sequence(struct fc_info *fi, char *data, u_int len, u_int mtu, u_int d_id, u_short ox_id, u_short rx_id, u_char seq_id, int NW_flag, int int_required, u_int frame_class)
3275 {
3276 u_int cntl = 0;
3277 int return_value;
3278 	ENTER("tx_sequence");
3279 	build_EDB(fi, data, EDB_END, len);
3280 	memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
3281 	build_ODB(fi, seq_id, d_id, len, cntl, mtu, ox_id, rx_id, NW_flag, int_required, frame_class);
3282 	memcpy(fi->q.ptr_odb[fi->q.ocq_prod_indx], &(fi->g.odb), sizeof(ODB));
3283 	if (fi->g.link_up != TRUE) {
3284 		DPRINTK2("Fibre Channel Link not up. Dropping Exchange!");
3285 		return_value = FALSE;
3286 	}
3287 	else {
3288 		/* To be on the safe side, a check should be included
3289 		 * at this point to check if we are overrunning
3290 		 * Tachyon.
3291 		 */
3292 		update_OCQ_indx(fi);
3293 		return_value = TRUE;
3294 	}
3295 	update_EDB_indx(fi);
3296 	update_tachyon_header_indx(fi);
3297 	LEAVE("tx_sequence");
3298 	return return_value;
3299 }
3300 
3301 static int get_free_header(struct fc_info *fi)
3302 {
3303 u_short temp_ox_id;
3304 u_int *tach_header, initial_indx = fi->q.tachyon_header_indx;
3305 	/* Check if the header is in use.
3306 	 * We could have an outstanding command.
3307 	 * We should find a free slot as we can queue a
3308 	 * maximum of 32 SCSI commands only.
3309 	 */
3310 	tach_header = fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx];
3311 	temp_ox_id = ntohl(*(tach_header + 6)) >> 16;
3312 	/* We care about the SCSI writes only. Those are the wicked ones
3313 	 * that need an additional set of buffers.
3314 	 */
3315 	while(temp_ox_id <= MAX_SCSI_XID) {
3316 		update_tachyon_header_indx(fi);
3317 		if (fi->q.tachyon_header_indx == initial_indx) {
3318 			/* Should never happen.
3319 			 */
3320 			T_MSG("No free Tachyon headers available");
3321 			reset_tachyon(fi, SOFTWARE_RESET);
3322 			return 1;
3323 		}
3324 		tach_header = fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx];
3325 		temp_ox_id = ntohl(*(tach_header + 6)) >> 16;
3326 	}
3327 	return 0;
3328 }
3329 
3330 static int get_free_EDB(struct fc_info *fi)
3331 {
3332 unsigned int initial_indx = fi->q.edb_buffer_indx;
3333 	/* Check if the EDB is in use.
3334 	 * We could have an outstanding SCSI Write command.
3335 	 * We should find a free slot as we can queue a
3336 	 * maximum of 32 SCSI commands only.
3337 	 */
3338 	while (fi->q.free_edb_list[fi->q.edb_buffer_indx] != EDB_FREE) {
3339 		update_EDB_indx(fi);
3340 		if (fi->q.edb_buffer_indx == initial_indx) {
3341 			T_MSG("No free EDB buffers avaliable")
3342 			reset_tachyon(fi, SOFTWARE_RESET);
3343 			return 1;
3344 		}
3345 	}
3346 	return 0;
3347 }
3348 
3349 static int validate_login(struct fc_info *fi, u_int *base_ptr)
3350 {
3351 struct fc_node_info *q = fi->node_info_list;
3352 char n_port_name[PORT_NAME_LEN];
3353 char node_name[NODE_NAME_LEN];
3354 u_int s_id;
3355 	ENTER("validate_login");
3356 	/*index to Port Name in the payload. We need the 8 byte Port Name */
3357 	memcpy(n_port_name, base_ptr + 10, PORT_NAME_LEN);
3358 	memcpy(node_name, base_ptr + 12, NODE_NAME_LEN);
3359 	s_id = ntohl(*(base_ptr + 3)) & 0x00FFFFFF;
3360 
3361 	/* check if Fibre Channel IDs have changed */
3362 	while(q != NULL) {
3363 		if (memcmp(n_port_name, q->hw_addr, PORT_NAME_LEN) == 0) {
3364 			if ((s_id != q->d_id) || (memcmp(node_name, q->node_name, NODE_NAME_LEN) != 0)) {
3365 				DPRINTK1("Fibre Channel ID of Node has changed. Txing LOGO.");
3366 				return 0;
3367 			}
3368 			q->login = LOGIN_COMPLETED;
3369 #if DEBUG_5526_2
3370 			display_cache(fi);
3371 #endif
3372 			return 1;
3373 		}
3374 		q = q->next;
3375 	}
3376 	DPRINTK1("Port Name does not match. Txing LOGO.");
3377 	LEAVE("validate_login");
	return 0;
3379 }
3380 
3381 static void add_to_address_cache(struct fc_info *fi, u_int *base_ptr)
3382 {
3383 int size = sizeof(struct fc_node_info);
3384 struct fc_node_info *p, *q = fi->node_info_list, *r = NULL;
3385 char n_port_name[PORT_NAME_LEN];
3386 u_int s_id;
3387 	ENTER("add_to_address_cache");
3388 	/*index to Port Name in the payload. We need the 8 byte Port Name */
3389 	memcpy(n_port_name, base_ptr + 13, PORT_NAME_LEN);
3390 	s_id = ntohl(*(base_ptr + 3)) & 0x00FFFFFF;
3391 
3392 	/* check if info already exists */
3393 	while(q != NULL) {
3394 		if (memcmp(n_port_name, q->hw_addr, PORT_NAME_LEN) == 0) {
3395 			if (s_id != q->d_id) {
3396 				memcpy(&(q->c_of_s[0]), base_ptr + 17, 3 * sizeof(CLASS_OF_SERVICE));
3397 				q->mtu = ntohl(*(base_ptr + 10)) & 0x00000FFF;
3398 				q->d_id = s_id;
3399 				memcpy(q->node_name, base_ptr + 15, NODE_NAME_LEN);
3400 			}
3401 			q->login = LOGIN_COMPLETED;
3402 			q->scsi = FALSE;
3403 			fi->num_nodes++;
3404 #if DEBUG_5526_2
3405 			display_cache(fi);
3406 #endif
3407 			return;
3408 		}
3409 		r = q;
3410 		q = q->next;
3411 	}
3412 	p = (struct fc_node_info *)kmalloc(size, GFP_ATOMIC);
3413 	if (p == NULL) {
3414 		T_MSG("kmalloc failed in add_to_address_cache()");
3415 		return;
3416 	}
3417 	memcpy(&(p->c_of_s[0]), base_ptr + 17, 3 * sizeof(CLASS_OF_SERVICE));
3418 	p->mtu = ntohl(*(base_ptr + 10)) & 0x00000FFF;
3419 	p->d_id = s_id;
3420 	memcpy(p->hw_addr, base_ptr + 13, PORT_NAME_LEN);
3421 	memcpy(p->node_name, base_ptr + 15, NODE_NAME_LEN);
3422 	p->login = LOGIN_COMPLETED;
3423 	p->scsi = FALSE;
3424 	p->target_id = 0xFF;
3425 	p->next = NULL;
3426 	if (fi->node_info_list == NULL)
3427 		fi->node_info_list = p;
3428 	else
3429 		r->next = p;
3430 	fi->num_nodes++;
3431 #if DEBUG_5526_2
3432 	display_cache(fi);
3433 #endif
3434 	LEAVE("add_to_address_cache");
3435 	return;
3436 }
3437 
3438 static void remove_from_address_cache(struct fc_info *fi, u_int *base_ptr, u_int cmnd_code)
3439 {
3440 struct fc_node_info *q = fi->node_info_list;
3441 u_int s_id;
3442 	ENTER("remove_from_address_cache");
3443 	s_id = ntohl(*(base_ptr + 3)) & 0x00FFFFFF;
3444 	switch(cmnd_code) {
3445 		case ELS_LOGO:
3446 			/* check if info exists */
3447 			while (q != NULL) {
3448 				if (s_id == q->d_id) {
3449 					if (q->login == LOGIN_COMPLETED)
3450 						q->login = LOGIN_ATTEMPTED;
3451 					if (fi->num_nodes > 0)
3452 						fi->num_nodes--;
3453 #if DEBUG_5526_2
3454 					display_cache(fi);
3455 #endif
3456 					return;
3457 				}
3458 				q = q->next;
3459 			}
3460 			DPRINTK1("ELS_LOGO received from node 0x%x which is not logged-in", s_id);
3461 			break;
3462 		case ELS_RSCN:
3463 		{
3464 		int payload_len = ntohl(*(base_ptr + 8)) & 0xFF;
3465 		int no_of_pages, i;
3466 		u_char address_format;
3467 		u_short received_ox_id = ntohl(*(base_ptr + 6)) >> 16;
3468 		u_int node_id, mask, *page_ptr = base_ptr + 9;
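			/* An RSCN payload is the 4-byte command word followed by one
			 * 4-byte page per affected address; the top byte of each
			 * page selects Port, Area or Domain granularity.
			 */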
3469 			if ((payload_len < 4) || (payload_len > 256)) {
3470 				DPRINTK1("RSCN with invalid payload length received");
3471 				tx_ls_rjt(fi, s_id, received_ox_id, LOGICAL_ERR, RECV_FIELD_SIZE);
3472 				return;
3473 			}
3474 			/* Page_size includes the Command Code */
3475 			no_of_pages = (payload_len / 4) - 1;
3476 			for (i = 0; i < no_of_pages; i++) {
3477 				address_format = ntohl(*page_ptr) >> 24;
3478 				node_id = ntohl(*page_ptr) & 0x00FFFFFF;
3479 				switch(address_format) {
3480 					case PORT_ADDRESS_FORMAT:
3481 						rscn_handler(fi, node_id);
3482 						break;
3483 					case AREA_ADDRESS_FORMAT:
3484 					case DOMAIN_ADDRESS_FORMAT:
3485 						if (address_format == AREA_ADDRESS_FORMAT)
3486 							mask = 0xFFFF00;
3487 						else
3488 							mask = 0xFF0000;
3489 						while(q != NULL) {
3490 							if ((q->d_id & mask) == (node_id & mask))
3491 								rscn_handler(fi, q->d_id);
3492 							q = q->next;
3493 						}
						/* There might be some new nodes to be
						 * discovered. But some of the earlier
						 * requests triggered by this RSCN might still
						 * be in progress and we don't want to
						 * duplicate that effort. So call SCR after a
						 * short lag.
						 */
3500 						fi->explore_timer.function = scr_timer;
3501 						fi->explore_timer.data = (unsigned long)fi;
3502 						fi->explore_timer.expires = RUN_AT((no_of_pages*3*HZ)/100);
3503 						init_timer(&fi->explore_timer);
3504 						add_timer(&fi->explore_timer);
3505 						break;
3506 					default:
3507 						T_MSG("RSCN with invalid address format received");
3508 						tx_ls_rjt(fi, s_id, received_ox_id, LOGICAL_ERR, NO_EXPLN);
3509 				}
3510 				page_ptr += 1;
3511 			} /* end of for loop */
3512 		} /* end of case RSCN: */
3513 		break;
3514 	}
3515 #if DEBUG_5526_2
3516 	display_cache(fi);
3517 #endif
3518 	LEAVE("remove_from_address_cache");
3519 }
3520 
3521 static void rscn_handler(struct fc_info *fi, u_int node_id)
3522 {
3523 struct fc_node_info *q = fi->node_info_list;
3524 int login_state = sid_logged_in(fi, node_id);
3525 	if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN)) {
3526 		while(q != NULL) {
3527 			if (q->d_id == node_id) {
3528 				q->login = LOGIN_ATTEMPTED;
3529 				if (fi->num_nodes > 0)
3530 					fi->num_nodes--;
3531 				break;
3532 			}
3533 			else
3534 				q = q->next;
3535 		}
3536 	}
3537 	else
3538 	if (login_state == NODE_LOGGED_OUT)
3539 		tx_adisc(fi, ELS_ADISC, node_id, OX_ID_FIRST_SEQUENCE);
3540 	else
	if (login_state == NODE_NOT_PRESENT)
3542 		tx_logi(fi, ELS_PLOGI, node_id);
3543 }
3544 
3545 static void scr_timer(unsigned long data)
3546 {
3547 struct fc_info *fi = (struct fc_info *)data;
3548 	del_timer(&fi->explore_timer);
3549 	tx_name_server_req(fi, FCS_GP_ID4);
3550 }
3551 
3552 static int sid_logged_in(struct fc_info *fi, u_int s_id)
3553 {
3554 struct fc_node_info *temp = fi->node_info_list;
3555 	while(temp != NULL)
3556 		if ((temp->d_id == s_id) && (temp->login == LOGIN_COMPLETED)) {
3557 			if (temp->scsi != FALSE)
3558 				return NODE_PROCESS_LOGGED_IN;
3559 			else
3560 				return NODE_LOGGED_IN;
3561 		}
3562 		else
3563 		if ((temp->d_id == s_id) && (temp->login != LOGIN_COMPLETED))
3564 			return NODE_LOGGED_OUT;
3565 		else
3566 			temp = temp->next;
3567 	return NODE_NOT_PRESENT;
3568 }
3569 
3570 static void mark_scsi_sid(struct fc_info *fi, u_int *buff_addr, u_char action)
3571 {
3572 struct fc_node_info *temp = fi->node_info_list;
3573 u_int s_id;
3574 u_int service_params;
3575 	s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
3576 	service_params = ntohl(*(buff_addr + 12)) & 0x000000F0;
3577 	while(temp != NULL)
3578 		if ((temp->d_id == s_id) && (temp->login == LOGIN_COMPLETED)) {
3579 			if (action == DELETE_ENTRY) {
3580 				temp->scsi = FALSE;
3581 #if DEBUG_5526_2
3582 				display_cache(fi);
3583 #endif
3584 				return;
3585 			}
3586 			/* Check if it is a SCSI Target */
3587 			if (!(service_params & TARGET_FUNC)) {
3588 				temp->scsi = INITIATOR;
3589 #if DEBUG_5526_2
3590 				display_cache(fi);
3591 #endif
3592 				return;
3593 			}
3594 			temp->scsi = TARGET;
3595 			/* This helps to maintain the target_id no matter what your
3596 			 *  Fibre Channel ID is.
3597 			 */
3598 			if (temp->target_id == 0xFF) {
3599 				if (fi->g.no_of_targets <= MAX_SCSI_TARGETS)
3600 					temp->target_id = fi->g.no_of_targets++;
3601 				else
3602 					T_MSG("MAX TARGETS reached!");
3603 			}
3604 			else
3605 				DPRINTK1("Target_id %d already present", temp->target_id);
3606 #if DEBUG_5526_2
3607 			display_cache(fi);
3608 #endif
3609 			return;
3610 		}
3611 		else
3612 			temp = temp->next;
3613 	return;
3614 }
3615 
3616 static int node_logged_in_prev(struct fc_info *fi, u_int *buff_addr)
3617 {
3618 struct fc_node_info *temp;
3619 u_char *data = (u_char *)buff_addr;
3620 u_int s_id;
3621 char node_name[NODE_NAME_LEN];
3622 	s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
3623 	memcpy(node_name, buff_addr + 12, NODE_NAME_LEN);
3624 	/* point to port_name in the ADISC payload */
3625 	data += 10 * 4;
3626 	/* point to last 6 bytes of port_name */
3627 	data += 2;
3628 	temp = look_up_cache(fi, data);
3629 	if (temp != NULL) {
3630 		if ((temp->d_id == s_id) && (memcmp(node_name, temp->node_name, NODE_NAME_LEN) == 0)) {
3631 			temp->login = LOGIN_COMPLETED;
3632 #if DEBUG_5526_2
3633 			display_cache(fi);
3634 #endif
3635 			return TRUE;
3636 		}
3637 	}
3638 	return FALSE;
3639 }
3640 
3641 static struct fc_node_info *look_up_cache(struct fc_info *fi, char *data)
3642 {
3643 struct fc_node_info *temp_list = fi->node_info_list, *q;
3644 u_char n_port_name[FC_ALEN], temp_addr[FC_ALEN];
3645 	ENTER("look_up_cache");
3646 	memcpy(n_port_name, data, FC_ALEN);
3647 	while(temp_list) {
3648 		if (memcmp(n_port_name, &(temp_list->hw_addr[2]), FC_ALEN) == 0)
3649 			return temp_list;
3650 		else
3651 			temp_list = temp_list->next;
3652 	}
3653 
3654 	/* Broadcast IP ?
3655 	 */
3656 	temp_addr[0] = temp_addr[1] = temp_addr[2] = 0xFF;
3657 	temp_addr[3] = temp_addr[4] = temp_addr[5] = 0xFF;
3658 	if (memcmp(n_port_name, temp_addr, FC_ALEN) == 0) {
3659 		q = (struct fc_node_info *)kmalloc(sizeof(struct fc_node_info), GFP_ATOMIC);
3660 		if (q == NULL) {
3661 			T_MSG("kmalloc failed in look_up_cache()");
3662 			return NULL;
3663 		}
3664 		q->d_id = BROADCAST;
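		/* Note: this entry is not linked into node_info_list; the
		 * caller (tx_ip_packet) is expected to kfree() it once the
		 * broadcast has gone out.
		 */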
3665 		return q;
3666 	}
3667 	LEAVE("look_up_cache");
3668 	return NULL;
3669 }
3670 
3671 static int display_cache(struct fc_info *fi)
3672 {
3673 struct fc_node_info *q = fi->node_info_list;
3674 #if DEBUG_5526_2
3675 struct ox_id_els_map *temp_ox_id_list = fi->ox_id_list;
3676 #endif
3677 int count = 0, j;
3678 	printk("\nFibre Channel Node Information for %s\n", fi->name);
3679 	printk("My FC_ID = %x, My WWN = %x %x, ", fi->g.my_id, fi->g.my_node_name_high, fi->g.my_node_name_low);
3680 	if (fi->g.ptp_up == TRUE)
3681 		printk("Port_Type = N_Port\n");
3682 	if (fi->g.loop_up == TRUE)
3683 		printk("Port_Type = L_Port\n");
3684 	while(q != NULL) {
3685 		printk("WWN = ");
3686 		for (j = 0; j < PORT_NAME_LEN; j++)
3687 			printk("%x ", q->hw_addr[j]);
3688 		printk("FC_ID = %x, ", q->d_id);
3689 		printk("Login = ");
3690 		if (q->login == LOGIN_COMPLETED)
3691 			printk("ON ");
3692 		else
3693 			printk("OFF ");
3694 		if (q->scsi == TARGET)
3695 			printk("Target_ID = %d ", q->target_id);
3696 		printk("\n");
3697 		q = q->next;
3698 		count++;
3699 	}
3700 
3701 #if DEBUG_5526_2
3702 	printk("OX_ID -> ELS Map\n");
3703 	while(temp_ox_id_list) {
3704 			printk("ox_id = %x, ELS = %x\n", temp_ox_id_list->ox_id, temp_ox_id_list->els);
3705 			temp_ox_id_list = temp_ox_id_list->next;
3706 	}
3707 #endif
3708 
3709 	return 0;
3710 }
3711 
3712 static struct net_device_stats * iph5526_get_stats(struct net_device *dev)
3713 {
3714 struct fc_info *fi = (struct fc_info*)dev->priv;
3715 	return (struct net_device_stats *) &fi->fc_stats;
3716 }
3717 
3718 
3719 /* SCSI stuff starts here */
3720 
3721 int iph5526_detect(Scsi_Host_Template *tmpt)
3722 {
3723 struct Scsi_Host *host = NULL;
3724 struct iph5526_hostdata *hostdata;
3725 struct fc_info *fi = NULL;
3726 int no_of_hosts = 0, timeout, i, j, count = 0;
3727 u_int pci_maddr = 0;
3728 struct pci_dev *pdev = NULL;
3729 
3730 	tmpt->proc_name = "iph5526";
3731 	if (pci_present() == 0) {
3732 		printk("iph5526: PCI not present\n");
3733 		return 0;
3734 	}
3735 
3736 	for (i = 0; i <= MAX_FC_CARDS; i++)
3737 		fc[i] = NULL;
3738 
3739 	for (i = 0; clone_list[i].vendor_id != 0; i++)
3740 	while ((pdev = pci_find_device(clone_list[i].vendor_id, clone_list[i].device_id, pdev))) {
3741 		unsigned short pci_command;
3742 		if (pci_enable_device(pdev))
3743 			continue;
3744 		if (count < MAX_FC_CARDS) {
3745 			fc[count] = kmalloc(sizeof(struct fc_info), GFP_ATOMIC);
3746 			if (fc[count] == NULL) {
3747 				printk("iph5526.c: Unable to register card # %d\n", count + 1);
3748 				return no_of_hosts;
3749 			}
3750 			memset(fc[count], 0, sizeof(struct fc_info));
3751 		}
3752 		else {
3753 			printk("iph5526.c: Maximum Number of cards reached.\n");
3754 			return no_of_hosts;
3755 		}
3756 
3757 		fi = fc[count];
3758 		sprintf(fi->name, "fc%d", count);
3759 
3760 		host = scsi_register(tmpt, sizeof(struct iph5526_hostdata));
3761 		if(host==NULL) {
3762 			kfree(fc[count]);
3763 			return no_of_hosts;
3764 		}
3765 
3766 		hostdata = (struct iph5526_hostdata *)host->hostdata;
3767 		memset(hostdata, 0 , sizeof(struct iph5526_hostdata));
3768 		for (j = 0; j < MAX_SCSI_TARGETS; j++)
3769 			hostdata->tag_ages[j] = jiffies;
3770 		hostdata->fi = fi;
3771 		fi->host = host;
3772 		//host->max_id = MAX_SCSI_TARGETS;
3773 		host->max_id = 5;
3774 		host->hostt->use_new_eh_code = 1;
3775 		host->this_id = tmpt->this_id;
3776 
3777 		pci_maddr = pci_resource_start(pdev, 0);
3778 		if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
3779 			printk("iph5526.c : Cannot find proper PCI device base address.\n");
3780 			scsi_unregister(host);
3781 			kfree(fc[count]);
3782 			fc[count] = NULL;
3783 			continue;
3784 		}
3785 
3786 		DPRINTK("pci_maddr = %x", pci_maddr);
3787 		pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3788 
3789 		pci_irq_line = pdev->irq;
3790 		printk("iph5526.c: PCI BIOS reports %s at i/o %#x, irq %d.\n", clone_list[i].name, pci_maddr, pci_irq_line);
		fi->g.mem_base = ioremap(pci_maddr & PAGE_MASK, 1024);
		if (fi->g.mem_base == NULL) {
			printk("iph5526.c : ioremap failed!!!\n");
			scsi_unregister(host);
			kfree(fc[count]);
			fc[count] = NULL;
			continue;
		}

		/* We use Memory Mapped IO. The initial space contains the
		 * PCI Configuration registers followed by the (i) chip
		 * registers followed by the Tachyon registers.
		 */
		/* This is where the (i)chip maps the Tachyon Address Space.
		 */
		fi->g.tachyon_base = (u_long)fi->g.mem_base + TACHYON_OFFSET + ( pci_maddr & ~PAGE_MASK );
		DPRINTK("fi->g.tachyon_base = %x", (u_int)fi->g.tachyon_base);
3808 		DPRINTK("IRQ1 = %d\n", pci_irq_line);
3809 		printk(version);
3810 		fi->base_addr = (long) pdev;
3811 
3812 		if (pci_irq_line) {
3813 		int irqval = 0;
3814 			/* Found it, get IRQ.
3815 			 */
3816 			irqval = request_irq(pci_irq_line, &tachyon_interrupt, pci_irq_line ? SA_SHIRQ : 0, fi->name, host);
3817 			if (irqval) {
3818 				printk("iph5526.c : Unable to get IRQ %d (irqval = %d).\n", pci_irq_line, irqval);
3819 				scsi_unregister(host);
3820 				kfree(fc[count]);
3821 				fc[count] = NULL;
3822 				continue;
3823 			}
3824 			host->irq = fi->irq = pci_irq_line;
3825 			pci_irq_line = 0;
3826 			fi->clone_id = clone_list[i].vendor_id;
3827 		}
3828 
3829 		if (!initialize_register_pointers(fi) || !tachyon_init(fi)) {
3830 			printk("iph5526.c: TACHYON initialization failed for card # %d!!!\n", count + 1);
3831 			free_irq(host->irq, host);
3832 			scsi_unregister(host);
3833 			if (fi)
3834 				clean_up_memory(fi);
3835 			kfree(fc[count]);
3836 			fc[count] = NULL;
3837 			break;
3838 		}
3839 		DPRINTK1("Fibre Channel card initialized");
3840 		/* Wait for the Link to come up and the login process
3841 		 * to complete.
3842 		 */
3843 		for(timeout = jiffies + 10*HZ; time_before(jiffies, timeout) && ((fi->g.link_up == FALSE) || (fi->g.port_discovery == TRUE) || (fi->g.explore_fabric == TRUE) || (fi->g.perform_adisc == TRUE));)
3844 		{
3845 			cpu_relax();
3846 			barrier();
3847 		}
3848 
3849 		count++;
3850 		no_of_hosts++;
3851 	}
3852 	DPRINTK1("no_of_hosts = %d",no_of_hosts);
3853 
3854 	/* This is to make sure that the ACC to the PRLI comes in
3855 	 * for the last ALPA.
3856 	 */
3857 	mdelay(1000); /* Ugly! Let the Gods forgive me */
3858 
3859 	DPRINTK1("leaving iph5526_detect\n");
3860 	return no_of_hosts;
3861 }
3862 
3863 
3864 int iph5526_biosparam(Disk * disk, kdev_t n, int ip[])
3865 {
3866 int size = disk->capacity;
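	/* Default to a 64 head / 32 sector translation (2048 blocks per
	 * cylinder, hence size >> 11) and fall back to 255/63 when that
	 * would exceed 1024 cylinders.
	 */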
3867 	ip[0] = 64;
3868 	ip[1] = 32;
3869 	ip[2] = size >> 11;
3870 	if (ip[2] > 1024) {
3871 		ip[0] = 255;
3872 		ip[1] = 63;
3873 		ip[2] = size / (ip[0] * ip[1]);
3874 	}
3875 	return 0;
3876 }
3877 
3878 int iph5526_queuecommand(Scsi_Cmnd *Cmnd, void (*done) (Scsi_Cmnd *))
3879 {
3880 int int_required = 0;
3881 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_COMMAND;
3882 u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
3883 u_int frame_class = Cmnd->target;
3884 u_short ox_id = OX_ID_FIRST_SEQUENCE;
3885 struct Scsi_Host *host = Cmnd->host;
3886 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata*)host->hostdata;
3887 struct fc_info *fi = hostdata->fi;
3888 struct fc_node_info *q;
3889 u_long flags;
3890 	ENTER("iph5526_queuecommand");
3891 
3892 	spin_lock_irqsave(&fi->fc_lock, flags);
3893 	Cmnd->scsi_done = done;
3894 
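	/* Map the Linux queue tag onto an FCP_CNTL task attribute. For
	 * unrecognised tags, an ORDERED command is sent at most once every
	 * 5 seconds per target (presumably to bound reordering) and SIMPLE
	 * is used otherwise.
	 */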
3895 	if (Cmnd->device->tagged_supported) {
3896 		switch(Cmnd->tag) {
3897 			case SIMPLE_QUEUE_TAG:
3898 				hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_SIMPLE;
3899 				break;
3900 			case HEAD_OF_QUEUE_TAG:
3901 				hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_HEAD_OF_Q;
3902 				break;
3903 			case  ORDERED_QUEUE_TAG:
3904 				hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_ORDERED;
3905 				break;
3906 			default:
3907 				if ((jiffies - hostdata->tag_ages[Cmnd->target]) > (5 * HZ)) {
3908 					hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_ORDERED;
3909 					hostdata->tag_ages[Cmnd->target] = jiffies;
3910 				}
3911 				else
3912 					hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_SIMPLE;
3913 				break;
3914 		}
3915 	}
3916 	/*else
3917 		hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_UNTAGGED;
3918 	*/
3919 
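	/* Build the FCP LUN field: only the first level carries the SCSI
	 * LUN, the remaining levels stay zero.
	 */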
3920 	hostdata->cmnd.fcp_addr[3] = 0;
3921 	hostdata->cmnd.fcp_addr[2] = 0;
3922 	hostdata->cmnd.fcp_addr[1] = 0;
3923 	hostdata->cmnd.fcp_addr[0] = htons(Cmnd->lun);
3924 
3925 	memcpy(&hostdata->cmnd.fcp_cdb, Cmnd->cmnd, Cmnd->cmd_len);
3926 	hostdata->cmnd.fcp_data_len = htonl(Cmnd->request_bufflen);
3927 
	/* Get an unused OX_ID. We could have pending commands.
3929 	 */
3930 	if (get_scsi_oxid(fi)) {
3931 		spin_unlock_irqrestore(&fi->fc_lock, flags);
3932 		return 1;
3933 	}
3934 	fi->q.free_scsi_oxid[fi->g.scsi_oxid] = OXID_INUSE;
3935 
	/* Keep a reference to the command so that we can call its done()
	 * function when the SCSI command completes.
3938 	 */
3939 	hostdata->cmnd_handler[fi->g.scsi_oxid] = Cmnd;
3940 
3941 	switch(Cmnd->cmnd[0]) {
3942 		case WRITE_6:
3943 		case WRITE_10:
3944 		case WRITE_12:
3945 			fi->g.type_of_frame = FC_SCSI_WRITE;
3946 			hostdata->cmnd.fcp_cntl = htonl(FCP_CNTL_WRITE | hostdata->cmnd.fcp_cntl);
3947 			break;
3948 		default:
3949 			fi->g.type_of_frame = FC_SCSI_READ;
3950 			hostdata->cmnd.fcp_cntl = htonl(FCP_CNTL_READ | hostdata->cmnd.fcp_cntl);
3951 	}
3952 
3953 	memcpy(fi->q.ptr_fcp_cmnd[fi->q.fcp_cmnd_indx], &(hostdata->cmnd), sizeof(fcp_cmd));
3954 
3955 	q = resolve_target(fi, Cmnd->target);
3956 
3957 	if (q == NULL) {
3958 	u_int bad_id = fi->g.my_ddaa | 0xFE;
		/* We transmit to a non-existent AL_PA so that the "done"
3960 		 * function can be called while receiving the interrupt
3961 		 * due to a Timeout for a bad AL_PA. In a PTP configuration,
3962 		 * the int_required field is set, since there is no notion
3963 		 * of AL_PAs. This approach sucks, but works alright!
3964 		 */
3965 		if (fi->g.ptp_up == TRUE)
3966 			int_required = 1;
3967 		tx_exchange(fi, (char *)(&(hostdata->cmnd)), sizeof(fcp_cmd), r_ctl, type, bad_id, fi->g.my_mtu, int_required, ox_id, FC_SCSI_BAD_TARGET);
3968 		spin_unlock_irqrestore(&fi->fc_lock, flags);
3969 		DPRINTK1("Target ID %x not present", Cmnd->target);
3970 		return 0;
3971 	}
3972 	if (q->login == LOGIN_COMPLETED) {
3973 		if (add_to_sest(fi, Cmnd, q)) {
3974 			DPRINTK1("add_to_sest() failed.");
3975 			spin_unlock_irqrestore(&fi->fc_lock, flags);
3976 			return 0;
3977 		}
3978 		tx_exchange(fi, (char *)(fi->q.ptr_fcp_cmnd[fi->q.fcp_cmnd_indx]), sizeof(fcp_cmd), r_ctl, type, q->d_id, q->mtu, int_required, ox_id, frame_class << 16);
3979 		update_FCP_CMND_indx(fi);
3980 	}
3981 	spin_unlock_irqrestore(&fi->fc_lock, flags);
3982 	/* If q != NULL, then we have a SCSI Target.
3983 	 * If q->login != LOGIN_COMPLETED, then that device could be
	 * offline temporarily. So we let the command time out.
3985 	 */
3986 	LEAVE("iph5526_queuecommand");
3987 	return 0;
3988 }
3989 
3990 int iph5526_abort(Scsi_Cmnd *Cmnd)
3991 {
3992 struct Scsi_Host *host = Cmnd->host;
3993 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
3994 struct fc_info *fi = hostdata->fi;
3995 struct fc_node_info *q;
3996 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_COMMAND;
3997 u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
3998 u_short ox_id = OX_ID_FIRST_SEQUENCE;
3999 int int_required = 1, i, abort_status = FALSE;
4000 u_long flags;
4001 
4002 	ENTER("iph5526_abort");
4003 
4004 	spin_lock_irqsave(&fi->fc_lock, flags);
4005 
4006 	q = resolve_target(fi, Cmnd->target);
4007 	if (q == NULL) {
4008 	u_int bad_id = fi->g.my_ddaa | 0xFE;
4009 		/* This should not happen as we should always be able to
	 * resolve a target id. But, just in case...
	 * We transmit to a non-existent AL_PA so that the done
4012 		 * function can be called while receiving the interrupt
4013 		 * for a bad AL_PA.
4014 		 */
4015 		DPRINTK1("Unresolved Target ID!");
4016 		tx_exchange(fi, (char *)(&(hostdata->cmnd)), sizeof(fcp_cmd), r_ctl, type, bad_id, fi->g.my_mtu, int_required, ox_id, FC_SCSI_BAD_TARGET);
4017 		DPRINTK1("Target ID %x not present", Cmnd->target);
4018 		spin_unlock_irqrestore(&fi->fc_lock, flags);
4019 		return FAILED;
4020 	}
4021 
4022 	/* If q != NULL, then we have a SCSI Target. If
4023 	 * q->login != LOGIN_COMPLETED, then that device could
	 * be offline temporarily. So we let the command time out.
4025 	 */
4026 
4027 	/* Get the OX_ID for the Command to be aborted.
4028 	 */
4029 	for (i = 0; i <= MAX_SCSI_XID; i++) {
4030 		if (hostdata->cmnd_handler[i] == Cmnd) {
4031 			hostdata->cmnd_handler[i] = NULL;
4032 			ox_id = i;
4033 			break;
4034 		}
4035 	}
4036 	if (i > MAX_SCSI_XID) {
4037 		T_MSG("Command could not be resolved to OX_ID");
4038 		spin_unlock_irqrestore(&fi->fc_lock, flags);
4039 		return FAILED;
4040 	}
4041 
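	/* Reads carry SCSI_READ_BIT in their OX_ID (see tx_exchange()), so
	 * set it here for anything that is not a WRITE before aborting the
	 * exchange.
	 */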
4042 	switch(Cmnd->cmnd[0]) {
4043 		case WRITE_6:
4044 		case WRITE_10:
4045 		case WRITE_12:
4046 			break;
4047 		default:
4048 			ox_id |= SCSI_READ_BIT;
4049 	}
4050 	abort_status = abort_exchange(fi, ox_id);
4051 
4052 	if ((q->login == LOGIN_COMPLETED) && (abort_status == TRUE)) {
4053 		/* Then, transmit an ABTS to the target. The rest
4054 		 * is done when the BA_ACC is received for the ABTS.
4055  	 	 */
4056 		tx_abts(fi, q->d_id, ox_id);
4057 	}
4058 	else {
4059 	u_int STE_bit;
4060 	u_short x_id;
4061 		/* Invalidate resources for that Exchange.
4062 		 */
4063 		x_id = ox_id & MAX_SCSI_XID;
4064 		STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4065 		if (STE_bit & SEST_V) {
4066 			*(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
4067 			invalidate_SEST_entry(fi, ox_id);
4068 		}
4069 	}
4070 
4071 	LEAVE("iph5526_abort");
4072 	spin_unlock_irqrestore(&fi->fc_lock, flags);
4073 	return SUCCESS;
4074 }
4075 
4076 static int abort_exchange(struct fc_info *fi, u_short ox_id)
4077 {
4078 u_short x_id;
4079 volatile u_int flush_SEST, STE_bit;
4080 	x_id = ox_id & MAX_SCSI_XID;
4081 	DPRINTK1("Aborting Exchange %x", ox_id);
4082 
4083 	STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
	/* Is the Exchange still active?
4085 	 */
4086 	if (STE_bit & SEST_V) {
4087 		if (ox_id & SCSI_READ_BIT) {
4088 			/* If the Exchange to be aborted is Inbound,
4089 			 * Flush the SEST Entry from Tachyon's Cache.
4090 			 */
4091 			*(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
4092 			flush_tachyon_cache(fi, ox_id);
4093 			flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
4094 			while ((flush_SEST & 0x80000000) != 0)
4095 				flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
4096 			STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4097 			while ((STE_bit & 0x80000000) != 0)
4098 				STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4099 			flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
4100 			invalidate_SEST_entry(fi, ox_id);
4101 		}
4102 		else {
4103 		int i;
4104 		u_int *ptr_edb;
4105 			/* For In-Order Reassembly, the following is done:
4106 			 * First, write zero as the buffer length in the EDB.
4107 		 	 */
4108 			ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
4109 			for (i = 0; i < EDB_LEN; i++)
4110 				if (fi->q.ptr_edb[i] == ptr_edb)
4111 					break;
4112 			if (i < EDB_LEN)
4113 				*ptr_edb = *ptr_edb & 0x0000FFFF;
4114 			else
4115 				T_MSG("EDB not found while clearing in abort_exchange()");
4116 		}
4117 		DPRINTK1("Exchange %x invalidated", ox_id);
4118 		return TRUE;
4119 	}
4120 	else {
4121 		DPRINTK1("SEST Entry for exchange %x not valid", ox_id);
4122 		return FALSE;
4123 	}
4124 }
4125 
4126 static void flush_tachyon_cache(struct fc_info *fi, u_short ox_id)
4127 {
4128 volatile u_int tachyon_status;
4129 	if (fi->g.loop_up == TRUE) {
4130 		writel(HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
4131 		/* Make sure that the Inbound FIFO is empty.
4132 		 */
4133 		do {
4134 			tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
4135 			udelay(200);
4136 		}while ((tachyon_status & RECEIVE_FIFO_EMPTY) == 0);
4137 		/* Ok. Go ahead and flushhhhhhhhh!
4138 		 */
4139 		writel(0x80000000 | ox_id, fi->t_r.ptr_tach_flush_oxid_reg);
4140 		writel(EXIT_HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
4141 		return;
4142 	}
4143 	if (fi->g.ptp_up == TRUE) {
4144 		take_tachyon_offline(fi);
4145 		/* Make sure that the Inbound FIFO is empty.
4146 		 */
4147 		do {
4148 			tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
4149 			udelay(200);
4150 		}while ((tachyon_status & RECEIVE_FIFO_EMPTY) == 0);
4151 		writel(0x80000000 | ox_id, fi->t_r.ptr_tach_flush_oxid_reg);
4152 		/* Write the Initialize command to the FM Control reg.
4153 		 */
4154 		fi->g.n_port_try = TRUE;
4155 		DPRINTK1("In abort_exchange, TACHYON initializing as N_Port...\n");
4156 		writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
4157 	}
4158 }
4159 
4160 static struct fc_node_info *resolve_target(struct fc_info *fi, u_char target)
4161 {
4162 struct fc_node_info *temp = fi->node_info_list;
4163 	while(temp != NULL)
4164 		if (temp->target_id == target) {
4165 			if ((temp->scsi == TARGET) && (temp->login == LOGIN_COMPLETED))
4166 				return temp;
4167 			else {
4168 				if (temp->login != LOGIN_COMPLETED) {
4169 					/* The Target is not currently logged in.
4170 					 * It could be a Target on the Local Loop or
4171 					 * on a Remote Loop connected through a switch.
4172 					 * In either case, we will know whenever the Target
					 * comes On-Line again. We let the command
					 * time out so that it gets retried.
4175 					 */
4176 					T_MSG("Target %d not logged in.", temp->target_id);
4177 					tx_logi(fi, ELS_PLOGI, temp->d_id);
4178 					return temp;
4179 				}
4180 				else {
4181 					if (temp->scsi != TARGET) {
4182 						/* For some reason, we did not get a response to
4183 						 * PRLI. Letz try it again...
4184 						 */
4185 						DPRINTK1("Node not PRLIied. Txing PRLI...");
4186 						tx_prli(fi, ELS_PRLI, temp->d_id, OX_ID_FIRST_SEQUENCE);
4187 					}
4188 				}
4189 				return temp;
4190 			}
4191 		}
4192 		else
4193 			temp = temp->next;
4194 	return NULL;
4195 }
4196 
4197 static int add_to_sest(struct fc_info *fi, Scsi_Cmnd *Cmnd, struct fc_node_info *ni)
4198 {
4199 /* we have at least 1 buffer, the terminator */
4200 int no_of_sdb_buffers = 1, i;
4201 int no_of_edb_buffers = 0;
4202 u_int *req_buffer = (u_int *)Cmnd->request_buffer;
4203 u_int *ptr_sdb = NULL;
4204 struct scatterlist *sl1, *sl2 = NULL;
4205 int no_of_sg = 0;
4206 
4207 	switch(fi->g.type_of_frame) {
4208 		case FC_SCSI_READ:
4209 			fi->g.inb_sest_entry.flags_and_byte_offset = htonl(INB_SEST_VED);
4210 			fi->g.inb_sest_entry.byte_count = 0;
4211 			fi->g.inb_sest_entry.no_of_recvd_frames = 0;
4212 			fi->g.inb_sest_entry.no_of_expected_frames = 0;
4213 			fi->g.inb_sest_entry.last_fctl = 0;
4214 
4215 			if (Cmnd->use_sg) {
4216 				no_of_sg = Cmnd->use_sg;
4217 				sl1 = sl2 = (struct scatterlist *)Cmnd->request_buffer;
4218 				for (i = 0; i < no_of_sg; i++) {
4219 					no_of_sdb_buffers += sl1->length / SEST_BUFFER_SIZE;
4220 					if (sl1->length % SEST_BUFFER_SIZE)
4221 						no_of_sdb_buffers++;
4222 					sl1++;
4223 				}
4224 			}
4225 			else {
4226 				no_of_sdb_buffers += Cmnd->request_bufflen / SEST_BUFFER_SIZE;
4227 				if (Cmnd->request_bufflen % SEST_BUFFER_SIZE)
4228 					no_of_sdb_buffers++;
4229 			} /* if !use_sg */
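			/* The SDB ends up as a flat list of bus addresses, one per
			 * SEST_BUFFER_SIZE chunk of the request, terminated by the
			 * 0x1 entry written further down; Tachyon walks it when
			 * placing the inbound data.
			 */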
4230 
4231 			/* We are working with the premise that at the max we would
4232 			 * get a scatter-gather buffer containing 63 buffers
4233 			 * of size 1024 bytes each. Is it a _bad_ assumption?
4234 			 */
4235 			if (no_of_sdb_buffers > 512) {
4236 				T_MSG("Number of SDB buffers needed = %d", no_of_sdb_buffers);
4237 				T_MSG("Disable Scatter-Gather!!!");
4238 				return 1;
4239 			}
4240 
4241 
			/* Store it in the sdb_table so that we can retrieve it and
			 * free up the memory when the Read Command completes.
4244 			 */
4245 			if (get_free_SDB(fi))
4246 				return 1;
4247 			ptr_sdb = fi->q.ptr_sdb_slot[fi->q.sdb_indx];
4248 			fi->q.sdb_slot_status[fi->q.sdb_indx] = SDB_BUSY;
4249 			fi->g.inb_sest_entry.sdb_address = htonl(virt_to_bus(ptr_sdb));
4250 
4251 			if (Cmnd->use_sg) {
4252 			int count = 0, j;
4253 				for(i = 0; i < no_of_sg; i++) {
4254 				char *addr_ptr = sl2->address;
4255 					count = sl2->length / SEST_BUFFER_SIZE;
4256 					if (sl2->length % SEST_BUFFER_SIZE)
4257 						count++;
4258 					for (j = 0; j < count; j++) {
4259 						*(ptr_sdb) = htonl(virt_to_bus(addr_ptr));
4260 						addr_ptr += SEST_BUFFER_SIZE;
4261 						ptr_sdb++;
4262 					}
4263 					count = 0;
4264 					sl2++;
4265 				}
4266 			}
4267 			else {
4268 				for (i = 0; i < no_of_sdb_buffers - 1; i++) {
4269 					*(ptr_sdb) = htonl(virt_to_bus(req_buffer));
4270 					req_buffer += SEST_BUFFER_SIZE/4;
4271 					ptr_sdb++;
4272 				}
4273 			}
4274 			*(ptr_sdb) = htonl(0x1); /* Terminator */
4275 
4276 			/* The scratch pad is used to hold the index into the SDB.
4277 			 */
4278 			fi->g.inb_sest_entry.scratch_pad = fi->q.sdb_indx;
4279 			fi->g.inb_sest_entry.expected_ro = 0;
4280 			fi->g.inb_sest_entry.buffer_index = 0;
4281 			fi->g.inb_sest_entry.buffer_offset = 0;
4282 			memcpy(fi->q.ptr_sest[fi->g.scsi_oxid], &fi->g.inb_sest_entry, sizeof(INB_SEST_ENTRY));
4283 			break;
4284 		case FC_SCSI_WRITE:
4285 			fi->g.outb_sest_entry.flags_and_did = htonl(OUTB_SEST_VED | ni->d_id);
4286 			fi->g.outb_sest_entry.max_frame_len = htons(ni->mtu << 4);
4287 			fi->g.outb_sest_entry.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | ODB_NO_INT | ODB_NO_COMP);
4288 			fi->g.outb_sest_entry.total_seq_length = INV_SEQ_LEN;
4289 			fi->g.outb_sest_entry.link = htons(OUTB_SEST_LINK);
4290 			fi->g.outb_sest_entry.transaction_id = htonl(fi->g.scsi_oxid);
4291 			fi->g.outb_sest_entry.seq_id = fi->g.seq_id;
4292 			fi->g.outb_sest_entry.reserved = 0x0;
4293 			fi->g.outb_sest_entry.header_length = htons(TACHYON_HEADER_LEN);
4294 
4295 			{
4296 			u_char df_ctl = 0;
4297 			u_short rx_id = RX_ID_FIRST_SEQUENCE;
4298 			u_int r_ctl = FC4_DEVICE_DATA | SOLICITED_DATA;
4299 			u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
4300 				/* Multi Frame Sequence ? If yes, set RO bit.
4301 				 */
4302 				if (Cmnd->request_bufflen > ni->mtu)
4303 					type |= RELATIVE_OFF_PRESENT;
4304 				build_tachyon_header(fi, fi->g.my_id, r_ctl, ni->d_id, type, fi->g.seq_id, df_ctl, fi->g.scsi_oxid, rx_id, NULL);
4305 				if (get_free_header(fi) || get_free_EDB(fi))
4306 					return 1;
4307 				memcpy(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx], &(fi->g.tach_header), TACHYON_HEADER_LEN);
4308 				fi->g.outb_sest_entry.header_address = htonl(virt_to_bus(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx]));
4309 				update_tachyon_header_indx(fi);
4310 			}
4311 
4312 			if (Cmnd->use_sg) {
4313 				no_of_sg = Cmnd->use_sg;
4314 				sl1 = sl2 = (struct scatterlist *)Cmnd->request_buffer;
4315 				for (i = 0; i < no_of_sg; i++) {
4316 					no_of_edb_buffers += sl1->length / SEST_BUFFER_SIZE;
4317 					if (sl1->length % SEST_BUFFER_SIZE)
4318 						no_of_edb_buffers++;
4319 					sl1++;
4320 				}
4321 			}
4322 			else {
4323 				no_of_edb_buffers += Cmnd->request_bufflen / SEST_BUFFER_SIZE;
4324 				if (Cmnd->request_bufflen % SEST_BUFFER_SIZE)
4325 					no_of_edb_buffers++;
4326 			} /* if !use_sg */
4327 
4328 
4329 			/* We need "no_of_edb_buffers" _contiguous_ EDBs
4330 			 * that are FREE. Check for that first.
4331 			 */
4332 			for (i = 0; i < no_of_edb_buffers; i++) {
4333 			int j;
4334 				if ((fi->q.edb_buffer_indx + no_of_edb_buffers) >= EDB_LEN)
4335 					fi->q.edb_buffer_indx = 0;
4336 				if (fi->q.free_edb_list[fi->q.edb_buffer_indx + i] != EDB_FREE) {
4337 					for (j = 0; j < i; j++)
4338 						update_EDB_indx(fi);
4339 					if (get_free_EDB(fi))
4340 						return 1;
4341 					i = 0;
4342 				}
4343 			}
4344 
4345 			/* We got enuff FREE EDBs.
4346 			 */
4347 			if (Cmnd->use_sg) {
4348 				fi->g.outb_sest_entry.edb_address = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
4349 				sl1 = (struct scatterlist *)Cmnd->request_buffer;
4350 				for(i = 0; i < no_of_sg; i++) {
4351 				int count = 0, j;
4352 					count = sl1->length / SEST_BUFFER_SIZE;
4353 					for (j = 0; j < count; j++) {
4354 						build_EDB(fi, (char *)sl1->address, 0, SEST_BUFFER_SIZE);
4355 						memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4356 						/* Mark this EDB as being in use */
4357 						fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4358 						/* We have already made sure that we have enuff
4359 				 	 	 * free EDBs that are contiguous. So this is
4360 						 * safe.
4361 				 	 	 */
4362 						update_EDB_indx(fi);
4363 						sl1->address += SEST_BUFFER_SIZE;
4364 					}
4365 					/* Just in case itz not a multiple of
4366 					 * SEST_BUFFER_SIZE bytes.
4367 					 */
4368 					if (sl1->length % SEST_BUFFER_SIZE) {
4369 						build_EDB(fi, (char *)sl1->address, 0, sl1->length % SEST_BUFFER_SIZE);
4370 						memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4371 						fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4372 						update_EDB_indx(fi);
4373 					}
4374 					sl1++;
4375 				}
4376 				/* The last EDB is special. It needs the "end bit" to
4377 				 * be set.
4378 				 */
				*(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) = *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) | htons(EDB_END);
4380 			}
4381 			else {
4382 			int count = 0, j;
4383 				fi->g.outb_sest_entry.edb_address = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
4384 				count = Cmnd->request_bufflen / SEST_BUFFER_SIZE;
4385 				for (j = 0; j < count; j++) {
4386 					build_EDB(fi, (char *)req_buffer, 0, SEST_BUFFER_SIZE);
4387 					memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4388 					/* Mark this EDB as being in use */
4389 					fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4390 					/* We have already made sure that we have enuff
4391 			 	 	 * free EDBs that are contiguous. So this is
4392 					 * safe.
4393 			 	 	 */
4394 					update_EDB_indx(fi);
4395 					req_buffer += SEST_BUFFER_SIZE;
4396 				}
4397 				/* Just in case itz not a multiple of
4398 				 * SEST_BUFFER_SIZE bytes.
4399 				 */
4400 				if (Cmnd->request_bufflen % SEST_BUFFER_SIZE) {
4401 					build_EDB(fi, (char *)req_buffer, EDB_END, Cmnd->request_bufflen % SEST_BUFFER_SIZE);
4402 					memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4403 					fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4404 					update_EDB_indx(fi);
4405 				}
4406 				else {
4407 					/* Mark the last EDB as the "end edb".
4408 					 */
4409 					*(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) = *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) | htons(EDB_END);
4410 				}
4411 			}
4412 
			/* Finally we have something to send!
4414 			 */
4415 			memcpy(fi->q.ptr_sest[fi->g.scsi_oxid], &fi->g.outb_sest_entry, sizeof(OUTB_SEST_ENTRY));
4416 			break;
4417 		}
4418 	return 0;
4419 }
4420 
4421 static void update_FCP_CMND_indx(struct fc_info *fi)
4422 {
4423 	fi->q.fcp_cmnd_indx++;
4424 	if (fi->q.fcp_cmnd_indx == NO_OF_FCP_CMNDS)
4425 		fi->q.fcp_cmnd_indx = 0;
4426 }
4427 
4428 static int get_scsi_oxid(struct fc_info *fi)
4429 {
4430 u_short initial_oxid = fi->g.scsi_oxid;
4431 	/* Check if the OX_ID is in use.
4432 	 * We could have an outstanding SCSI command.
4433 	 */
4434 	while (fi->q.free_scsi_oxid[fi->g.scsi_oxid] != OXID_AVAILABLE) {
4435 		update_scsi_oxid(fi);
4436 		if (fi->g.scsi_oxid == initial_oxid) {
4437 			T_MSG("No free OX_IDs avaliable")
4438 			reset_tachyon(fi, SOFTWARE_RESET);
4439 			return 1;
4440 		}
4441 	}
4442 	return 0;
4443 }
4444 
4445 static void update_scsi_oxid(struct fc_info *fi)
4446 {
4447 	fi->g.scsi_oxid++;
4448 	if (fi->g.scsi_oxid == (MAX_SCSI_XID + 1))
4449 		fi->g.scsi_oxid = 0;
4450 }
4451 
4452 static int get_free_SDB(struct fc_info *fi)
4453 {
4454 unsigned int initial_indx = fi->q.sdb_indx;
4455 	/* Check if the SDB is in use.
4456 	 * We could have an outstanding SCSI Read command.
4457 	 * We should find a free slot as we can queue a
4458 	 * maximum of 32 SCSI commands only.
4459 	 */
4460 	while (fi->q.sdb_slot_status[fi->q.sdb_indx] != SDB_FREE) {
4461 		update_SDB_indx(fi);
4462 		if (fi->q.sdb_indx == initial_indx) {
4463 			T_MSG("No free SDB buffers avaliable")
4464 			reset_tachyon(fi, SOFTWARE_RESET);
4465 			return 1;
4466 		}
4467 	}
4468 	return 0;
4469 }
4470 
4471 static void update_SDB_indx(struct fc_info *fi)
4472 {
4473 	fi->q.sdb_indx++;
4474 	if (fi->q.sdb_indx == NO_OF_SDB_ENTRIES)
4475 		fi->q.sdb_indx = 0;
4476 }
4477 
4478 int iph5526_release(struct Scsi_Host *host)
4479 {
4480 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata*)host->hostdata;
4481 struct fc_info *fi = hostdata->fi;
4482 	free_irq(host->irq, host);
4483 	iounmap(fi->g.mem_base);
4484 	return 0;
4485 }
4486 
4487 const char *iph5526_info(struct Scsi_Host *host)
4488 {
4489 static char buf[80];
4490 	sprintf(buf, "Interphase 5526 Fibre Channel PCI SCSI Adapter using IRQ %d\n", host->irq);
4491 	return buf;
4492 }
4493 
4494 #ifdef MODULE
4495 
4496 #define NAMELEN		8	/* # of chars for storing dev->name */
4497 
4498 static struct net_device *dev_fc[MAX_FC_CARDS];
4499 
4500 static int io;
4501 static int irq;
4502 static int bad;	/* 0xbad = bad sig or no reset ack */
4503 static int scsi_registered;
4504 
4505 
4506 int init_module(void)
4507 {
4508 int i = 0;
4509 
4510 	driver_template.module = &__this_module;
4511 	scsi_register_module(MODULE_SCSI_HA, &driver_template);
4512 	if (driver_template.present)
4513 		scsi_registered = TRUE;
4514 	else {
4515 		printk("iph5526: SCSI registeration failed!!!\n");
4516 		scsi_registered = FALSE;
4517 		scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
4518 	}
4519 
4520 	while(fc[i] != NULL) {
4521 		dev_fc[i] = NULL;
4522 		dev_fc[i] = init_fcdev(dev_fc[i], 0);
4523 		if (dev_fc[i] == NULL) {
4524 			printk("iph5526.c: init_fcdev failed for card #%d\n", i+1);
4525 			break;
4526 		}
4527 		dev_fc[i]->irq = irq;
4528 		dev_fc[i]->mem_end = bad;
4529 		dev_fc[i]->base_addr = io;
4530 		dev_fc[i]->init = iph5526_probe;
4531 		dev_fc[i]->priv = fc[i];
4532 		fc[i]->dev = dev_fc[i];
4533 		if (register_fcdev(dev_fc[i]) != 0) {
4534 			kfree(dev_fc[i]);
4535 			dev_fc[i] = NULL;
4536 			if (i == 0) {
4537 				printk("iph5526.c: IP registeration failed!!!\n");
4538 				return -ENODEV;
4539 			}
4540 		}
4541 		i++;
4542 	}
4543 	if (i == 0)
4544 		return -ENODEV;
4545 
4546 	return 0;
4547 }
4548 
4549 void cleanup_module(void)
4550 {
4551 int i = 0;
4552 	while(fc[i] != NULL) {
4553 	struct net_device *dev = fc[i]->dev;
4554 	void *priv = dev->priv;
4555 		fc[i]->g.dont_init = TRUE;
4556 		take_tachyon_offline(fc[i]);
4557 		unregister_fcdev(dev);
4558 		clean_up_memory(fc[i]);
4559 		if (dev->priv)
4560 			kfree(priv);
4561 		kfree(dev);
4562 		dev = NULL;
4563 		i++;
4564 	}
4565 	if (scsi_registered == TRUE)
4566 		scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
4567 }
4568 #endif /* MODULE */
4569 
4570 void clean_up_memory(struct fc_info *fi)
4571 {
4572 int i,j;
4573 	ENTER("clean_up_memory");
4574 	if (fi->q.ptr_mfsbq_base)
4575 		free_pages((u_long)bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base))), 5);
4576 	DPRINTK("after kfree2");
4577 	for (i = 0; i < SFSBQ_LENGTH; i++)
4578 		for (j = 0; j < NO_OF_ENTRIES; j++)
4579 			if (fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES + j])
4580 				kfree(fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES + j]);
4581 	DPRINTK("after kfree1");
4582 	if (fi->q.ptr_ocq_base)
4583 		free_page((u_long)fi->q.ptr_ocq_base);
4584 	if (fi->q.ptr_imq_base)
4585 		free_page((u_long)fi->q.ptr_imq_base);
4586 	if (fi->q.ptr_mfsbq_base)
4587 		free_page((u_long)fi->q.ptr_mfsbq_base);
4588 	if (fi->q.ptr_sfsbq_base)
4589 		free_page((u_long)fi->q.ptr_sfsbq_base);
4590 	if (fi->q.ptr_edb_base)
4591 		free_pages((u_long)fi->q.ptr_edb_base, 5);
4592 	if (fi->q.ptr_sest_base)
4593 		free_pages((u_long)fi->q.ptr_sest_base, 5);
4594 	if (fi->q.ptr_tachyon_header_base)
4595 		free_page((u_long)fi->q.ptr_tachyon_header_base);
4596 	if (fi->q.ptr_sdb_base)
4597 		free_pages((u_long)fi->q.ptr_sdb_base, 5);
4598 	if (fi->q.ptr_fcp_cmnd_base)
4599 		free_page((u_long)fi->q.ptr_fcp_cmnd_base);
4600 	DPRINTK("after free_pages");
4601 	if (fi->q.ptr_host_ocq_cons_indx)
4602 		kfree(fi->q.ptr_host_ocq_cons_indx);
4603 	if (fi->q.ptr_host_hpcq_cons_indx)
4604 		kfree(fi->q.ptr_host_hpcq_cons_indx);
4605 	if (fi->q.ptr_host_imq_prod_indx)
4606 		kfree(fi->q.ptr_host_imq_prod_indx);
4607 	DPRINTK("after kfree3");
4608 	while (fi->node_info_list) {
4609 	struct fc_node_info *temp_list = fi->node_info_list;
4610 		fi->node_info_list = fi->node_info_list->next;
4611 		kfree(temp_list);
4612 	}
4613 	while (fi->ox_id_list) {
4614 	struct ox_id_els_map *temp = fi->ox_id_list;
4615 		fi->ox_id_list = fi->ox_id_list->next;
4616 		kfree(temp);
4617 	}
4618 	LEAVE("clean_up_memory");
4619 }
4620 
4621 static int initialize_register_pointers(struct fc_info *fi)
4622 {
4623 ENTER("initialize_register_pointers");
if (fi->g.tachyon_base == 0)
	return 0;	/* the caller treats a zero return as failure */
4626 
4627 fi->i_r.ptr_ichip_hw_control_reg	= ICHIP_HW_CONTROL_REG_OFF + fi->g.tachyon_base;
4628 fi->i_r.ptr_ichip_hw_status_reg = ICHIP_HW_STATUS_REG_OFF + fi->g.tachyon_base;
4629 fi->i_r.ptr_ichip_hw_addr_mask_reg = ICHIP_HW_ADDR_MASK_REG_OFF + fi->g.tachyon_base;
4630 fi->t_r.ptr_ocq_base_reg = OCQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4631 fi->t_r.ptr_ocq_len_reg = OCQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4632 fi->t_r.ptr_ocq_prod_indx_reg = OCQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4633 fi->t_r.ptr_ocq_cons_indx_reg = OCQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4634 fi->t_r.ptr_imq_base_reg = IMQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4635 fi->t_r.ptr_imq_len_reg = IMQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4636 fi->t_r.ptr_imq_cons_indx_reg = IMQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4637 fi->t_r.ptr_imq_prod_indx_reg = IMQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4638 fi->t_r.ptr_mfsbq_base_reg = MFSBQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4639 fi->t_r.ptr_mfsbq_len_reg = MFSBQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4640 fi->t_r.ptr_mfsbq_prod_reg = MFSBQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4641 fi->t_r.ptr_mfsbq_cons_reg = MFSBQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4642 fi->t_r.ptr_mfsbuff_len_reg = MFS_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4643 fi->t_r.ptr_sfsbq_base_reg = SFSBQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4644 fi->t_r.ptr_sfsbq_len_reg = SFSBQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4645 fi->t_r.ptr_sfsbq_prod_reg = SFSBQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4646 fi->t_r.ptr_sfsbq_cons_reg = SFSBQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4647 fi->t_r.ptr_sfsbuff_len_reg = SFS_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4648 fi->t_r.ptr_sest_base_reg = SEST_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4649 fi->t_r.ptr_sest_len_reg = SEST_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4650 fi->t_r.ptr_scsibuff_len_reg = SCSI_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4651 fi->t_r.ptr_tach_config_reg = TACHYON_CONFIG_REGISTER_OFFSET + fi->g.tachyon_base;
4652 fi->t_r.ptr_tach_control_reg = TACHYON_CONTROL_REGISTER_OFFSET + fi->g.tachyon_base;
4653 fi->t_r.ptr_tach_status_reg = TACHYON_STATUS_REGISTER_OFFSET + fi->g.tachyon_base;
4654 fi->t_r.ptr_tach_flush_oxid_reg = TACHYON_FLUSH_SEST_REGISTER_OFFSET + fi->g.tachyon_base;
4655 fi->t_r.ptr_fm_config_reg = FMGR_CONFIG_REGISTER_OFFSET + fi->g.tachyon_base;
4656 fi->t_r.ptr_fm_control_reg = FMGR_CONTROL_REGISTER_OFFSET + fi->g.tachyon_base;
4657 fi->t_r.ptr_fm_status_reg = FMGR_STATUS_REGISTER_OFFSET + fi->g.tachyon_base;
4658 fi->t_r.ptr_fm_tov_reg = FMGR_TIMER_REGISTER_OFFSET + fi->g.tachyon_base;
4659 fi->t_r.ptr_fm_wwn_hi_reg = FMGR_WWN_HI_REGISTER_OFFSET + fi->g.tachyon_base;
4660 fi->t_r.ptr_fm_wwn_low_reg = FMGR_WWN_LO_REGISTER_OFFSET + fi->g.tachyon_base;
4661 fi->t_r.ptr_fm_rx_al_pa_reg = FMGR_RCVD_ALPA_REGISTER_OFFSET + fi->g.tachyon_base;
4662 
4663 LEAVE("initialize_register_pointers");
4664 return 1;
4665 }
4666 
4667 
4668 
4669 /*
4670  * Local variables:
4671  *  compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I/usr/src/linux/net/tcp -c iph5526.c"
4672  *  version-control: t
4673  *  kept-new-versions: 5
4674  * End:
4675  */
4676