1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_gbl.h"
9 
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13 
14 #include "qla_devtbl.h"
15 
16 #ifdef CONFIG_SPARC
17 #include <asm/prom.h>
18 #endif
19 
20 /*
21 *  QLogic ISP2x00 Hardware Support Function Prototypes.
22 */
23 static int qla2x00_isp_firmware(scsi_qla_host_t *);
24 static int qla2x00_setup_chip(scsi_qla_host_t *);
25 static int qla2x00_init_rings(scsi_qla_host_t *);
26 static int qla2x00_fw_ready(scsi_qla_host_t *);
27 static int qla2x00_configure_hba(scsi_qla_host_t *);
28 static int qla2x00_configure_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30 static int qla2x00_configure_fabric(scsi_qla_host_t *);
31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
32 static int qla2x00_device_resync(scsi_qla_host_t *);
33 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
34     uint16_t *);
35 
36 static int qla2x00_restart_isp(scsi_qla_host_t *);
37 
38 static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
39 
40 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
41 static int qla84xx_init_chip(scsi_qla_host_t *);
42 static int qla25xx_init_queues(struct qla_hw_data *);
43 
44 /* SRB Extensions ---------------------------------------------------------- */
45 
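/*
 * Timer callback for a context SRB.  When the IOCB timer fires, clear
 * the command's slot in the base request queue's outstanding array
 * under the hardware lock, then invoke the ctx IOCB's timeout() and
 * free() handlers.
 */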
46 static void
47 qla2x00_ctx_sp_timeout(unsigned long __data)
48 {
49 	srb_t *sp = (srb_t *)__data;
50 	struct srb_ctx *ctx;
51 	struct srb_iocb *iocb;
52 	fc_port_t *fcport = sp->fcport;
53 	struct qla_hw_data *ha = fcport->vha->hw;
54 	struct req_que *req;
55 	unsigned long flags;
56 
57 	spin_lock_irqsave(&ha->hardware_lock, flags);
58 	req = ha->req_q_map[0];
59 	req->outstanding_cmds[sp->handle] = NULL;
60 	ctx = sp->ctx;
61 	iocb = ctx->u.iocb_cmd;
62 	iocb->timeout(sp);
63 	iocb->free(sp);
64 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
65 }
66 
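/*
 * Release a context SRB: stop its timer, free the srb_iocb and srb_ctx
 * structures, return the SRB to the mempool and drop the vha busy
 * reference taken at allocation time.
 */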
67 static void
68 qla2x00_ctx_sp_free(srb_t *sp)
69 {
70 	struct srb_ctx *ctx = sp->ctx;
71 	struct srb_iocb *iocb = ctx->u.iocb_cmd;
72 	struct scsi_qla_host *vha = sp->fcport->vha;
73 
74 	del_timer(&iocb->timer);
75 	kfree(iocb);
76 	kfree(ctx);
77 	mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
78 
79 	QLA_VHA_MARK_NOT_BUSY(vha);
80 }
81 
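/*
 * Allocate an SRB with an attached context of 'size' bytes plus an
 * srb_iocb, marking the vha busy for the lifetime of the request.  If
 * 'tmo' is non-zero, a timer is armed to expire the command through
 * qla2x00_ctx_sp_timeout() after tmo seconds.  Returns NULL on any
 * allocation failure (the busy mark is dropped in that case).
 */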
82 inline srb_t *
83 qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
84     unsigned long tmo)
85 {
86 	srb_t *sp = NULL;
87 	struct qla_hw_data *ha = vha->hw;
88 	struct srb_ctx *ctx;
89 	struct srb_iocb *iocb;
90 	uint8_t bail;
91 
92 	QLA_VHA_MARK_BUSY(vha, bail);
93 	if (bail)
94 		return NULL;
95 
96 	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
97 	if (!sp)
98 		goto done;
99 	ctx = kzalloc(size, GFP_KERNEL);
100 	if (!ctx) {
101 		mempool_free(sp, ha->srb_mempool);
102 		sp = NULL;
103 		goto done;
104 	}
105 	iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
106 	if (!iocb) {
107 		mempool_free(sp, ha->srb_mempool);
108 		sp = NULL;
109 		kfree(ctx);
110 		goto done;
111 	}
112 
113 	memset(sp, 0, sizeof(*sp));
114 	sp->fcport = fcport;
115 	sp->ctx = ctx;
116 	ctx->u.iocb_cmd = iocb;
117 	iocb->free = qla2x00_ctx_sp_free;
118 
119 	init_timer(&iocb->timer);
120 	if (!tmo)
121 		goto done;
122 	iocb->timer.expires = jiffies + tmo * HZ;
123 	iocb->timer.data = (unsigned long)sp;
124 	iocb->timer.function = qla2x00_ctx_sp_timeout;
125 	add_timer(&iocb->timer);
126 done:
127 	if (!sp)
128 		QLA_VHA_MARK_NOT_BUSY(vha);
129 	return sp;
130 }
131 
132 /* Asynchronous Login/Logout Routines -------------------------------------- */
133 
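/*
 * Derive the timeout (in seconds) used for the async logio/TM IOCBs:
 * roughly twice the switch-negotiated R_A_TOV on FWI2-capable ISPs, or
 * the ICB-seeded login timeout on earlier parts.  Callers add a couple
 * of seconds of slack on top of this value.
 */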
134 static inline unsigned long
135 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
136 {
137 	unsigned long tmo;
138 	struct qla_hw_data *ha = vha->hw;
139 
140 	/* Firmware should use switch negotiated r_a_tov for timeout. */
141 	tmo = ha->r_a_tov / 10 * 2;
142 	if (!IS_FWI2_CAPABLE(ha)) {
143 		/*
144 		 * Except for earlier ISPs where the timeout is seeded from the
145 		 * initialization control block.
146 		 */
147 		tmo = ha->login_timeout;
148 	}
149 	return tmo;
150 }
151 
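/*
 * Common timeout handler for async logio IOCBs.  Clears FCF_ASYNC_SENT
 * and, for a timed-out login, queues an explicit logout followed by a
 * login-done work item carrying MBS_COMMAND_ERROR so the normal retry
 * handling (see qla2x00_async_login_done()) can run.
 */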
152 static void
153 qla2x00_async_iocb_timeout(srb_t *sp)
154 {
155 	fc_port_t *fcport = sp->fcport;
156 	struct srb_ctx *ctx = sp->ctx;
157 
158 	DEBUG2(printk(KERN_WARNING
159 		"scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n",
160 		fcport->vha->host_no, sp->handle,
161 		ctx->name, fcport->d_id.b.domain,
162 		fcport->d_id.b.area, fcport->d_id.b.al_pa));
163 
164 	fcport->flags &= ~FCF_ASYNC_SENT;
165 	if (ctx->type == SRB_LOGIN_CMD) {
166 		struct srb_iocb *lio = ctx->u.iocb_cmd;
167 		qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
168 		/* Retry as needed. */
169 		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
170 		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
171 			QLA_LOGIO_LOGIN_RETRIED : 0;
172 		qla2x00_post_async_login_done_work(fcport->vha, fcport,
173 			lio->u.logio.data);
174 	}
175 }
176 
177 static void
178 qla2x00_async_login_ctx_done(srb_t *sp)
179 {
180 	struct srb_ctx *ctx = sp->ctx;
181 	struct srb_iocb *lio = ctx->u.iocb_cmd;
182 
183 	qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
184 		lio->u.logio.data);
185 	lio->free(sp);
186 }
187 
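/**
 * qla2x00_async_login() - Issue an asynchronous PLOGI to a remote port.
 * @vha: HA context
 * @fcport: remote port to log in to
 * @data: mailbox-style status array; data[1] carries the login-retried flag
 *
 * Allocates a context SRB, fills in the login IOCB callbacks and hands
 * the request to qla2x00_start_sp().  Completion is reported through
 * qla2x00_async_login_ctx_done().
 */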
188 int
189 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
190     uint16_t *data)
191 {
192 	srb_t *sp;
193 	struct srb_ctx *ctx;
194 	struct srb_iocb *lio;
195 	int rval;
196 
197 	rval = QLA_FUNCTION_FAILED;
198 	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
199 	    qla2x00_get_async_timeout(vha) + 2);
200 	if (!sp)
201 		goto done;
202 
203 	ctx = sp->ctx;
204 	ctx->type = SRB_LOGIN_CMD;
205 	ctx->name = "login";
206 	lio = ctx->u.iocb_cmd;
207 	lio->timeout = qla2x00_async_iocb_timeout;
208 	lio->done = qla2x00_async_login_ctx_done;
209 	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
210 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
211 		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
212 	rval = qla2x00_start_sp(sp);
213 	if (rval != QLA_SUCCESS)
214 		goto done_free_sp;
215 
216 	DEBUG2(printk(KERN_DEBUG
217 	    "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
218 	    "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
219 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
220 	    fcport->login_retry));
221 	return rval;
222 
223 done_free_sp:
224 	lio->free(sp);
225 done:
226 	return rval;
227 }
228 
229 static void
230 qla2x00_async_logout_ctx_done(srb_t *sp)
231 {
232 	struct srb_ctx *ctx = sp->ctx;
233 	struct srb_iocb *lio = ctx->u.iocb_cmd;
234 
235 	qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
236 	    lio->u.logio.data);
237 	lio->free(sp);
238 }
239 
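/**
 * qla2x00_async_logout() - Issue an asynchronous LOGO to a remote port.
 * @vha: HA context
 * @fcport: remote port to log out of
 *
 * Mirrors qla2x00_async_login() but takes no logio flags; completion is
 * posted through qla2x00_async_logout_ctx_done().
 */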
240 int
241 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
242 {
243 	srb_t *sp;
244 	struct srb_ctx *ctx;
245 	struct srb_iocb *lio;
246 	int rval;
247 
248 	rval = QLA_FUNCTION_FAILED;
249 	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
250 	    qla2x00_get_async_timeout(vha) + 2);
251 	if (!sp)
252 		goto done;
253 
254 	ctx = sp->ctx;
255 	ctx->type = SRB_LOGOUT_CMD;
256 	ctx->name = "logout";
257 	lio = ctx->u.iocb_cmd;
258 	lio->timeout = qla2x00_async_iocb_timeout;
259 	lio->done = qla2x00_async_logout_ctx_done;
260 	rval = qla2x00_start_sp(sp);
261 	if (rval != QLA_SUCCESS)
262 		goto done_free_sp;
263 
264 	DEBUG2(printk(KERN_DEBUG
265 	    "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
266 	    fcport->vha->host_no, sp->handle, fcport->loop_id,
267 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
268 	return rval;
269 
270 done_free_sp:
271 	lio->free(sp);
272 done:
273 	return rval;
274 }
275 
276 static void
277 qla2x00_async_adisc_ctx_done(srb_t *sp)
278 {
279 	struct srb_ctx *ctx = sp->ctx;
280 	struct srb_iocb *lio = ctx->u.iocb_cmd;
281 
282 	qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
283 	    lio->u.logio.data);
284 	lio->free(sp);
285 }
286 
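/**
 * qla2x00_async_adisc() - Issue an asynchronous ADISC to a remote port.
 * @vha: HA context
 * @fcport: remote port to re-validate
 * @data: mailbox-style status array; data[1] carries the login-retried flag
 *
 * Typically queued for FCP-2 capable devices after a successful login
 * (see qla2x00_async_login_done()).  Completion is posted through
 * qla2x00_async_adisc_ctx_done().
 */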
287 int
288 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
289     uint16_t *data)
290 {
291 	srb_t *sp;
292 	struct srb_ctx *ctx;
293 	struct srb_iocb *lio;
294 	int rval;
295 
296 	rval = QLA_FUNCTION_FAILED;
297 	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
298 	    qla2x00_get_async_timeout(vha) + 2);
299 	if (!sp)
300 		goto done;
301 
302 	ctx = sp->ctx;
303 	ctx->type = SRB_ADISC_CMD;
304 	ctx->name = "adisc";
305 	lio = ctx->u.iocb_cmd;
306 	lio->timeout = qla2x00_async_iocb_timeout;
307 	lio->done = qla2x00_async_adisc_ctx_done;
308 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
309 		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
310 	rval = qla2x00_start_sp(sp);
311 	if (rval != QLA_SUCCESS)
312 		goto done_free_sp;
313 
314 	DEBUG2(printk(KERN_DEBUG
315 	    "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n",
316 	    fcport->vha->host_no, sp->handle, fcport->loop_id,
317 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
318 
319 	return rval;
320 
321 done_free_sp:
322 	lio->free(sp);
323 done:
324 	return rval;
325 }
326 
327 static void
328 qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
329 {
330 	struct srb_ctx *ctx = sp->ctx;
331 	struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
332 
333 	qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
334 	iocb->free(sp);
335 }
336 
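/**
 * qla2x00_async_tm_cmd() - Issue an asynchronous task-management IOCB.
 * @fcport: remote port the TM command is directed at
 * @flags: TCF_* task-management flags (e.g. TCF_LUN_RESET)
 * @lun: LUN the command applies to
 * @tag: value stored in u.tmf.data and checked on completion
 *
 * Completion runs qla2x00_async_tm_cmd_ctx_done(), which calls
 * qla2x00_async_tm_cmd_done() to issue the corresponding marker IOCB.
 */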
337 int
338 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
339 	uint32_t tag)
340 {
341 	struct scsi_qla_host *vha = fcport->vha;
342 	srb_t *sp;
343 	struct srb_ctx *ctx;
344 	struct srb_iocb *tcf;
345 	int rval;
346 
347 	rval = QLA_FUNCTION_FAILED;
348 	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
349 	    qla2x00_get_async_timeout(vha) + 2);
350 	if (!sp)
351 		goto done;
352 
353 	ctx = sp->ctx;
354 	ctx->type = SRB_TM_CMD;
355 	ctx->name = "tmf";
356 	tcf = ctx->u.iocb_cmd;
357 	tcf->u.tmf.flags = flags;
358 	tcf->u.tmf.lun = lun;
359 	tcf->u.tmf.data = tag;
360 	tcf->timeout = qla2x00_async_iocb_timeout;
361 	tcf->done = qla2x00_async_tm_cmd_ctx_done;
362 
363 	rval = qla2x00_start_sp(sp);
364 	if (rval != QLA_SUCCESS)
365 		goto done_free_sp;
366 
367 	DEBUG2(printk(KERN_DEBUG
368 	    "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n",
369 	    fcport->vha->host_no, sp->handle, fcport->loop_id,
370 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
371 
372 	return rval;
373 
374 done_free_sp:
375 	tcf->free(sp);
376 done:
377 	return rval;
378 }
379 
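/*
 * Completion handler for an async login.  MBS_COMMAND_COMPLETE
 * registers the fcport (going through ADISC first for FCP-2 devices);
 * MBS_COMMAND_ERROR either schedules a relogin or marks the device
 * lost; MBS_PORT_ID_USED and MBS_LOOP_ID_USED resolve loop-id
 * conflicts and retry the login.
 */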
380 void
381 qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
382     uint16_t *data)
383 {
384 	int rval;
385 
386 	switch (data[0]) {
387 	case MBS_COMMAND_COMPLETE:
388 		if (fcport->flags & FCF_FCP2_DEVICE) {
389 			fcport->flags |= FCF_ASYNC_SENT;
390 			qla2x00_post_async_adisc_work(vha, fcport, data);
391 			break;
392 		}
393 		qla2x00_update_fcport(vha, fcport);
394 		break;
395 	case MBS_COMMAND_ERROR:
396 		fcport->flags &= ~FCF_ASYNC_SENT;
397 		if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
398 			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
399 		else
400 			qla2x00_mark_device_lost(vha, fcport, 1, 1);
401 		break;
402 	case MBS_PORT_ID_USED:
403 		fcport->loop_id = data[1];
404 		qla2x00_post_async_logout_work(vha, fcport, NULL);
405 		qla2x00_post_async_login_work(vha, fcport, NULL);
406 		break;
407 	case MBS_LOOP_ID_USED:
408 		fcport->loop_id++;
409 		rval = qla2x00_find_new_loop_id(vha, fcport);
410 		if (rval != QLA_SUCCESS) {
411 			fcport->flags &= ~FCF_ASYNC_SENT;
412 			qla2x00_mark_device_lost(vha, fcport, 1, 1);
413 			break;
414 		}
415 		qla2x00_post_async_login_work(vha, fcport, NULL);
416 		break;
417 	}
418 	return;
419 }
420 
421 void
422 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
423     uint16_t *data)
424 {
425 	qla2x00_mark_device_lost(vha, fcport, 1, 0);
426 	return;
427 }
428 
429 void
430 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
431     uint16_t *data)
432 {
433 	if (data[0] == MBS_COMMAND_COMPLETE) {
434 		qla2x00_update_fcport(vha, fcport);
435 
436 		return;
437 	}
438 
439 	/* Retry login. */
440 	fcport->flags &= ~FCF_ASYNC_SENT;
441 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
442 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
443 	else
444 		qla2x00_mark_device_lost(vha, fcport, 1, 1);
445 
446 	return;
447 }
448 
449 void
450 qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
451     struct srb_iocb *iocb)
452 {
453 	int rval;
454 	uint32_t flags;
455 	uint16_t lun;
456 
457 	flags = iocb->u.tmf.flags;
458 	lun = (uint16_t)iocb->u.tmf.lun;
459 
460 	/* Issue Marker IOCB */
461 	rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
462 		vha->hw->rsp_q_map[0], fcport->loop_id, lun,
463 		flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
464 
465 	if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
466 		DEBUG2_3_11(printk(KERN_WARNING
467 			"%s(%ld): TM IOCB failed (%x).\n",
468 			__func__, vha->host_no, rval));
469 	}
470 
471 	return;
472 }
473 
474 /****************************************************************************/
475 /*                QLogic ISP2x00 Hardware Support Functions.                */
476 /****************************************************************************/
477 
478 /*
479 * qla2x00_initialize_adapter
480 *      Initialize board.
481 *
482 * Input:
483 *      ha = adapter block pointer.
484 *
485 * Returns:
486 *      0 = success
487 */
488 int
489 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
490 {
491 	int	rval;
492 	struct qla_hw_data *ha = vha->hw;
493 	struct req_que *req = ha->req_q_map[0];
494 
495 	/* Clear adapter flags. */
496 	vha->flags.online = 0;
497 	ha->flags.chip_reset_done = 0;
498 	vha->flags.reset_active = 0;
499 	ha->flags.pci_channel_io_perm_failure = 0;
500 	ha->flags.eeh_busy = 0;
501 	ha->flags.thermal_supported = 1;
502 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
503 	atomic_set(&vha->loop_state, LOOP_DOWN);
504 	vha->device_flags = DFLG_NO_CABLE;
505 	vha->dpc_flags = 0;
506 	vha->flags.management_server_logged_in = 0;
507 	vha->marker_needed = 0;
508 	ha->isp_abort_cnt = 0;
509 	ha->beacon_blink_led = 0;
510 
511 	set_bit(0, ha->req_qid_map);
512 	set_bit(0, ha->rsp_qid_map);
513 
514 	qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
515 	rval = ha->isp_ops->pci_config(vha);
516 	if (rval) {
517 		DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
518 		    vha->host_no));
519 		return (rval);
520 	}
521 
522 	ha->isp_ops->reset_chip(vha);
523 
524 	rval = qla2xxx_get_flash_info(vha);
525 	if (rval) {
526 		DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
527 		    vha->host_no));
528 		return (rval);
529 	}
530 
531 	ha->isp_ops->get_flash_version(vha, req->ring);
532 
533 	qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
534 
535 	ha->isp_ops->nvram_config(vha);
536 
537 	if (ha->flags.disable_serdes) {
538 		/* Mask HBA via NVRAM settings? */
539 		qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
540 		    "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
541 		    vha->port_name[0], vha->port_name[1],
542 		    vha->port_name[2], vha->port_name[3],
543 		    vha->port_name[4], vha->port_name[5],
544 		    vha->port_name[6], vha->port_name[7]);
545 		return QLA_FUNCTION_FAILED;
546 	}
547 
548 	qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
549 
550 	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
551 		rval = ha->isp_ops->chip_diag(vha);
552 		if (rval)
553 			return (rval);
554 		rval = qla2x00_setup_chip(vha);
555 		if (rval)
556 			return (rval);
557 	}
558 
559 	if (IS_QLA84XX(ha)) {
560 		ha->cs84xx = qla84xx_get_chip(vha);
561 		if (!ha->cs84xx) {
562 			qla_printk(KERN_ERR, ha,
563 			    "Unable to configure ISP84XX.\n");
564 			return QLA_FUNCTION_FAILED;
565 		}
566 	}
567 	rval = qla2x00_init_rings(vha);
568 	ha->flags.chip_reset_done = 1;
569 
570 	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
571 		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
572 		rval = qla84xx_init_chip(vha);
573 		if (rval != QLA_SUCCESS) {
574 			qla_printk(KERN_ERR, ha,
575 				"Unable to initialize ISP84XX.\n");
576 			qla84xx_put_chip(vha);
577 		}
578 	}
579 
580 	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
581 		qla24xx_read_fcp_prio_cfg(vha);
582 
583 	return (rval);
584 }
585 
586 /**
587  * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
588  * @vha: HA context
589  *
590  * Returns 0 on success.
591  */
592 int
593 qla2100_pci_config(scsi_qla_host_t *vha)
594 {
595 	uint16_t w;
596 	unsigned long flags;
597 	struct qla_hw_data *ha = vha->hw;
598 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
599 
600 	pci_set_master(ha->pdev);
601 	pci_try_set_mwi(ha->pdev);
602 
603 	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
604 	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
605 	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
606 
607 	pci_disable_rom(ha->pdev);
608 
609 	/* Get PCI bus information. */
610 	spin_lock_irqsave(&ha->hardware_lock, flags);
611 	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
612 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
613 
614 	return QLA_SUCCESS;
615 }
616 
617 /**
618  * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
619  * @vha: HA context
620  *
621  * Returns 0 on success.
622  */
623 int
624 qla2300_pci_config(scsi_qla_host_t *vha)
625 {
626 	uint16_t	w;
627 	unsigned long   flags = 0;
628 	uint32_t	cnt;
629 	struct qla_hw_data *ha = vha->hw;
630 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
631 
632 	pci_set_master(ha->pdev);
633 	pci_try_set_mwi(ha->pdev);
634 
635 	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
636 	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
637 
638 	if (IS_QLA2322(ha) || IS_QLA6322(ha))
639 		w &= ~PCI_COMMAND_INTX_DISABLE;
640 	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
641 
642 	/*
643 	 * If this is a 2300 card and not 2312, reset the
644 	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
645 	 * the 2310 also reports itself as a 2300 so we need to get the
646 	 * fb revision level -- a 6 indicates it really is a 2300 and
647 	 * not a 2310.
648 	 */
649 	if (IS_QLA2300(ha)) {
650 		spin_lock_irqsave(&ha->hardware_lock, flags);
651 
652 		/* Pause RISC. */
653 		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
654 		for (cnt = 0; cnt < 30000; cnt++) {
655 			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
656 				break;
657 
658 			udelay(10);
659 		}
660 
661 		/* Select FPM registers. */
662 		WRT_REG_WORD(&reg->ctrl_status, 0x20);
663 		RD_REG_WORD(&reg->ctrl_status);
664 
665 		/* Get the fb rev level */
666 		ha->fb_rev = RD_FB_CMD_REG(ha, reg);
667 
668 		if (ha->fb_rev == FPM_2300)
669 			pci_clear_mwi(ha->pdev);
670 
671 		/* Deselect FPM registers. */
672 		WRT_REG_WORD(&reg->ctrl_status, 0x0);
673 		RD_REG_WORD(&reg->ctrl_status);
674 
675 		/* Release RISC module. */
676 		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
677 		for (cnt = 0; cnt < 30000; cnt++) {
678 			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
679 				break;
680 
681 			udelay(10);
682 		}
683 
684 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
685 	}
686 
687 	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
688 
689 	pci_disable_rom(ha->pdev);
690 
691 	/* Get PCI bus information. */
692 	spin_lock_irqsave(&ha->hardware_lock, flags);
693 	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
694 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
695 
696 	return QLA_SUCCESS;
697 }
698 
699 /**
700  * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
701  * @vha: HA context
702  *
703  * Returns 0 on success.
704  */
705 int
706 qla24xx_pci_config(scsi_qla_host_t *vha)
707 {
708 	uint16_t w;
709 	unsigned long flags = 0;
710 	struct qla_hw_data *ha = vha->hw;
711 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
712 
713 	pci_set_master(ha->pdev);
714 	pci_try_set_mwi(ha->pdev);
715 
716 	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
717 	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
718 	w &= ~PCI_COMMAND_INTX_DISABLE;
719 	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
720 
721 	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
722 
723 	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
724 	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
725 		pcix_set_mmrbc(ha->pdev, 2048);
726 
727 	/* PCIe -- adjust Maximum Read Request Size (2048). */
728 	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
729 		pcie_set_readrq(ha->pdev, 2048);
730 
731 	pci_disable_rom(ha->pdev);
732 
733 	ha->chip_revision = ha->pdev->revision;
734 
735 	/* Get PCI bus information. */
736 	spin_lock_irqsave(&ha->hardware_lock, flags);
737 	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
738 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
739 
740 	return QLA_SUCCESS;
741 }
742 
743 /**
744  * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
745  * @vha: HA context
746  *
747  * Returns 0 on success.
748  */
749 int
750 qla25xx_pci_config(scsi_qla_host_t *vha)
751 {
752 	uint16_t w;
753 	struct qla_hw_data *ha = vha->hw;
754 
755 	pci_set_master(ha->pdev);
756 	pci_try_set_mwi(ha->pdev);
757 
758 	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
759 	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
760 	w &= ~PCI_COMMAND_INTX_DISABLE;
761 	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
762 
763 	/* PCIe -- adjust Maximum Read Request Size (2048). */
764 	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
765 		pcie_set_readrq(ha->pdev, 2048);
766 
767 	pci_disable_rom(ha->pdev);
768 
769 	ha->chip_revision = ha->pdev->revision;
770 
771 	return QLA_SUCCESS;
772 }
773 
774 /**
775  * qla2x00_isp_firmware() - Choose firmware image.
776  * @vha: HA context
777  *
778  * Returns 0 on success.
779  */
780 static int
781 qla2x00_isp_firmware(scsi_qla_host_t *vha)
782 {
783 	int  rval;
784 	uint16_t loop_id, topo, sw_cap;
785 	uint8_t domain, area, al_pa;
786 	struct qla_hw_data *ha = vha->hw;
787 
788 	/* Assume loading risc code */
789 	rval = QLA_FUNCTION_FAILED;
790 
791 	if (ha->flags.disable_risc_code_load) {
792 		DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
793 		    vha->host_no));
794 		qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
795 
796 		/* Verify checksum of loaded RISC code. */
797 		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
798 		if (rval == QLA_SUCCESS) {
799 			/* And, verify we are not in ROM code. */
800 			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
801 			    &area, &domain, &topo, &sw_cap);
802 		}
803 	}
804 
805 	if (rval) {
806 		DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
807 		    vha->host_no));
808 	}
809 
810 	return (rval);
811 }
812 
813 /**
814  * qla2x00_reset_chip() - Reset ISP chip.
815  * @vha: HA context
816  *
817  * Returns 0 on success.
818  */
819 void
820 qla2x00_reset_chip(scsi_qla_host_t *vha)
821 {
822 	unsigned long   flags = 0;
823 	struct qla_hw_data *ha = vha->hw;
824 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
825 	uint32_t	cnt;
826 	uint16_t	cmd;
827 
828 	if (unlikely(pci_channel_offline(ha->pdev)))
829 		return;
830 
831 	ha->isp_ops->disable_intrs(ha);
832 
833 	spin_lock_irqsave(&ha->hardware_lock, flags);
834 
835 	/* Turn off master enable */
836 	cmd = 0;
837 	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
838 	cmd &= ~PCI_COMMAND_MASTER;
839 	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
840 
841 	if (!IS_QLA2100(ha)) {
842 		/* Pause RISC. */
843 		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
844 		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
845 			for (cnt = 0; cnt < 30000; cnt++) {
846 				if ((RD_REG_WORD(&reg->hccr) &
847 				    HCCR_RISC_PAUSE) != 0)
848 					break;
849 				udelay(100);
850 			}
851 		} else {
852 			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
853 			udelay(10);
854 		}
855 
856 		/* Select FPM registers. */
857 		WRT_REG_WORD(&reg->ctrl_status, 0x20);
858 		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
859 
860 		/* FPM Soft Reset. */
861 		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
862 		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */
863 
864 		/* Toggle Fpm Reset. */
865 		if (!IS_QLA2200(ha)) {
866 			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
867 			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
868 		}
869 
870 		/* Select frame buffer registers. */
871 		WRT_REG_WORD(&reg->ctrl_status, 0x10);
872 		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
873 
874 		/* Reset frame buffer FIFOs. */
875 		if (IS_QLA2200(ha)) {
876 			WRT_FB_CMD_REG(ha, reg, 0xa000);
877 			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
878 		} else {
879 			WRT_FB_CMD_REG(ha, reg, 0x00fc);
880 
881 			/* Read back fb_cmd until zero or 3 seconds max */
882 			for (cnt = 0; cnt < 3000; cnt++) {
883 				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
884 					break;
885 				udelay(100);
886 			}
887 		}
888 
889 		/* Select RISC module registers. */
890 		WRT_REG_WORD(&reg->ctrl_status, 0);
891 		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
892 
893 		/* Reset RISC processor. */
894 		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
895 		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
896 
897 		/* Release RISC processor. */
898 		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
899 		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
900 	}
901 
902 	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
903 	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
904 
905 	/* Reset ISP chip. */
906 	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
907 
908 	/* Wait for RISC to recover from reset. */
909 	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
910 		/*
911 		 * It is necessary to delay here since the card doesn't
912 		 * respond to PCI reads during a reset. On some architectures
913 		 * this will result in an MCA.
914 		 */
915 		udelay(20);
916 		for (cnt = 30000; cnt; cnt--) {
917 			if ((RD_REG_WORD(&reg->ctrl_status) &
918 			    CSR_ISP_SOFT_RESET) == 0)
919 				break;
920 			udelay(100);
921 		}
922 	} else
923 		udelay(10);
924 
925 	/* Reset RISC processor. */
926 	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
927 
928 	WRT_REG_WORD(&reg->semaphore, 0);
929 
930 	/* Release RISC processor. */
931 	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
932 	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
933 
934 	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
935 		for (cnt = 0; cnt < 30000; cnt++) {
936 			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
937 				break;
938 
939 			udelay(100);
940 		}
941 	} else
942 		udelay(100);
943 
944 	/* Turn on master enable */
945 	cmd |= PCI_COMMAND_MASTER;
946 	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
947 
948 	/* Disable RISC pause on FPM parity error. */
949 	if (!IS_QLA2100(ha)) {
950 		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
951 		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
952 	}
953 
954 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
955 }
956 
957 /**
958  * qla81xx_reset_mpi() - Resets MPI FW via Write MPI Register MBC.
959  *
960  * Returns 0 on success.
961  */
962 int
963 qla81xx_reset_mpi(scsi_qla_host_t *vha)
964 {
965 	uint16_t mb[4] = {0x1010, 0, 1, 0};
966 
967 	return qla81xx_write_mpi_register(vha, mb);
968 }
969 
970 /**
971  * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
972  * @vha: HA context
973  *
974  * Returns 0 on success.
975  */
976 static inline void
977 qla24xx_reset_risc(scsi_qla_host_t *vha)
978 {
979 	unsigned long flags = 0;
980 	struct qla_hw_data *ha = vha->hw;
981 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
982 	uint32_t cnt, d2;
983 	uint16_t wd;
984 	static int abts_cnt; /* ISP abort retry counts */
985 
986 	spin_lock_irqsave(&ha->hardware_lock, flags);
987 
988 	/* Reset RISC. */
989 	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
990 	for (cnt = 0; cnt < 30000; cnt++) {
991 		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
992 			break;
993 
994 		udelay(10);
995 	}
996 
997 	WRT_REG_DWORD(&reg->ctrl_status,
998 	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
999 	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1000 
1001 	udelay(100);
1002 	/* Wait for firmware to complete NVRAM accesses. */
1003 	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1004 	for (cnt = 10000 ; cnt && d2; cnt--) {
1005 		udelay(5);
1006 		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1007 		barrier();
1008 	}
1009 
1010 	/* Wait for soft-reset to complete. */
1011 	d2 = RD_REG_DWORD(&reg->ctrl_status);
1012 	for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
1013 		udelay(5);
1014 		d2 = RD_REG_DWORD(&reg->ctrl_status);
1015 		barrier();
1016 	}
1017 
1018 	/* If required, do an MPI FW reset now */
1019 	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
1020 		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
1021 			if (++abts_cnt < 5) {
1022 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1023 				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
1024 			} else {
1025 				/*
1026 				 * We exhausted the ISP abort retries. We have to
1027 				 * set the board offline.
1028 				 */
1029 				abts_cnt = 0;
1030 				vha->flags.online = 0;
1031 			}
1032 		}
1033 	}
1034 
1035 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
1036 	RD_REG_DWORD(&reg->hccr);
1037 
1038 	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
1039 	RD_REG_DWORD(&reg->hccr);
1040 
1041 	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
1042 	RD_REG_DWORD(&reg->hccr);
1043 
1044 	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1045 	for (cnt = 6000000 ; cnt && d2; cnt--) {
1046 		udelay(5);
1047 		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1048 		barrier();
1049 	}
1050 
1051 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1052 
1053 	if (IS_NOPOLLING_TYPE(ha))
1054 		ha->isp_ops->enable_intrs(ha);
1055 }
1056 
1057 /**
1058  * qla24xx_reset_chip() - Reset ISP24xx chip.
1059  * @ha: HA context
1060  * @vha: HA context
1061  * Returns 0 on success.
1062  */
1063 void
1064 qla24xx_reset_chip(scsi_qla_host_t *vha)
1065 {
1066 	struct qla_hw_data *ha = vha->hw;
1067 
1068 	if (pci_channel_offline(ha->pdev) &&
1069 	    ha->flags.pci_channel_io_perm_failure) {
1070 		return;
1071 	}
1072 
1073 	ha->isp_ops->disable_intrs(ha);
1074 
1075 	/* Perform RISC reset. */
1076 	qla24xx_reset_risc(vha);
1077 }
1078 
1079 /**
1080  * qla2x00_chip_diag() - Test chip for proper operation.
1081  * @vha: HA context
1082  *
1083  * Returns 0 on success.
1084  */
1085 int
1086 qla2x00_chip_diag(scsi_qla_host_t *vha)
1087 {
1088 	int		rval;
1089 	struct qla_hw_data *ha = vha->hw;
1090 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1091 	unsigned long	flags = 0;
1092 	uint16_t	data;
1093 	uint32_t	cnt;
1094 	uint16_t	mb[5];
1095 	struct req_que *req = ha->req_q_map[0];
1096 
1097 	/* Assume a failed state */
1098 	rval = QLA_FUNCTION_FAILED;
1099 
1100 	DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
1101 	    vha->host_no, (u_long)&reg->flash_address));
1102 
1103 	spin_lock_irqsave(&ha->hardware_lock, flags);
1104 
1105 	/* Reset ISP chip. */
1106 	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1107 
1108 	/*
1109 	 * We need to have a delay here since the card will not respond while
1110 	 * in reset causing an MCA on some architectures.
1111 	 */
1112 	udelay(20);
1113 	data = qla2x00_debounce_register(&reg->ctrl_status);
1114 	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
1115 		udelay(5);
1116 		data = RD_REG_WORD(&reg->ctrl_status);
1117 		barrier();
1118 	}
1119 
1120 	if (!cnt)
1121 		goto chip_diag_failed;
1122 
1123 	DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
1124 	    vha->host_no));
1125 
1126 	/* Reset RISC processor. */
1127 	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1128 	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1129 
1130 	/* Workaround for QLA2312 PCI parity error */
1131 	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1132 		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
1133 		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
1134 			udelay(5);
1135 			data = RD_MAILBOX_REG(ha, reg, 0);
1136 			barrier();
1137 		}
1138 	} else
1139 		udelay(10);
1140 
1141 	if (!cnt)
1142 		goto chip_diag_failed;
1143 
1144 	/* Check product ID of chip */
1145 	DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
1146 
1147 	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1148 	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
1149 	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
1150 	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1151 	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1152 	    mb[3] != PROD_ID_3) {
1153 		qla_printk(KERN_WARNING, ha,
1154 		    "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);
1155 
1156 		goto chip_diag_failed;
1157 	}
1158 	ha->product_id[0] = mb[1];
1159 	ha->product_id[1] = mb[2];
1160 	ha->product_id[2] = mb[3];
1161 	ha->product_id[3] = mb[4];
1162 
1163 	/* Adjust fw RISC transfer size */
1164 	if (req->length > 1024)
1165 		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
1166 	else
1167 		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
1168 		    req->length;
1169 
1170 	if (IS_QLA2200(ha) &&
1171 	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1172 		/* Limit firmware transfer size with a 2200A */
1173 		DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
1174 		    vha->host_no));
1175 
1176 		ha->device_type |= DT_ISP2200A;
1177 		ha->fw_transfer_size = 128;
1178 	}
1179 
1180 	/* Wrap Incoming Mailboxes Test. */
1181 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1182 
1183 	DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
1184 	rval = qla2x00_mbx_reg_test(vha);
1185 	if (rval) {
1186 		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
1187 		    vha->host_no));
1188 		qla_printk(KERN_WARNING, ha,
1189 		    "Failed mailbox send register test\n");
1190 	} else {
1192 		/* Flag a successful rval */
1193 		rval = QLA_SUCCESS;
1194 	}
1195 	spin_lock_irqsave(&ha->hardware_lock, flags);
1196 
1197 chip_diag_failed:
1198 	if (rval)
1199 		DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
1200 		    "****\n", vha->host_no));
1201 
1202 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1203 
1204 	return (rval);
1205 }
1206 
1207 /**
1208  * qla24xx_chip_diag() - Test ISP24xx for proper operation.
1209  * @vha: HA context
1210  *
1211  * Returns 0 on success.
1212  */
1213 int
1214 qla24xx_chip_diag(scsi_qla_host_t *vha)
1215 {
1216 	int rval;
1217 	struct qla_hw_data *ha = vha->hw;
1218 	struct req_que *req = ha->req_q_map[0];
1219 
1220 	if (IS_QLA82XX(ha))
1221 		return QLA_SUCCESS;
1222 
1223 	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
1224 
1225 	rval = qla2x00_mbx_reg_test(vha);
1226 	if (rval) {
1227 		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
1228 		    vha->host_no));
1229 		qla_printk(KERN_WARNING, ha,
1230 		    "Failed mailbox send register test\n");
1231 	} else {
1232 		/* Flag a successful rval */
1233 		rval = QLA_SUCCESS;
1234 	}
1235 
1236 	return rval;
1237 }
1238 
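/**
 * qla2x00_alloc_fw_dump() - Size and allocate the firmware-dump buffer.
 * @vha: HA context
 *
 * Computes the dump size for the detected ISP type and vmalloc()s the
 * buffer; on FWI2-capable parts that support them, FCE and EFT trace
 * buffers are also allocated and enabled here.  Nothing is done if a
 * dump buffer already exists.
 */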
1239 void
1240 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1241 {
1242 	int rval;
1243 	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
1244 	    eft_size, fce_size, mq_size;
1245 	dma_addr_t tc_dma;
1246 	void *tc;
1247 	struct qla_hw_data *ha = vha->hw;
1248 	struct req_que *req = ha->req_q_map[0];
1249 	struct rsp_que *rsp = ha->rsp_q_map[0];
1250 
1251 	if (ha->fw_dump) {
1252 		qla_printk(KERN_WARNING, ha,
1253 		    "Firmware dump previously allocated.\n");
1254 		return;
1255 	}
1256 
1257 	ha->fw_dumped = 0;
1258 	fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
1259 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1260 		fixed_size = sizeof(struct qla2100_fw_dump);
1261 	} else if (IS_QLA23XX(ha)) {
1262 		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
1263 		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1264 		    sizeof(uint16_t);
1265 	} else if (IS_FWI2_CAPABLE(ha)) {
1266 		if (IS_QLA81XX(ha))
1267 			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
1268 		else if (IS_QLA25XX(ha))
1269 			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
1270 		else
1271 			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
1272 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1273 		    sizeof(uint32_t);
1274 		if (ha->mqenable)
1275 			mq_size = sizeof(struct qla2xxx_mq_chain);
1276 		/* Allocate memory for Fibre Channel Event Buffer. */
1277 		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1278 			goto try_eft;
1279 
1280 		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1281 		    GFP_KERNEL);
1282 		if (!tc) {
1283 			qla_printk(KERN_WARNING, ha, "Unable to allocate "
1284 			    "(%d KB) for FCE.\n", FCE_SIZE / 1024);
1285 			goto try_eft;
1286 		}
1287 
1288 		memset(tc, 0, FCE_SIZE);
1289 		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
1290 		    ha->fce_mb, &ha->fce_bufs);
1291 		if (rval) {
1292 			qla_printk(KERN_WARNING, ha, "Unable to initialize "
1293 			    "FCE (%d).\n", rval);
1294 			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1295 			    tc_dma);
1296 			ha->flags.fce_enabled = 0;
1297 			goto try_eft;
1298 		}
1299 
1300 		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
1301 		    FCE_SIZE / 1024);
1302 
1303 		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
1304 		ha->flags.fce_enabled = 1;
1305 		ha->fce_dma = tc_dma;
1306 		ha->fce = tc;
1307 try_eft:
1308 		/* Allocate memory for Extended Trace Buffer. */
1309 		tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1310 		    GFP_KERNEL);
1311 		if (!tc) {
1312 			qla_printk(KERN_WARNING, ha, "Unable to allocate "
1313 			    "(%d KB) for EFT.\n", EFT_SIZE / 1024);
1314 			goto cont_alloc;
1315 		}
1316 
1317 		memset(tc, 0, EFT_SIZE);
1318 		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
1319 		if (rval) {
1320 			qla_printk(KERN_WARNING, ha, "Unable to initialize "
1321 			    "EFT (%d).\n", rval);
1322 			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1323 			    tc_dma);
1324 			goto cont_alloc;
1325 		}
1326 
1327 		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
1328 		    EFT_SIZE / 1024);
1329 
1330 		eft_size = EFT_SIZE;
1331 		ha->eft_dma = tc_dma;
1332 		ha->eft = tc;
1333 	}
1334 cont_alloc:
1335 	req_q_size = req->length * sizeof(request_t);
1336 	rsp_q_size = rsp->length * sizeof(response_t);
1337 
1338 	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
1339 	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
1340 	ha->chain_offset = dump_size;
1341 	dump_size += mq_size + fce_size;
1342 
1343 	ha->fw_dump = vmalloc(dump_size);
1344 	if (!ha->fw_dump) {
1345 		qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
1346 		    "firmware dump!!!\n", dump_size / 1024);
1347 
1348 		if (ha->fce) {
1349 			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
1350 			    ha->fce_dma);
1351 			ha->fce = NULL;
1352 			ha->fce_dma = 0;
1353 		}
1354 
1355 		if (ha->eft) {
1356 			dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
1357 			    ha->eft_dma);
1358 			ha->eft = NULL;
1359 			ha->eft_dma = 0;
1360 		}
1361 		return;
1362 	}
1363 	qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
1364 	    dump_size / 1024);
1365 
1366 	ha->fw_dump_len = dump_size;
1367 	ha->fw_dump->signature[0] = 'Q';
1368 	ha->fw_dump->signature[1] = 'L';
1369 	ha->fw_dump->signature[2] = 'G';
1370 	ha->fw_dump->signature[3] = 'C';
1371 	ha->fw_dump->version = __constant_htonl(1);
1372 
1373 	ha->fw_dump->fixed_size = htonl(fixed_size);
1374 	ha->fw_dump->mem_size = htonl(mem_size);
1375 	ha->fw_dump->req_q_size = htonl(req_q_size);
1376 	ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
1377 
1378 	ha->fw_dump->eft_size = htonl(eft_size);
1379 	ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
1380 	ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
1381 
1382 	ha->fw_dump->header_size =
1383 	    htonl(offsetof(struct qla2xxx_fw_dump, isp));
1384 }
1385 
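/*
 * Sync-MPI helper for ISP81xx: acquire the firmware semaphore at RAM
 * word 0x7c00, compare the MPS bits (MPS_MASK) read from PCI config
 * offset 0x54 against RAM word 0x7a15, rewrite the RAM word if they
 * differ, then release the semaphore.  No-op on other ISP types.
 */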
1386 static int
1387 qla81xx_mpi_sync(scsi_qla_host_t *vha)
1388 {
1389 #define MPS_MASK	0xe0
1390 	int rval;
1391 	uint16_t dc;
1392 	uint32_t dw;
1393 	struct qla_hw_data *ha = vha->hw;
1394 
1395 	if (!IS_QLA81XX(vha->hw))
1396 		return QLA_SUCCESS;
1397 
1398 	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1399 	if (rval != QLA_SUCCESS) {
1400 		DEBUG2(qla_printk(KERN_WARNING, ha,
1401 		    "Sync-MPI: Unable to acquire semaphore.\n"));
1402 		goto done;
1403 	}
1404 
1405 	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1406 	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1407 	if (rval != QLA_SUCCESS) {
1408 		DEBUG2(qla_printk(KERN_WARNING, ha,
1409 		    "Sync-MPI: Unable to read sync.\n"));
1410 		goto done_release;
1411 	}
1412 
1413 	dc &= MPS_MASK;
1414 	if (dc == (dw & MPS_MASK))
1415 		goto done_release;
1416 
1417 	dw &= ~MPS_MASK;
1418 	dw |= dc;
1419 	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1420 	if (rval != QLA_SUCCESS) {
1421 		DEBUG2(qla_printk(KERN_WARNING, ha,
1422 		    "Sync-MPI: Unable to gain sync.\n"));
1423 	}
1424 
1425 done_release:
1426 	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1427 	if (rval != QLA_SUCCESS) {
1428 		DEBUG2(qla_printk(KERN_WARNING, ha,
1429 		    "Sync-MPI: Unable to release semaphore.\n"));
1430 	}
1431 
1432 done:
1433 	return rval;
1434 }
1435 
1436 /**
1437  * qla2x00_setup_chip() - Load and start RISC firmware.
1438  * @vha: HA context
1439  *
1440  * Returns 0 on success.
1441  */
1442 static int
1443 qla2x00_setup_chip(scsi_qla_host_t *vha)
1444 {
1445 	int rval;
1446 	uint32_t srisc_address = 0;
1447 	struct qla_hw_data *ha = vha->hw;
1448 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1449 	unsigned long flags;
1450 	uint16_t fw_major_version;
1451 
1452 	if (IS_QLA82XX(ha)) {
1453 		rval = ha->isp_ops->load_risc(vha, &srisc_address);
1454 		if (rval == QLA_SUCCESS) {
1455 			qla2x00_stop_firmware(vha);
1456 			goto enable_82xx_npiv;
1457 		} else
1458 			goto failed;
1459 	}
1460 
1461 	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1462 		/* Disable SRAM, Instruction RAM and GP RAM parity.  */
1463 		spin_lock_irqsave(&ha->hardware_lock, flags);
1464 		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
1465 		RD_REG_WORD(&reg->hccr);
1466 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1467 	}
1468 
1469 	qla81xx_mpi_sync(vha);
1470 
1471 	/* Load firmware sequences */
1472 	rval = ha->isp_ops->load_risc(vha, &srisc_address);
1473 	if (rval == QLA_SUCCESS) {
1474 		DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
1475 		    "code.\n", vha->host_no));
1476 
1477 		rval = qla2x00_verify_checksum(vha, srisc_address);
1478 		if (rval == QLA_SUCCESS) {
1479 			/* Start firmware execution. */
1480 			DEBUG(printk("scsi(%ld): Checksum OK, start "
1481 			    "firmware.\n", vha->host_no));
1482 
1483 			rval = qla2x00_execute_fw(vha, srisc_address);
1484 			/* Retrieve firmware information. */
1485 			if (rval == QLA_SUCCESS) {
1486 enable_82xx_npiv:
1487 				fw_major_version = ha->fw_major_version;
1488 				rval = qla2x00_get_fw_version(vha,
1489 				    &ha->fw_major_version,
1490 				    &ha->fw_minor_version,
1491 				    &ha->fw_subminor_version,
1492 				    &ha->fw_attributes, &ha->fw_memory_size,
1493 				    ha->mpi_version, &ha->mpi_capabilities,
1494 				    ha->phy_version);
1495 				if (rval != QLA_SUCCESS)
1496 					goto failed;
1497 				ha->flags.npiv_supported = 0;
1498 				if (IS_QLA2XXX_MIDTYPE(ha) &&
1499 					 (ha->fw_attributes & BIT_2)) {
1500 					ha->flags.npiv_supported = 1;
1501 					if ((!ha->max_npiv_vports) ||
1502 					    ((ha->max_npiv_vports + 1) %
1503 					    MIN_MULTI_ID_FABRIC))
1504 						ha->max_npiv_vports =
1505 						    MIN_MULTI_ID_FABRIC - 1;
1506 				}
1507 				qla2x00_get_resource_cnts(vha, NULL,
1508 				    &ha->fw_xcb_count, NULL, NULL,
1509 				    &ha->max_npiv_vports, NULL);
1510 
1511 				if (!fw_major_version && ql2xallocfwdump) {
1512 					if (!IS_QLA82XX(ha))
1513 						qla2x00_alloc_fw_dump(vha);
1514 				}
1515 			}
1516 		} else {
1517 			DEBUG2(printk(KERN_INFO
1518 			    "scsi(%ld): ISP Firmware failed checksum.\n",
1519 			    vha->host_no));
1520 		}
1521 	}
1522 
1523 	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1524 		/* Enable proper parity. */
1525 		spin_lock_irqsave(&ha->hardware_lock, flags);
1526 		if (IS_QLA2300(ha))
1527 			/* SRAM parity */
1528 			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
1529 		else
1530 			/* SRAM, Instruction RAM and GP RAM parity */
1531 			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
1532 		RD_REG_WORD(&reg->hccr);
1533 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1534 	}
1535 
1536 	if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1537 		uint32_t size;
1538 
1539 		rval = qla81xx_fac_get_sector_size(vha, &size);
1540 		if (rval == QLA_SUCCESS) {
1541 			ha->flags.fac_supported = 1;
1542 			ha->fdt_block_size = size << 2;
1543 		} else {
1544 			qla_printk(KERN_ERR, ha,
1545 			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
1546 			    ha->fw_major_version, ha->fw_minor_version,
1547 			    ha->fw_subminor_version);
1548 		}
1549 	}
1550 failed:
1551 	if (rval) {
1552 		DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
1553 		    vha->host_no));
1554 	}
1555 
1556 	return (rval);
1557 }
1558 
1559 /**
1560  * qla2x00_init_response_q_entries() - Initializes response queue entries.
1561  * @rsp: response queue
1562  *
1563  * Beginning of request ring has initialization control block already built
1564  * by nvram config routine.
1565  *
1566  * Returns 0 on success.
1567  */
1568 void
1569 qla2x00_init_response_q_entries(struct rsp_que *rsp)
1570 {
1571 	uint16_t cnt;
1572 	response_t *pkt;
1573 
1574 	rsp->ring_ptr = rsp->ring;
1575 	rsp->ring_index    = 0;
1576 	rsp->status_srb = NULL;
1577 	pkt = rsp->ring_ptr;
1578 	for (cnt = 0; cnt < rsp->length; cnt++) {
1579 		pkt->signature = RESPONSE_PROCESSED;
1580 		pkt++;
1581 	}
1582 }
1583 
1584 /**
1585  * qla2x00_update_fw_options() - Read and process firmware options.
1586  * @vha: HA context
1587  *
1588  * Returns 0 on success.
1589  */
1590 void
1591 qla2x00_update_fw_options(scsi_qla_host_t *vha)
1592 {
1593 	uint16_t swing, emphasis, tx_sens, rx_sens;
1594 	struct qla_hw_data *ha = vha->hw;
1595 
1596 	memset(ha->fw_options, 0, sizeof(ha->fw_options));
1597 	qla2x00_get_fw_options(vha, ha->fw_options);
1598 
1599 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
1600 		return;
1601 
1602 	/* Serial Link options. */
1603 	DEBUG3(printk("scsi(%ld): Serial link options:\n",
1604 	    vha->host_no));
1605 	DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
1606 	    sizeof(ha->fw_seriallink_options)));
1607 
1608 	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1609 	if (ha->fw_seriallink_options[3] & BIT_2) {
1610 		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
1611 
1612 		/*  1G settings */
1613 		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
1614 		emphasis = (ha->fw_seriallink_options[2] &
1615 		    (BIT_4 | BIT_3)) >> 3;
1616 		tx_sens = ha->fw_seriallink_options[0] &
1617 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1618 		rx_sens = (ha->fw_seriallink_options[0] &
1619 		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1620 		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
1621 		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1622 			if (rx_sens == 0x0)
1623 				rx_sens = 0x3;
1624 			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
1625 		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1626 			ha->fw_options[10] |= BIT_5 |
1627 			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1628 			    (tx_sens & (BIT_1 | BIT_0));
1629 
1630 		/*  2G settings */
1631 		swing = (ha->fw_seriallink_options[2] &
1632 		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
1633 		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
1634 		tx_sens = ha->fw_seriallink_options[1] &
1635 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1636 		rx_sens = (ha->fw_seriallink_options[1] &
1637 		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1638 		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
1639 		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1640 			if (rx_sens == 0x0)
1641 				rx_sens = 0x3;
1642 			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
1643 		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1644 			ha->fw_options[11] |= BIT_5 |
1645 			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1646 			    (tx_sens & (BIT_1 | BIT_0));
1647 	}
1648 
1649 	/* FCP2 options. */
1650 	/*  Return command IOCBs without waiting for an ABTS to complete. */
1651 	ha->fw_options[3] |= BIT_13;
1652 
1653 	/* LED scheme. */
1654 	if (ha->flags.enable_led_scheme)
1655 		ha->fw_options[2] |= BIT_12;
1656 
1657 	/* Detect ISP6312. */
1658 	if (IS_QLA6312(ha))
1659 		ha->fw_options[2] |= BIT_13;
1660 
1661 	/* Update firmware options. */
1662 	qla2x00_set_fw_options(vha, ha->fw_options);
1663 }
1664 
1665 void
1666 qla24xx_update_fw_options(scsi_qla_host_t *vha)
1667 {
1668 	int rval;
1669 	struct qla_hw_data *ha = vha->hw;
1670 
1671 	if (IS_QLA82XX(ha))
1672 		return;
1673 
1674 	/* Update Serial Link options. */
1675 	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1676 		return;
1677 
1678 	rval = qla2x00_set_serdes_params(vha,
1679 	    le16_to_cpu(ha->fw_seriallink_options24[1]),
1680 	    le16_to_cpu(ha->fw_seriallink_options24[2]),
1681 	    le16_to_cpu(ha->fw_seriallink_options24[3]));
1682 	if (rval != QLA_SUCCESS) {
1683 		qla_printk(KERN_WARNING, ha,
1684 		    "Unable to update Serial Link options (%x).\n", rval);
1685 	}
1686 }
1687 
1688 void
1689 qla2x00_config_rings(struct scsi_qla_host *vha)
1690 {
1691 	struct qla_hw_data *ha = vha->hw;
1692 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1693 	struct req_que *req = ha->req_q_map[0];
1694 	struct rsp_que *rsp = ha->rsp_q_map[0];
1695 
1696 	/* Setup ring parameters in initialization control block. */
1697 	ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1698 	ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
1699 	ha->init_cb->request_q_length = cpu_to_le16(req->length);
1700 	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
1701 	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1702 	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1703 	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1704 	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1705 
1706 	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1707 	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
1708 	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
1709 	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
1710 	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
1711 }
1712 
1713 void
1714 qla24xx_config_rings(struct scsi_qla_host *vha)
1715 {
1716 	struct qla_hw_data *ha = vha->hw;
1717 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
1718 	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1719 	struct qla_msix_entry *msix;
1720 	struct init_cb_24xx *icb;
1721 	uint16_t rid = 0;
1722 	struct req_que *req = ha->req_q_map[0];
1723 	struct rsp_que *rsp = ha->rsp_q_map[0];
1724 
1725 	/* Setup ring parameters in initialization control block. */
1726 	icb = (struct init_cb_24xx *)ha->init_cb;
1727 	icb->request_q_outpointer = __constant_cpu_to_le16(0);
1728 	icb->response_q_inpointer = __constant_cpu_to_le16(0);
1729 	icb->request_q_length = cpu_to_le16(req->length);
1730 	icb->response_q_length = cpu_to_le16(rsp->length);
1731 	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1732 	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1733 	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1734 	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1735 
1736 	if (ha->mqenable) {
1737 		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1738 		icb->rid = __constant_cpu_to_le16(rid);
1739 		if (ha->flags.msix_enabled) {
1740 			msix = &ha->msix_entries[1];
1741 			DEBUG2_17(printk(KERN_INFO
1742 			"Registering vector 0x%x for base que\n", msix->entry));
1743 			icb->msix = cpu_to_le16(msix->entry);
1744 		}
1745 		/* Use alternate PCI bus number */
1746 		if (MSB(rid))
1747 			icb->firmware_options_2 |=
1748 				__constant_cpu_to_le32(BIT_19);
1749 		/* Use alternate PCI devfn */
1750 		if (LSB(rid))
1751 			icb->firmware_options_2 |=
1752 				__constant_cpu_to_le32(BIT_18);
1753 
1754 		/* Use Disable MSIX Handshake mode for capable adapters */
1755 		if (IS_MSIX_NACK_CAPABLE(ha)) {
1756 			icb->firmware_options_2 &=
1757 				__constant_cpu_to_le32(~BIT_22);
1758 			ha->flags.disable_msix_handshake = 1;
1759 			qla_printk(KERN_INFO, ha,
1760 				"MSIX Handshake Disable Mode turned on\n");
1761 		} else {
1762 			icb->firmware_options_2 |=
1763 				__constant_cpu_to_le32(BIT_22);
1764 		}
1765 		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1766 
1767 		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
1768 		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
1769 		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
1770 		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
1771 	} else {
1772 		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
1773 		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
1774 		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1775 		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1776 	}
1777 	/* PCI posting */
1778 	RD_REG_DWORD(&ioreg->hccr);
1779 }
1780 
1781 /**
1782  * qla2x00_init_rings() - Initializes firmware.
1783  * @vha: HA context
1784  *
1785  * Beginning of request ring has initialization control block already built
1786  * by nvram config routine.
1787  *
1788  * Returns 0 on success.
1789  */
1790 static int
1791 qla2x00_init_rings(scsi_qla_host_t *vha)
1792 {
1793 	int	rval;
1794 	unsigned long flags = 0;
1795 	int cnt, que;
1796 	struct qla_hw_data *ha = vha->hw;
1797 	struct req_que *req;
1798 	struct rsp_que *rsp;
1799 	struct scsi_qla_host *vp;
1800 	struct mid_init_cb_24xx *mid_init_cb =
1801 	    (struct mid_init_cb_24xx *) ha->init_cb;
1802 
1803 	spin_lock_irqsave(&ha->hardware_lock, flags);
1804 
1805 	/* Clear outstanding commands array. */
1806 	for (que = 0; que < ha->max_req_queues; que++) {
1807 		req = ha->req_q_map[que];
1808 		if (!req)
1809 			continue;
1810 		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1811 			req->outstanding_cmds[cnt] = NULL;
1812 
1813 		req->current_outstanding_cmd = 1;
1814 
1815 		/* Initialize firmware. */
1816 		req->ring_ptr  = req->ring;
1817 		req->ring_index    = 0;
1818 		req->cnt      = req->length;
1819 	}
1820 
1821 	for (que = 0; que < ha->max_rsp_queues; que++) {
1822 		rsp = ha->rsp_q_map[que];
1823 		if (!rsp)
1824 			continue;
1825 		/* Initialize response queue entries */
1826 		qla2x00_init_response_q_entries(rsp);
1827 	}
1828 
1829 	spin_lock(&ha->vport_slock);
1830 	/* Clear RSCN queue. */
1831 	list_for_each_entry(vp, &ha->vp_list, list) {
1832 		vp->rscn_in_ptr = 0;
1833 		vp->rscn_out_ptr = 0;
1834 	}
1835 
1836 	spin_unlock(&ha->vport_slock);
1837 
1838 	ha->isp_ops->config_rings(vha);
1839 
1840 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1841 
1842 	/* Update any ISP specific firmware options before initialization. */
1843 	ha->isp_ops->update_fw_options(vha);
1844 
1845 	DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
1846 
1847 	if (ha->flags.npiv_supported) {
1848 		if (ha->operating_mode == LOOP)
1849 			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
1850 		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1851 	}
1852 
1853 	if (IS_FWI2_CAPABLE(ha)) {
1854 		mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1855 		mid_init_cb->init_cb.execution_throttle =
1856 		    cpu_to_le16(ha->fw_xcb_count);
1857 	}
1858 
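	/*
	 * Hand the completed init-cb to the firmware; qla2x00_init_firmware()
	 * wraps the Initialize Firmware mailbox command.
	 */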
1859 	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1860 	if (rval) {
1861 		DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
1862 		    vha->host_no));
1863 	} else {
1864 		DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
1865 		    vha->host_no));
1866 	}
1867 
1868 	return (rval);
1869 }
1870 
1871 /**
1872  * qla2x00_fw_ready() - Waits for firmware ready.
1873  * @vha: HA context
1874  *
1875  * Returns 0 on success.
1876  */
1877 static int
1878 qla2x00_fw_ready(scsi_qla_host_t *vha)
1879 {
1880 	int		rval;
1881 	unsigned long	wtime, mtime, cs84xx_time;
1882 	uint16_t	min_wait;	/* Minimum wait time if loop is down */
1883 	uint16_t	wait_time;	/* Wait time if loop is coming ready */
1884 	uint16_t	state[5];
1885 	struct qla_hw_data *ha = vha->hw;
1886 
1887 	rval = QLA_SUCCESS;
1888 
1889 	/* 20 seconds for loop down. */
1890 	min_wait = 20;
1891 
1892 	/*
1893 	 * Firmware should take at most one RATOV to login, plus 5 seconds for
1894 	 * our own processing.
1895 	 */
1896 	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
1897 		wait_time = min_wait;
1898 	}
1899 
1900 	/* Min wait time if loop down */
1901 	mtime = jiffies + (min_wait * HZ);
1902 
1903 	/* wait time before firmware ready */
1904 	wtime = jiffies + (wait_time * HZ);
1905 
1906 	/* Wait for ISP to finish LIP */
1907 	if (!vha->flags.init_done)
1908  		qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
1909 
1910 	DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1911 	    vha->host_no));
1912 
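	/*
	 * Poll the firmware state roughly every 500ms: exit as soon as
	 * FSTATE_READY is reported, give up at mtime if the loop stays down
	 * (cable unplugged), and hard-stop at wtime in all other cases.
	 */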
1913 	do {
1914 		rval = qla2x00_get_firmware_state(vha, state);
1915 		if (rval == QLA_SUCCESS) {
1916 			if (state[0] < FSTATE_LOSS_OF_SYNC) {
1917 				vha->device_flags &= ~DFLG_NO_CABLE;
1918 			}
1919 			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1920 				DEBUG16(printk("scsi(%ld): fw_state=%x "
1921 				    "84xx=%x.\n", vha->host_no, state[0],
1922 				    state[2]));
1923 				if ((state[2] & FSTATE_LOGGED_IN) &&
1924 				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1925 					DEBUG16(printk("scsi(%ld): Sending "
1926 					    "verify iocb.\n", vha->host_no));
1927 
1928 					cs84xx_time = jiffies;
1929 					rval = qla84xx_init_chip(vha);
1930 					if (rval != QLA_SUCCESS)
1931 						break;
1932 
1933 					/* Add time taken to initialize. */
1934 					cs84xx_time = jiffies - cs84xx_time;
1935 					wtime += cs84xx_time;
1936 					mtime += cs84xx_time;
1937 					DEBUG16(printk("scsi(%ld): Increasing "
1938 					    "wait time by %ld. New time %ld\n",
1939 					    vha->host_no, cs84xx_time, wtime));
1940 				}
1941 			} else if (state[0] == FSTATE_READY) {
1942 				DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
1943 				    vha->host_no));
1944 
1945 				qla2x00_get_retry_cnt(vha, &ha->retry_count,
1946 				    &ha->login_timeout, &ha->r_a_tov);
1947 
1948 				rval = QLA_SUCCESS;
1949 				break;
1950 			}
1951 
1952 			rval = QLA_FUNCTION_FAILED;
1953 
1954 			if (atomic_read(&vha->loop_down_timer) &&
1955 			    state[0] != FSTATE_READY) {
1956 				/* Loop down. Timeout on min_wait for states
1957 				 * other than Wait for Login.
1958 				 */
1959 				if (time_after_eq(jiffies, mtime)) {
1960 					qla_printk(KERN_INFO, ha,
1961 					    "Cable is unplugged...\n");
1962 
1963 					vha->device_flags |= DFLG_NO_CABLE;
1964 					break;
1965 				}
1966 			}
1967 		} else {
1968 			/* Mailbox cmd failed. Timeout on min_wait. */
1969 			if (time_after_eq(jiffies, mtime) ||
1970 				ha->flags.isp82xx_fw_hung)
1971 				break;
1972 		}
1973 
1974 		if (time_after_eq(jiffies, wtime))
1975 			break;
1976 
1977 		/* Delay for a while */
1978 		msleep(500);
1979 
1980 		DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1981 		    vha->host_no, state[0], jiffies));
1982 	} while (1);
1983 
1984 	DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1985 	    vha->host_no, state[0], state[1], state[2], state[3], state[4],
1986 	    jiffies));
1987 
1988 	if (rval) {
1989 		DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
1990 		    vha->host_no));
1991 	}
1992 
1993 	return (rval);
1994 }
1995 
1996 /*
1997 *  qla2x00_configure_hba
1998 *      Set up adapter context.
1999 *
2000 * Input:
2001 *      vha = adapter state pointer.
2002 *
2003 * Returns:
2004 *      0 = success
2005 *
2006 * Context:
2007 *      Kernel context.
2008 */
2009 static int
2010 qla2x00_configure_hba(scsi_qla_host_t *vha)
2011 {
2012 	int       rval;
2013 	uint16_t      loop_id;
2014 	uint16_t      topo;
2015 	uint16_t      sw_cap;
2016 	uint8_t       al_pa;
2017 	uint8_t       area;
2018 	uint8_t       domain;
2019 	char		connect_type[22];
2020 	struct qla_hw_data *ha = vha->hw;
2021 
2022 	/* Get host addresses. */
2023 	rval = qla2x00_get_adapter_id(vha,
2024 	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
2025 	if (rval != QLA_SUCCESS) {
2026 		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
2027 		    IS_QLA8XXX_TYPE(ha) ||
2028 		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
2029 			DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
2030 			    __func__, vha->host_no));
2031 		} else {
2032 			qla_printk(KERN_WARNING, ha,
2033 			    "ERROR -- Unable to get host loop ID.\n");
2034 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2035 		}
2036 		return (rval);
2037 	}
2038 
2039 	if (topo == 4) {
2040 		qla_printk(KERN_INFO, ha,
2041 			"Cannot get topology - retrying.\n");
2042 		return (QLA_FUNCTION_FAILED);
2043 	}
2044 
2045 	vha->loop_id = loop_id;
2046 
2047 	/* initialize */
2048 	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
2049 	ha->operating_mode = LOOP;
2050 	ha->switch_cap = 0;
2051 
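	/*
	 * Topology codes returned by GET ID: 0 = NL_Port (private loop),
	 * 1 = FL_Port (public loop), 2 = N_Port point-to-point,
	 * 3 = F_Port (fabric); anything else falls back to loop.
	 */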
2052 	switch (topo) {
2053 	case 0:
2054 		DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
2055 		    vha->host_no));
2056 		ha->current_topology = ISP_CFG_NL;
2057 		strcpy(connect_type, "(Loop)");
2058 		break;
2059 
2060 	case 1:
2061 		DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
2062 		    vha->host_no));
2063 		ha->switch_cap = sw_cap;
2064 		ha->current_topology = ISP_CFG_FL;
2065 		strcpy(connect_type, "(FL_Port)");
2066 		break;
2067 
2068 	case 2:
2069 		DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
2070 		    vha->host_no));
2071 		ha->operating_mode = P2P;
2072 		ha->current_topology = ISP_CFG_N;
2073 		strcpy(connect_type, "(N_Port-to-N_Port)");
2074 		break;
2075 
2076 	case 3:
2077 		DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
2078 		    vha->host_no));
2079 		ha->switch_cap = sw_cap;
2080 		ha->operating_mode = P2P;
2081 		ha->current_topology = ISP_CFG_F;
2082 		strcpy(connect_type, "(F_Port)");
2083 		break;
2084 
2085 	default:
2086 		DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
2087 		    "Using NL.\n",
2088 		    vha->host_no, topo));
2089 		ha->current_topology = ISP_CFG_NL;
2090 		strcpy(connect_type, "(Loop)");
2091 		break;
2092 	}
2093 
2094 	/* Save Host port and loop ID. */
2095 	/* byte order - Big Endian */
2096 	vha->d_id.b.domain = domain;
2097 	vha->d_id.b.area = area;
2098 	vha->d_id.b.al_pa = al_pa;
2099 
2100 	if (!vha->flags.init_done)
2101  		qla_printk(KERN_INFO, ha,
2102 		    "Topology - %s, Host Loop address 0x%x\n",
2103 		    connect_type, vha->loop_id);
2104 
2105 	if (rval) {
2106 		DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
2107 	} else {
2108 		DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
2109 	}
2110 
2111 	return(rval);
2112 }
2113 
2114 inline void
2115 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
2116 	char *def)
2117 {
2118 	char *st, *en;
2119 	uint16_t index;
2120 	struct qla_hw_data *ha = vha->hw;
2121 	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
2122 	    !IS_QLA8XXX_TYPE(ha);
2123 
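	/*
	 * Older ISPs take the model description from the qla_devtbl table,
	 * indexed by the PCI subsystem-device ID; FWI2-capable parts refresh
	 * it from VPD field 0x82 at the end of this routine.
	 */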
2124 	if (memcmp(model, BINZERO, len) != 0) {
2125 		strncpy(ha->model_number, model, len);
2126 		st = en = ha->model_number;
2127 		en += len - 1;
2128 		while (en > st) {
2129 			if (*en != 0x20 && *en != 0x00)
2130 				break;
2131 			*en-- = '\0';
2132 		}
2133 
2134 		index = (ha->pdev->subsystem_device & 0xff);
2135 		if (use_tbl &&
2136 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2137 		    index < QLA_MODEL_NAMES)
2138 			strncpy(ha->model_desc,
2139 			    qla2x00_model_name[index * 2 + 1],
2140 			    sizeof(ha->model_desc) - 1);
2141 	} else {
2142 		index = (ha->pdev->subsystem_device & 0xff);
2143 		if (use_tbl &&
2144 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2145 		    index < QLA_MODEL_NAMES) {
2146 			strcpy(ha->model_number,
2147 			    qla2x00_model_name[index * 2]);
2148 			strncpy(ha->model_desc,
2149 			    qla2x00_model_name[index * 2 + 1],
2150 			    sizeof(ha->model_desc) - 1);
2151 		} else {
2152 			strcpy(ha->model_number, def);
2153 		}
2154 	}
2155 	if (IS_FWI2_CAPABLE(ha))
2156 		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
2157 		    sizeof(ha->model_desc));
2158 }
2159 
2160 /* On sparc systems, obtain port and node WWN from firmware
2161  * properties.
2162  */
2163 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
2164 {
2165 #ifdef CONFIG_SPARC
2166 	struct qla_hw_data *ha = vha->hw;
2167 	struct pci_dev *pdev = ha->pdev;
2168 	struct device_node *dp = pci_device_to_OF_node(pdev);
2169 	const u8 *val;
2170 	int len;
2171 
2172 	val = of_get_property(dp, "port-wwn", &len);
2173 	if (val && len >= WWN_SIZE)
2174 		memcpy(nv->port_name, val, WWN_SIZE);
2175 
2176 	val = of_get_property(dp, "node-wwn", &len);
2177 	if (val && len >= WWN_SIZE)
2178 		memcpy(nv->node_name, val, WWN_SIZE);
2179 #endif
2180 }
2181 
2182 /*
2183 * NVRAM configuration for ISP 2xxx
2184 *
2185 * Input:
2186 *      vha               = adapter block pointer.
2187 *
2188 * Output:
2189 *      initialization control block in response_ring
2190 *      host adapters parameters in host adapter block
2191 *
2192 * Returns:
2193 *      0 = success.
2194 */
2195 int
2196 qla2x00_nvram_config(scsi_qla_host_t *vha)
2197 {
2198 	int             rval;
2199 	uint8_t         chksum = 0;
2200 	uint16_t        cnt;
2201 	uint8_t         *dptr1, *dptr2;
2202 	struct qla_hw_data *ha = vha->hw;
2203 	init_cb_t       *icb = ha->init_cb;
2204 	nvram_t         *nv = ha->nvram;
2205 	uint8_t         *ptr = ha->nvram;
2206 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2207 
2208 	rval = QLA_SUCCESS;
2209 
2210 	/* Determine NVRAM starting address. */
2211 	ha->nvram_size = sizeof(nvram_t);
2212 	ha->nvram_base = 0;
2213 	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
2214 		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
2215 			ha->nvram_base = 0x80;
2216 
2217 	/* Get NVRAM data and calculate checksum. */
2218 	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
2219 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2220 		chksum += *ptr++;
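	/*
	 * A valid NVRAM image sums to zero (mod 256); the check below rejects
	 * anything else.
	 */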
2221 
2222 	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
2223 	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
2224 
2225 	/* Bad NVRAM data, set default parameters. */
2226 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2227 	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2228 		/* Reset NVRAM data. */
2229 		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
2230 		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
2231 		    nv->nvram_version);
2232 		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
2233 		    "invalid -- WWPN) defaults.\n");
2234 
2235 		/*
2236 		 * Set default initialization control block.
2237 		 */
2238 		memset(nv, 0, ha->nvram_size);
2239 		nv->parameter_block_version = ICB_VERSION;
2240 
2241 		if (IS_QLA23XX(ha)) {
2242 			nv->firmware_options[0] = BIT_2 | BIT_1;
2243 			nv->firmware_options[1] = BIT_7 | BIT_5;
2244 			nv->add_firmware_options[0] = BIT_5;
2245 			nv->add_firmware_options[1] = BIT_5 | BIT_4;
2246 			nv->frame_payload_size = __constant_cpu_to_le16(2048);
2247 			nv->special_options[1] = BIT_7;
2248 		} else if (IS_QLA2200(ha)) {
2249 			nv->firmware_options[0] = BIT_2 | BIT_1;
2250 			nv->firmware_options[1] = BIT_7 | BIT_5;
2251 			nv->add_firmware_options[0] = BIT_5;
2252 			nv->add_firmware_options[1] = BIT_5 | BIT_4;
2253 			nv->frame_payload_size = __constant_cpu_to_le16(1024);
2254 		} else if (IS_QLA2100(ha)) {
2255 			nv->firmware_options[0] = BIT_3 | BIT_1;
2256 			nv->firmware_options[1] = BIT_5;
2257 			nv->frame_payload_size = __constant_cpu_to_le16(1024);
2258 		}
2259 
2260 		nv->max_iocb_allocation = __constant_cpu_to_le16(256);
2261 		nv->execution_throttle = __constant_cpu_to_le16(16);
2262 		nv->retry_count = 8;
2263 		nv->retry_delay = 1;
2264 
2265 		nv->port_name[0] = 33;
2266 		nv->port_name[3] = 224;
2267 		nv->port_name[4] = 139;
2268 
2269 		qla2xxx_nvram_wwn_from_ofw(vha, nv);
2270 
2271 		nv->login_timeout = 4;
2272 
2273 		/*
2274 		 * Set default host adapter parameters
2275 		 */
2276 		nv->host_p[1] = BIT_2;
2277 		nv->reset_delay = 5;
2278 		nv->port_down_retry_count = 8;
2279 		nv->max_luns_per_target = __constant_cpu_to_le16(8);
2280 		nv->link_down_timeout = 60;
2281 
2282 		rval = 1;
2283 	}
2284 
2285 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2286 	/*
2287 	 * The SN2 does not provide BIOS emulation which means you can't change
2288 	 * potentially bogus BIOS settings. Force the use of default settings
2289 	 * for link rate and frame size.  Hope that the rest of the settings
2290 	 * are valid.
2291 	 */
2292 	if (ia64_platform_is("sn2")) {
2293 		nv->frame_payload_size = __constant_cpu_to_le16(2048);
2294 		if (IS_QLA23XX(ha))
2295 			nv->special_options[1] = BIT_7;
2296 	}
2297 #endif
2298 
2299 	/* Reset Initialization control block */
2300 	memset(icb, 0, ha->init_cb_size);
2301 
2302 	/*
2303 	 * Setup driver NVRAM options.
2304 	 */
2305 	nv->firmware_options[0] |= (BIT_6 | BIT_1);
2306 	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
2307 	nv->firmware_options[1] |= (BIT_5 | BIT_0);
2308 	nv->firmware_options[1] &= ~BIT_4;
2309 
2310 	if (IS_QLA23XX(ha)) {
2311 		nv->firmware_options[0] |= BIT_2;
2312 		nv->firmware_options[0] &= ~BIT_3;
2313 		nv->firmware_options[0] &= ~BIT_6;
2314 		nv->add_firmware_options[1] |= BIT_5 | BIT_4;
2315 
2316 		if (IS_QLA2300(ha)) {
2317 			if (ha->fb_rev == FPM_2310) {
2318 				strcpy(ha->model_number, "QLA2310");
2319 			} else {
2320 				strcpy(ha->model_number, "QLA2300");
2321 			}
2322 		} else {
2323 			qla2x00_set_model_info(vha, nv->model_number,
2324 			    sizeof(nv->model_number), "QLA23xx");
2325 		}
2326 	} else if (IS_QLA2200(ha)) {
2327 		nv->firmware_options[0] |= BIT_2;
2328 		/*
2329 		 * 'Point-to-point preferred, else loop' is not a safe
2330 		 * connection mode setting.
2331 		 */
2332 		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
2333 		    (BIT_5 | BIT_4)) {
2334 			/* Force 'loop preferred, else point-to-point'. */
2335 			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
2336 			nv->add_firmware_options[0] |= BIT_5;
2337 		}
2338 		strcpy(ha->model_number, "QLA22xx");
2339 	} else /*if (IS_QLA2100(ha))*/ {
2340 		strcpy(ha->model_number, "QLA2100");
2341 	}
2342 
2343 	/*
2344 	 * Copy over NVRAM RISC parameter block to initialization control block.
2345 	 */
2346 	dptr1 = (uint8_t *)icb;
2347 	dptr2 = (uint8_t *)&nv->parameter_block_version;
2348 	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
2349 	while (cnt--)
2350 		*dptr1++ = *dptr2++;
2351 
2352 	/* Copy 2nd half. */
2353 	dptr1 = (uint8_t *)icb->add_firmware_options;
2354 	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
2355 	while (cnt--)
2356 		*dptr1++ = *dptr2++;
2357 
2358 	/* Use alternate WWN? */
2359 	if (nv->host_p[1] & BIT_7) {
2360 		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
2361 		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
2362 	}
2363 
2364 	/* Prepare nodename */
2365 	if ((icb->firmware_options[1] & BIT_6) == 0) {
2366 		/*
2367 		 * Firmware will apply the following mask if the nodename was
2368 		 * not provided.
2369 		 */
2370 		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
2371 		icb->node_name[0] &= 0xF0;
2372 	}
2373 
2374 	/*
2375 	 * Set host adapter parameters.
2376 	 */
2377 	if (nv->host_p[0] & BIT_7)
2378 		ql2xextended_error_logging = 1;
2379 	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2380 	/* Always load RISC code on non ISP2[12]00 chips. */
2381 	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
2382 		ha->flags.disable_risc_code_load = 0;
2383 	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
2384 	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
2385 	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
2386 	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
2387 	ha->flags.disable_serdes = 0;
2388 
2389 	ha->operating_mode =
2390 	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
2391 
2392 	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
2393 	    sizeof(ha->fw_seriallink_options));
2394 
2395 	/* save HBA serial number */
2396 	ha->serial0 = icb->port_name[5];
2397 	ha->serial1 = icb->port_name[6];
2398 	ha->serial2 = icb->port_name[7];
2399 	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
2400 	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
2401 
2402 	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
2403 
2404 	ha->retry_count = nv->retry_count;
2405 
2406 	/* Set minimum login_timeout to 4 seconds. */
2407 	if (nv->login_timeout != ql2xlogintimeout)
2408 		nv->login_timeout = ql2xlogintimeout;
2409 	if (nv->login_timeout < 4)
2410 		nv->login_timeout = 4;
2411 	ha->login_timeout = nv->login_timeout;
2412 	icb->login_timeout = nv->login_timeout;
2413 
2414 	/* Set minimum RATOV to 100 tenths of a second. */
2415 	ha->r_a_tov = 100;
2416 
2417 	ha->loop_reset_delay = nv->reset_delay;
2418 
2419 	/* Link Down Timeout = 0:
2420 	 *
2421 	 * 	When Port Down timer expires we will start returning
2422 	 *	I/O's to OS with "DID_NO_CONNECT".
2423 	 *
2424 	 * Link Down Timeout != 0:
2425 	 *
2426 	 *	 The driver waits for the link to come up after link down
2427 	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
2428 	 */
2429 	if (nv->link_down_timeout == 0) {
2430 		ha->loop_down_abort_time =
2431 		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
2432 	} else {
2433 		ha->link_down_timeout =	 nv->link_down_timeout;
2434 		ha->loop_down_abort_time =
2435 		    (LOOP_DOWN_TIME - ha->link_down_timeout);
2436 	}
2437 
2438 	/*
2439 	 * Need enough time to try and get the port back.
2440 	 */
2441 	ha->port_down_retry_count = nv->port_down_retry_count;
2442 	if (qlport_down_retry)
2443 		ha->port_down_retry_count = qlport_down_retry;
2444 	/* Set login_retry_count */
2445 	ha->login_retry_count  = nv->retry_count;
2446 	if (ha->port_down_retry_count == nv->port_down_retry_count &&
2447 	    ha->port_down_retry_count > 3)
2448 		ha->login_retry_count = ha->port_down_retry_count;
2449 	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
2450 		ha->login_retry_count = ha->port_down_retry_count;
2451 	if (ql2xloginretrycount)
2452 		ha->login_retry_count = ql2xloginretrycount;
2453 
2454 	icb->lun_enables = __constant_cpu_to_le16(0);
2455 	icb->command_resource_count = 0;
2456 	icb->immediate_notify_resource_count = 0;
2457 	icb->timeout = __constant_cpu_to_le16(0);
2458 
2459 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2460 		/* Enable RIO */
2461 		icb->firmware_options[0] &= ~BIT_3;
2462 		icb->add_firmware_options[0] &=
2463 		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2464 		icb->add_firmware_options[0] |= BIT_2;
2465 		icb->response_accumulation_timer = 3;
2466 		icb->interrupt_delay_timer = 5;
2467 
2468 		vha->flags.process_response_queue = 1;
2469 	} else {
2470 		/* Enable ZIO. */
2471 		if (!vha->flags.init_done) {
2472 			ha->zio_mode = icb->add_firmware_options[0] &
2473 			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
2474 			ha->zio_timer = icb->interrupt_delay_timer ?
2475 			    icb->interrupt_delay_timer: 2;
2476 		}
2477 		icb->add_firmware_options[0] &=
2478 		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2479 		vha->flags.process_response_queue = 0;
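		/*
		 * ZIO mode 6 coalesces response-queue interrupts; the delay
		 * programmed below is reported in 100-microsecond units.
		 */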
2480 		if (ha->zio_mode != QLA_ZIO_DISABLED) {
2481 			ha->zio_mode = QLA_ZIO_MODE_6;
2482 
2483 			DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
2484 			    "delay (%d us).\n", vha->host_no, ha->zio_mode,
2485 			    ha->zio_timer * 100));
2486 			qla_printk(KERN_INFO, ha,
2487 			    "ZIO mode %d enabled; timer delay (%d us).\n",
2488 			    ha->zio_mode, ha->zio_timer * 100);
2489 
2490 			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
2491 			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
2492 			vha->flags.process_response_queue = 1;
2493 		}
2494 	}
2495 
2496 	if (rval) {
2497 		DEBUG2_3(printk(KERN_WARNING
2498 		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
2499 	}
2500 	return (rval);
2501 }
2502 
2503 static void
2504 qla2x00_rport_del(void *data)
2505 {
2506 	fc_port_t *fcport = data;
2507 	struct fc_rport *rport;
2508 	unsigned long flags;
2509 
2510 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2511 	rport = fcport->drport ? fcport->drport: fcport->rport;
2512 	fcport->drport = NULL;
2513 	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2514 	if (rport)
2515 		fc_remote_port_delete(rport);
2516 }
2517 
2518 /**
2519  * qla2x00_alloc_fcport() - Allocate a generic fcport.
2520  * @vha: HA context
2521  * @flags: allocation flags
2522  *
2523  * Returns a pointer to the allocated fcport, or NULL, if none available.
2524  */
2525 fc_port_t *
2526 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2527 {
2528 	fc_port_t *fcport;
2529 
2530 	fcport = kzalloc(sizeof(fc_port_t), flags);
2531 	if (!fcport)
2532 		return NULL;
2533 
2534 	/* Setup fcport template structure. */
2535 	fcport->vha = vha;
2536 	fcport->vp_idx = vha->vp_idx;
2537 	fcport->port_type = FCT_UNKNOWN;
2538 	fcport->loop_id = FC_NO_LOOP_ID;
2539 	atomic_set(&fcport->state, FCS_UNCONFIGURED);
2540 	fcport->supported_classes = FC_COS_UNSPECIFIED;
2541 
2542 	return fcport;
2543 }
2544 
2545 /*
2546  * qla2x00_configure_loop
2547  *      Updates the Fibre Channel Device Database with what is actually on the loop.
2548  *
2549  * Input:
2550  *      vha               = adapter block pointer.
2551  *
2552  * Returns:
2553  *      0 = success.
2554  *      1 = error.
2555  *      2 = database was full and device was not configured.
2556  */
2557 static int
2558 qla2x00_configure_loop(scsi_qla_host_t *vha)
2559 {
2560 	int  rval;
2561 	unsigned long flags, save_flags;
2562 	struct qla_hw_data *ha = vha->hw;
2563 	rval = QLA_SUCCESS;
2564 
2565 	/* Get Initiator ID */
2566 	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2567 		rval = qla2x00_configure_hba(vha);
2568 		if (rval != QLA_SUCCESS) {
2569 			DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
2570 			    vha->host_no));
2571 			return (rval);
2572 		}
2573 	}
2574 
2575 	save_flags = flags = vha->dpc_flags;
2576 	DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
2577 	    vha->host_no, flags));
2578 
2579 	/*
2580 	 * If we have both an RSCN and PORT UPDATE pending then handle them
2581 	 * both at the same time.
2582 	 */
2583 	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2584 	clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2585 
2586 	qla2x00_get_data_rate(vha);
2587 
2588 	/* Determine what we need to do */
2589 	if (ha->current_topology == ISP_CFG_FL &&
2590 	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2591 
2592 		vha->flags.rscn_queue_overflow = 1;
2593 		set_bit(RSCN_UPDATE, &flags);
2594 
2595 	} else if (ha->current_topology == ISP_CFG_F &&
2596 	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2597 
2598 		vha->flags.rscn_queue_overflow = 1;
2599 		set_bit(RSCN_UPDATE, &flags);
2600 		clear_bit(LOCAL_LOOP_UPDATE, &flags);
2601 
2602 	} else if (ha->current_topology == ISP_CFG_N) {
2603 		clear_bit(RSCN_UPDATE, &flags);
2604 
2605 	} else if (!vha->flags.online ||
2606 	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
2607 
2608 		vha->flags.rscn_queue_overflow = 1;
2609 		set_bit(RSCN_UPDATE, &flags);
2610 		set_bit(LOCAL_LOOP_UPDATE, &flags);
2611 	}
2612 
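	/*
	 * Scan the local loop first, then the fabric if an RSCN-driven
	 * update is still pending and the loop is not in transition.
	 */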
2613 	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2614 		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2615 			rval = QLA_FUNCTION_FAILED;
2616 		else
2617 			rval = qla2x00_configure_local_loop(vha);
2618 	}
2619 
2620 	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2621 		if (LOOP_TRANSITION(vha))
2622 			rval = QLA_FUNCTION_FAILED;
2623 		else
2624 			rval = qla2x00_configure_fabric(vha);
2625 	}
2626 
2627 	if (rval == QLA_SUCCESS) {
2628 		if (atomic_read(&vha->loop_down_timer) ||
2629 		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2630 			rval = QLA_FUNCTION_FAILED;
2631 		} else {
2632 			atomic_set(&vha->loop_state, LOOP_READY);
2633 
2634 			DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
2635 		}
2636 	}
2637 
2638 	if (rval) {
2639 		DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
2640 		    __func__, vha->host_no));
2641 	} else {
2642 		DEBUG3(printk("%s: exiting normally\n", __func__));
2643 	}
2644 
2645 	/* Restore state if a resync event occurred during processing */
2646 	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2647 		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2648 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2649 		if (test_bit(RSCN_UPDATE, &save_flags)) {
2650 			set_bit(RSCN_UPDATE, &vha->dpc_flags);
2651 			if (!IS_ALOGIO_CAPABLE(ha))
2652 				vha->flags.rscn_queue_overflow = 1;
2653 		}
2654 	}
2655 
2656 	return (rval);
2657 }
2658 
2659 
2660 
2661 /*
2662  * qla2x00_configure_local_loop
2663  *	Updates Fibre Channel Device Database with local loop devices.
2664  *
2665  * Input:
2666  *	vha = adapter block pointer.
2667  *
2668  * Returns:
2669  *	0 = success.
2670  */
2671 static int
2672 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2673 {
2674 	int		rval, rval2;
2675 	int		found_devs;
2676 	int		found;
2677 	fc_port_t	*fcport, *new_fcport;
2678 
2679 	uint16_t	index;
2680 	uint16_t	entries;
2681 	char		*id_iter;
2682 	uint16_t	loop_id;
2683 	uint8_t		domain, area, al_pa;
2684 	struct qla_hw_data *ha = vha->hw;
2685 
2686 	found_devs = 0;
2687 	new_fcport = NULL;
2688 	entries = MAX_FIBRE_DEVICES;
2689 
2690 	DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
2691 	DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
2692 
2693 	/* Get list of logged in devices. */
2694 	memset(ha->gid_list, 0, GID_LIST_SIZE);
2695 	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2696 	    &entries);
2697 	if (rval != QLA_SUCCESS)
2698 		goto cleanup_allocation;
2699 
2700 	DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2701 	    vha->host_no, entries));
2702 	DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2703 	    entries * sizeof(struct gid_list_info)));
2704 
2705 	/* Allocate temporary fcport for any new fcports discovered. */
2706 	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2707 	if (new_fcport == NULL) {
2708 		rval = QLA_MEMORY_ALLOC_FAILED;
2709 		goto cleanup_allocation;
2710 	}
2711 	new_fcport->flags &= ~FCF_FABRIC_DEVICE;
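	/*
	 * new_fcport is a scratch entry: it is filled in for each ID-list
	 * entry below and only linked onto vp_fcports when the port turns
	 * out to be new, at which point a fresh scratch entry is allocated.
	 */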
2712 
2713 	/*
2714 	 * Mark local devices that were present with FCF_DEVICE_LOST for now.
2715 	 */
2716 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2717 		if (atomic_read(&fcport->state) == FCS_ONLINE &&
2718 		    fcport->port_type != FCT_BROADCAST &&
2719 		    (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2720 
2721 			DEBUG(printk("scsi(%ld): Marking port lost, "
2722 			    "loop_id=0x%04x\n",
2723 			    vha->host_no, fcport->loop_id));
2724 
2725 			atomic_set(&fcport->state, FCS_DEVICE_LOST);
2726 		}
2727 	}
2728 
2729 	/* Add devices to port list. */
2730 	id_iter = (char *)ha->gid_list;
2731 	for (index = 0; index < entries; index++) {
2732 		domain = ((struct gid_list_info *)id_iter)->domain;
2733 		area = ((struct gid_list_info *)id_iter)->area;
2734 		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
2735 		if (IS_QLA2100(ha) || IS_QLA2200(ha))
2736 			loop_id = (uint16_t)
2737 			    ((struct gid_list_info *)id_iter)->loop_id_2100;
2738 		else
2739 			loop_id = le16_to_cpu(
2740 			    ((struct gid_list_info *)id_iter)->loop_id);
2741 		id_iter += ha->gid_list_info_size;
2742 
2743 		/* Bypass reserved domain fields. */
2744 		if ((domain & 0xf0) == 0xf0)
2745 			continue;
2746 
2747 		/* Bypass if not same domain and area of adapter. */
2748 		if (area && domain &&
2749 		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2750 			continue;
2751 
2752 		/* Bypass invalid local loop ID. */
2753 		if (loop_id > LAST_LOCAL_LOOP_ID)
2754 			continue;
2755 
2756 		/* Fill in member data. */
2757 		new_fcport->d_id.b.domain = domain;
2758 		new_fcport->d_id.b.area = area;
2759 		new_fcport->d_id.b.al_pa = al_pa;
2760 		new_fcport->loop_id = loop_id;
2761 		new_fcport->vp_idx = vha->vp_idx;
2762 		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2763 		if (rval2 != QLA_SUCCESS) {
2764 			DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
2765 			    "information -- get_port_database=%x, "
2766 			    "loop_id=0x%04x\n",
2767 			    vha->host_no, rval2, new_fcport->loop_id));
2768 			DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
2769 			    vha->host_no));
2770 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2771 			continue;
2772 		}
2773 
2774 		/* Check for matching device in port list. */
2775 		found = 0;
2776 		fcport = NULL;
2777 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
2778 			if (memcmp(new_fcport->port_name, fcport->port_name,
2779 			    WWN_SIZE))
2780 				continue;
2781 
2782 			fcport->flags &= ~FCF_FABRIC_DEVICE;
2783 			fcport->loop_id = new_fcport->loop_id;
2784 			fcport->port_type = new_fcport->port_type;
2785 			fcport->d_id.b24 = new_fcport->d_id.b24;
2786 			memcpy(fcport->node_name, new_fcport->node_name,
2787 			    WWN_SIZE);
2788 
2789 			found++;
2790 			break;
2791 		}
2792 
2793 		if (!found) {
2794 			/* New device, add to fcports list. */
2795 			if (vha->vp_idx) {
2796 				new_fcport->vha = vha;
2797 				new_fcport->vp_idx = vha->vp_idx;
2798 			}
2799 			list_add_tail(&new_fcport->list, &vha->vp_fcports);
2800 
2801 			/* Allocate a new replacement fcport. */
2802 			fcport = new_fcport;
2803 			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2804 			if (new_fcport == NULL) {
2805 				rval = QLA_MEMORY_ALLOC_FAILED;
2806 				goto cleanup_allocation;
2807 			}
2808 			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2809 		}
2810 
2811 		/* Base iIDMA settings on HBA port speed. */
2812 		fcport->fp_speed = ha->link_data_rate;
2813 
2814 		qla2x00_update_fcport(vha, fcport);
2815 
2816 		found_devs++;
2817 	}
2818 
2819 cleanup_allocation:
2820 	kfree(new_fcport);
2821 
2822 	if (rval != QLA_SUCCESS) {
2823 		DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
2824 		    "rval=%x\n", vha->host_no, rval));
2825 	}
2826 
2827 	return (rval);
2828 }
2829 
2830 static void
2831 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2832 {
2833 #define LS_UNKNOWN      2
2834 	static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
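	/*
	 * Indexed by the firmware port-speed code (0=1Gb, 1=2Gb, 3=4Gb,
	 * 4=8Gb); code 0x13 is special-cased to the 10Gb entry below.
	 */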
2835 	char *link_speed;
2836 	int rval;
2837 	uint16_t mb[4];
2838 	struct qla_hw_data *ha = vha->hw;
2839 
2840 	if (!IS_IIDMA_CAPABLE(ha))
2841 		return;
2842 
2843 	if (atomic_read(&fcport->state) != FCS_ONLINE)
2844 		return;
2845 
2846 	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2847 	    fcport->fp_speed > ha->link_data_rate)
2848 		return;
2849 
2850 	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2851 	    mb);
2852 	if (rval != QLA_SUCCESS) {
2853 		DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2854 		    "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2855 		    vha->host_no, fcport->port_name[0], fcport->port_name[1],
2856 		    fcport->port_name[2], fcport->port_name[3],
2857 		    fcport->port_name[4], fcport->port_name[5],
2858 		    fcport->port_name[6], fcport->port_name[7], rval,
2859 		    fcport->fp_speed, mb[0], mb[1]));
2860 	} else {
2861 		link_speed = link_speeds[LS_UNKNOWN];
2862 		if (fcport->fp_speed < 5)
2863 			link_speed = link_speeds[fcport->fp_speed];
2864 		else if (fcport->fp_speed == 0x13)
2865 			link_speed = link_speeds[5];
2866 		DEBUG2(qla_printk(KERN_INFO, ha,
2867 		    "iIDMA adjusted to %s GB/s on "
2868 		    "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2869 		    link_speed, fcport->port_name[0],
2870 		    fcport->port_name[1], fcport->port_name[2],
2871 		    fcport->port_name[3], fcport->port_name[4],
2872 		    fcport->port_name[5], fcport->port_name[6],
2873 		    fcport->port_name[7]));
2874 	}
2875 }
2876 
2877 static void
2878 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2879 {
2880 	struct fc_rport_identifiers rport_ids;
2881 	struct fc_rport *rport;
2882 	struct qla_hw_data *ha = vha->hw;
2883 	unsigned long flags;
2884 
2885 	qla2x00_rport_del(fcport);
2886 
2887 	rport_ids.node_name = wwn_to_u64(fcport->node_name);
2888 	rport_ids.port_name = wwn_to_u64(fcport->port_name);
2889 	rport_ids.port_id = fcport->d_id.b.domain << 16 |
2890 	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2891 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2892 	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2893 	if (!rport) {
2894 		qla_printk(KERN_WARNING, ha,
2895 		    "Unable to allocate fc remote port!\n");
2896 		return;
2897 	}
2898 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2899 	*((fc_port_t **)rport->dd_data) = fcport;
2900 	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2901 
2902 	rport->supported_classes = fcport->supported_classes;
2903 
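	/*
	 * Report the discovered roles; a target role prompts the FC
	 * transport to scan the rport for SCSI devices.
	 */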
2904 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2905 	if (fcport->port_type == FCT_INITIATOR)
2906 		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
2907 	if (fcport->port_type == FCT_TARGET)
2908 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
2909 	fc_remote_port_rolechg(rport, rport_ids.roles);
2910 }
2911 
2912 /*
2913  * qla2x00_update_fcport
2914  *	Updates a device in the port list.
2915  *
2916  * Input:
2917  *	vha = adapter block pointer.
2918  *	fcport = port structure pointer.
2919  *
2920  * Return:
2921  *	0  - Success
2922  *  BIT_0 - error
2923  *
2924  * Context:
2925  *	Kernel context.
2926  */
2927 void
2928 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2929 {
2930 	fcport->vha = vha;
2931 	fcport->login_retry = 0;
2932 	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
2933 
2934 	qla2x00_iidma_fcport(vha, fcport);
2935 	qla24xx_update_fcport_fcp_prio(vha, fcport);
2936 	qla2x00_reg_remote_port(vha, fcport);
2937 	atomic_set(&fcport->state, FCS_ONLINE);
2938 }
2939 
2940 /*
2941  * qla2x00_configure_fabric
2942  *      Set up SNS devices with loop IDs.
2943  *
2944  * Input:
2945  *      vha = adapter block pointer.
2946  *
2947  * Returns:
2948  *      0 = success.
2949  *      BIT_0 = error
2950  */
2951 static int
2952 qla2x00_configure_fabric(scsi_qla_host_t *vha)
2953 {
2954 	int	rval, rval2;
2955 	fc_port_t	*fcport, *fcptemp;
2956 	uint16_t	next_loopid;
2957 	uint16_t	mb[MAILBOX_REGISTER_COUNT];
2958 	uint16_t	loop_id;
2959 	LIST_HEAD(new_fcports);
2960 	struct qla_hw_data *ha = vha->hw;
2961 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2962 
2963 	/* If FL port exists, then SNS is present */
2964 	if (IS_FWI2_CAPABLE(ha))
2965 		loop_id = NPH_F_PORT;
2966 	else
2967 		loop_id = SNS_FL_PORT;
2968 	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2969 	if (rval != QLA_SUCCESS) {
2970 		DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2971 		    "Port\n", vha->host_no));
2972 
2973 		vha->device_flags &= ~SWITCH_FOUND;
2974 		return (QLA_SUCCESS);
2975 	}
2976 	vha->device_flags |= SWITCH_FOUND;
2977 
2978 	/* Mark devices that need re-synchronization. */
2979 	rval2 = qla2x00_device_resync(vha);
2980 	if (rval2 == QLA_RSCNS_HANDLED) {
2981 		/* No point doing the scan, just continue. */
2982 		return (QLA_SUCCESS);
2983 	}
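	/* Single-pass do/while(0): error paths "break" to common cleanup. */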
2984 	do {
2985 		/* FDMI support. */
2986 		if (ql2xfdmienable &&
2987 		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
2988 			qla2x00_fdmi_register(vha);
2989 
2990 		/* Ensure we are logged into the SNS. */
2991 		if (IS_FWI2_CAPABLE(ha))
2992 			loop_id = NPH_SNS;
2993 		else
2994 			loop_id = SIMPLE_NAME_SERVER;
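		/* Log into the directory server at well-known address FFFFFCh. */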
2995 		ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
2996 		    0xfc, mb, BIT_1 | BIT_0);
2997 		if (mb[0] != MBS_COMMAND_COMPLETE) {
2998 			DEBUG2(qla_printk(KERN_INFO, ha,
2999 			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
3000 			    "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id,
3001 			    mb[0], mb[1], mb[2], mb[6], mb[7]));
3002 			return (QLA_SUCCESS);
3003 		}
3004 
3005 		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
3006 			if (qla2x00_rft_id(vha)) {
3007 				/* EMPTY */
3008 				DEBUG2(printk("scsi(%ld): Register FC-4 "
3009 				    "TYPE failed.\n", vha->host_no));
3010 			}
3011 			if (qla2x00_rff_id(vha)) {
3012 				/* EMPTY */
3013 				DEBUG2(printk("scsi(%ld): Register FC-4 "
3014 				    "Features failed.\n", vha->host_no));
3015 			}
3016 			if (qla2x00_rnn_id(vha)) {
3017 				/* EMPTY */
3018 				DEBUG2(printk("scsi(%ld): Register Node Name "
3019 				    "failed.\n", vha->host_no));
3020 			} else if (qla2x00_rsnn_nn(vha)) {
3021 				/* EMPTY */
3022 				DEBUG2(printk("scsi(%ld): Register Symbolic "
3023 				    "Node Name failed.\n", vha->host_no));
3024 			}
3025 		}
3026 
3027 		rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3028 		if (rval != QLA_SUCCESS)
3029 			break;
3030 
3031 		/*
3032 		 * Logout all previous fabric devices marked lost, except
3033 		 * FCP2 devices.
3034 		 */
3035 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3036 			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3037 				break;
3038 
3039 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3040 				continue;
3041 
3042 			if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
3043 				qla2x00_mark_device_lost(vha, fcport,
3044 				    ql2xplogiabsentdevice, 0);
3045 				if (fcport->loop_id != FC_NO_LOOP_ID &&
3046 				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3047 				    fcport->port_type != FCT_INITIATOR &&
3048 				    fcport->port_type != FCT_BROADCAST) {
3049 					ha->isp_ops->fabric_logout(vha,
3050 					    fcport->loop_id,
3051 					    fcport->d_id.b.domain,
3052 					    fcport->d_id.b.area,
3053 					    fcport->d_id.b.al_pa);
3054 					fcport->loop_id = FC_NO_LOOP_ID;
3055 				}
3056 			}
3057 		}
3058 
3059 		/* Starting free loop ID. */
3060 		next_loopid = ha->min_external_loopid;
3061 
3062 		/*
3063 		 * Scan through our port list and log in any entries that still
3064 		 * need to be logged in.
3065 		 */
3066 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3067 			if (atomic_read(&vha->loop_down_timer) ||
3068 			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3069 				break;
3070 
3071 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3072 			    (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3073 				continue;
3074 
3075 			if (fcport->loop_id == FC_NO_LOOP_ID) {
3076 				fcport->loop_id = next_loopid;
3077 				rval = qla2x00_find_new_loop_id(
3078 				    base_vha, fcport);
3079 				if (rval != QLA_SUCCESS) {
3080 					/* Ran out of IDs to use */
3081 					break;
3082 				}
3083 			}
3084 			/* Login and update database */
3085 			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3086 		}
3087 
3088 		/* Exit if out of loop IDs. */
3089 		if (rval != QLA_SUCCESS) {
3090 			break;
3091 		}
3092 
3093 		/*
3094 		 * Login and add the new devices to our port list.
3095 		 */
3096 		list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3097 			if (atomic_read(&vha->loop_down_timer) ||
3098 			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3099 				break;
3100 
3101 			/* Find a new loop ID to use. */
3102 			fcport->loop_id = next_loopid;
3103 			rval = qla2x00_find_new_loop_id(base_vha, fcport);
3104 			if (rval != QLA_SUCCESS) {
3105 				/* Ran out of IDs to use */
3106 				break;
3107 			}
3108 
3109 			/* Login and update database */
3110 			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3111 
3112 			if (vha->vp_idx) {
3113 				fcport->vha = vha;
3114 				fcport->vp_idx = vha->vp_idx;
3115 			}
3116 			list_move_tail(&fcport->list, &vha->vp_fcports);
3117 		}
3118 	} while (0);
3119 
3120 	/* Free all new device structures not processed. */
3121 	list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3122 		list_del(&fcport->list);
3123 		kfree(fcport);
3124 	}
3125 
3126 	if (rval) {
3127 		DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
3128 		    "rval=%d\n", vha->host_no, rval));
3129 	}
3130 
3131 	return (rval);
3132 }
3133 
3134 /*
3135  * qla2x00_find_all_fabric_devs
3136  *
3137  * Input:
3138  *	vha = adapter block pointer.
3139  *	new_fcports = list head to receive newly discovered fabric ports.
3140  *
3141  * Returns:
3142  *	0 = success.
3143  *
3144  * Context:
3145  *	Kernel context.
3146  */
3147 static int
3148 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3149 	struct list_head *new_fcports)
3150 {
3151 	int		rval;
3152 	uint16_t	loop_id;
3153 	fc_port_t	*fcport, *new_fcport, *fcptemp;
3154 	int		found;
3155 
3156 	sw_info_t	*swl;
3157 	int		swl_idx;
3158 	int		first_dev, last_dev;
3159 	port_id_t	wrap = {}, nxt_d_id;
3160 	struct qla_hw_data *ha = vha->hw;
3161 	struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3162 	struct scsi_qla_host *tvp;
3163 
3164 	rval = QLA_SUCCESS;
3165 
3166 	/* Try GID_PT to get device list, else GAN. */
3167 	swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
3168 	if (!swl) {
3169 		/*EMPTY*/
3170 		DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
3171 		    "on GA_NXT\n", vha->host_no));
3172 	} else {
3173 		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3174 			kfree(swl);
3175 			swl = NULL;
3176 		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
3177 			kfree(swl);
3178 			swl = NULL;
3179 		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
3180 			kfree(swl);
3181 			swl = NULL;
3182 		} else if (ql2xiidmaenable &&
3183 		    qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
3184 			qla2x00_gpsc(vha, swl);
3185 		}
3186 
3187 		/* If other queries succeeded probe for FC-4 type */
3188 		if (swl)
3189 			qla2x00_gff_id(vha, swl);
3190 	}
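	/*
	 * With a GID_PT snapshot in swl, walk its entries below; otherwise
	 * fall back to querying the switch one port at a time with GA_NXT.
	 */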
3191 	swl_idx = 0;
3192 
3193 	/* Allocate temporary fcport for any new fcports discovered. */
3194 	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3195 	if (new_fcport == NULL) {
3196 		kfree(swl);
3197 		return (QLA_MEMORY_ALLOC_FAILED);
3198 	}
3199 	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3200 	/* Set start port ID scan at adapter ID. */
3201 	first_dev = 1;
3202 	last_dev = 0;
3203 
3204 	/* Starting free loop ID. */
3205 	loop_id = ha->min_external_loopid;
3206 	for (; loop_id <= ha->max_loop_id; loop_id++) {
3207 		if (qla2x00_is_reserved_id(vha, loop_id))
3208 			continue;
3209 
3210 		if (ha->current_topology == ISP_CFG_FL &&
3211 		    (atomic_read(&vha->loop_down_timer) ||
3212 		     LOOP_TRANSITION(vha))) {
3213 			atomic_set(&vha->loop_down_timer, 0);
3214 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3215 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3216 			break;
3217 		}
3218 
3219 		if (swl != NULL) {
3220 			if (last_dev) {
3221 				wrap.b24 = new_fcport->d_id.b24;
3222 			} else {
3223 				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
3224 				memcpy(new_fcport->node_name,
3225 				    swl[swl_idx].node_name, WWN_SIZE);
3226 				memcpy(new_fcport->port_name,
3227 				    swl[swl_idx].port_name, WWN_SIZE);
3228 				memcpy(new_fcport->fabric_port_name,
3229 				    swl[swl_idx].fabric_port_name, WWN_SIZE);
3230 				new_fcport->fp_speed = swl[swl_idx].fp_speed;
3231 				new_fcport->fc4_type = swl[swl_idx].fc4_type;
3232 
3233 				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
3234 					last_dev = 1;
3235 				}
3236 				swl_idx++;
3237 			}
3238 		} else {
3239 			/* Send GA_NXT to the switch */
3240 			rval = qla2x00_ga_nxt(vha, new_fcport);
3241 			if (rval != QLA_SUCCESS) {
3242 				qla_printk(KERN_WARNING, ha,
3243 				    "SNS scan failed -- assuming zero-entry "
3244 				    "result...\n");
3245 				list_for_each_entry_safe(fcport, fcptemp,
3246 				    new_fcports, list) {
3247 					list_del(&fcport->list);
3248 					kfree(fcport);
3249 				}
3250 				rval = QLA_SUCCESS;
3251 				break;
3252 			}
3253 		}
3254 
3255 		/* If wrap on switch device list, exit. */
3256 		if (first_dev) {
3257 			wrap.b24 = new_fcport->d_id.b24;
3258 			first_dev = 0;
3259 		} else if (new_fcport->d_id.b24 == wrap.b24) {
3260 			DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
3261 			    vha->host_no, new_fcport->d_id.b.domain,
3262 			    new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
3263 			break;
3264 		}
3265 
3266 		/* Bypass if same physical adapter. */
3267 		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
3268 			continue;
3269 
3270 		/* Bypass virtual ports of the same host. */
3271 		found = 0;
3272 		if (ha->num_vhosts) {
3273 			unsigned long flags;
3274 
3275 			spin_lock_irqsave(&ha->vport_slock, flags);
3276 			list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3277 				if (new_fcport->d_id.b24 == vp->d_id.b24) {
3278 					found = 1;
3279 					break;
3280 				}
3281 			}
3282 			spin_unlock_irqrestore(&ha->vport_slock, flags);
3283 
3284 			if (found)
3285 				continue;
3286 		}
3287 
3288 		/* Bypass if same domain and area of adapter. */
3289 		if (((new_fcport->d_id.b24 & 0xffff00) ==
3290 		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
3291 			ISP_CFG_FL)
3292 			    continue;
3293 
3294 		/* Bypass reserved domain fields. */
3295 		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
3296 			continue;
3297 
3298 		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
3299 		if (ql2xgffidenable &&
3300 		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3301 		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
3302 			continue;
3303 
3304 		/* Locate matching device in database. */
3305 		found = 0;
3306 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3307 			if (memcmp(new_fcport->port_name, fcport->port_name,
3308 			    WWN_SIZE))
3309 				continue;
3310 
3311 			found++;
3312 
3313 			/* Update port state. */
3314 			memcpy(fcport->fabric_port_name,
3315 			    new_fcport->fabric_port_name, WWN_SIZE);
3316 			fcport->fp_speed = new_fcport->fp_speed;
3317 
3318 			/*
3319 			 * If address the same and state FCS_ONLINE, nothing
3320 			 * changed.
3321 			 */
3322 			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3323 			    atomic_read(&fcport->state) == FCS_ONLINE) {
3324 				break;
3325 			}
3326 
3327 			/*
3328 			 * If device was not a fabric device before.
3329 			 */
3330 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3331 				fcport->d_id.b24 = new_fcport->d_id.b24;
3332 				fcport->loop_id = FC_NO_LOOP_ID;
3333 				fcport->flags |= (FCF_FABRIC_DEVICE |
3334 				    FCF_LOGIN_NEEDED);
3335 				break;
3336 			}
3337 
3338 			/*
3339 			 * Port ID changed or device was marked to be updated;
3340 			 * Log it out if still logged in and mark it for
3341 			 * relogin later.
3342 			 */
3343 			fcport->d_id.b24 = new_fcport->d_id.b24;
3344 			fcport->flags |= FCF_LOGIN_NEEDED;
3345 			if (fcport->loop_id != FC_NO_LOOP_ID &&
3346 			    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3347 			    fcport->port_type != FCT_INITIATOR &&
3348 			    fcport->port_type != FCT_BROADCAST) {
3349 				ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3350 				    fcport->d_id.b.domain, fcport->d_id.b.area,
3351 				    fcport->d_id.b.al_pa);
3352 				fcport->loop_id = FC_NO_LOOP_ID;
3353 			}
3354 
3355 			break;
3356 		}
3357 
3358 		if (found)
3359 			continue;
3360 		/* If device was not in our fcports list, then add it. */
3361 		list_add_tail(&new_fcport->list, new_fcports);
3362 
3363 		/* Allocate a new replacement fcport. */
3364 		nxt_d_id.b24 = new_fcport->d_id.b24;
3365 		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3366 		if (new_fcport == NULL) {
3367 			kfree(swl);
3368 			return (QLA_MEMORY_ALLOC_FAILED);
3369 		}
3370 		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3371 		new_fcport->d_id.b24 = nxt_d_id.b24;
3372 	}
3373 
3374 	kfree(swl);
3375 	kfree(new_fcport);
3376 
3377 	return (rval);
3378 }
3379 
3380 /*
3381  * qla2x00_find_new_loop_id
3382  *	Scan through our port list and find a new usable loop ID.
3383  *
3384  * Input:
3385  *	vha:	adapter state pointer.
3386  *	dev:	port structure pointer.
3387  *
3388  * Returns:
3389  *	qla2x00 local function return status code.
3390  *
3391  * Context:
3392  *	Kernel context.
3393  */
3394 static int
3395 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3396 {
3397 	int	rval;
3398 	int	found;
3399 	fc_port_t *fcport;
3400 	uint16_t first_loop_id;
3401 	struct qla_hw_data *ha = vha->hw;
3402 	struct scsi_qla_host *vp;
3403 	struct scsi_qla_host *tvp;
3404 	unsigned long flags = 0;
3405 
3406 	rval = QLA_SUCCESS;
3407 
3408 	/* Save starting loop ID. */
3409 	first_loop_id = dev->loop_id;
3410 
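	/*
	 * Walk loop IDs circularly from the starting value, skipping the
	 * adapter's own ID, reserved IDs and IDs already owned by another
	 * port; fail only if we wrap back to where we started.
	 */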
3411 	for (;;) {
3412 		/* Skip loop ID if already used by adapter. */
3413 		if (dev->loop_id == vha->loop_id)
3414 			dev->loop_id++;
3415 
3416 		/* Skip reserved loop IDs. */
3417 		while (qla2x00_is_reserved_id(vha, dev->loop_id))
3418 			dev->loop_id++;
3419 
3420 		/* Reset loop ID if passed the end. */
3421 		if (dev->loop_id > ha->max_loop_id) {
3422 			/* first loop ID. */
3423 			dev->loop_id = ha->min_external_loopid;
3424 		}
3425 
3426 		/* Check for loop ID being already in use. */
3427 		found = 0;
3428 		fcport = NULL;
3429 
3430 		spin_lock_irqsave(&ha->vport_slock, flags);
3431 		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3432 			list_for_each_entry(fcport, &vp->vp_fcports, list) {
3433 				if (fcport->loop_id == dev->loop_id &&
3434 								fcport != dev) {
3435 					/* ID possibly in use */
3436 					found++;
3437 					break;
3438 				}
3439 			}
3440 			if (found)
3441 				break;
3442 		}
3443 		spin_unlock_irqrestore(&ha->vport_slock, flags);
3444 
3445 		/* If not in use then it is free to use. */
3446 		if (!found) {
3447 			break;
3448 		}
3449 
3450 		/* ID in use. Try next value. */
3451 		dev->loop_id++;
3452 
3453 		/* If wrap around. No free ID to use. */
3454 		if (dev->loop_id == first_loop_id) {
3455 			dev->loop_id = FC_NO_LOOP_ID;
3456 			rval = QLA_FUNCTION_FAILED;
3457 			break;
3458 		}
3459 	}
3460 
3461 	return (rval);
3462 }
3463 
3464 /*
3465  * qla2x00_device_resync
3466  *	Marks devices in the database that need resynchronization.
3467  *
3468  * Input:
3469  *	vha = adapter block pointer.
3470  *
3471  * Context:
3472  *	Kernel context.
3473  */
3474 static int
3475 qla2x00_device_resync(scsi_qla_host_t *vha)
3476 {
3477 	int	rval;
3478 	uint32_t mask;
3479 	fc_port_t *fcport;
3480 	uint32_t rscn_entry;
3481 	uint8_t rscn_out_iter;
3482 	uint8_t format;
3483 	port_id_t d_id = {};
3484 
3485 	rval = QLA_RSCNS_HANDLED;
3486 
3487 	while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
3488 	    vha->flags.rscn_queue_overflow) {
3489 
3490 		rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
3491 		format = MSB(MSW(rscn_entry));
3492 		d_id.b.domain = LSB(MSW(rscn_entry));
3493 		d_id.b.area = MSB(LSW(rscn_entry));
3494 		d_id.b.al_pa = LSB(LSW(rscn_entry));
3495 
3496 		DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
3497 		    "[%02x/%02x%02x%02x].\n",
3498 		    vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
3499 		    d_id.b.area, d_id.b.al_pa));
3500 
3501 		vha->rscn_out_ptr++;
3502 		if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
3503 			vha->rscn_out_ptr = 0;
3504 
3505 		/* Skip duplicate entries. */
3506 		for (rscn_out_iter = vha->rscn_out_ptr;
3507 		    !vha->flags.rscn_queue_overflow &&
3508 		    rscn_out_iter != vha->rscn_in_ptr;
3509 		    rscn_out_iter = (rscn_out_iter ==
3510 			(MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
3511 
3512 			if (rscn_entry != vha->rscn_queue[rscn_out_iter])
3513 				break;
3514 
3515 			DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
3516 			    "entry found at [%d].\n", vha->host_no,
3517 			    rscn_out_iter));
3518 
3519 			vha->rscn_out_ptr = rscn_out_iter;
3520 		}
3521 
3522 		/* Queue overflow, set switch default case. */
3523 		if (vha->flags.rscn_queue_overflow) {
3524 			DEBUG(printk("scsi(%ld): device_resync: rscn "
3525 			    "overflow.\n", vha->host_no));
3526 
3527 			format = 3;
3528 			vha->flags.rscn_queue_overflow = 0;
3529 		}
3530 
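		/*
		 * RSCN address format: 0 = single port, 1 = area, 2 = domain,
		 * anything else = fabric-wide; the mask selects how much of
		 * the affected D_ID to compare against each fcport.
		 */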
3531 		switch (format) {
3532 		case 0:
3533 			mask = 0xffffff;
3534 			break;
3535 		case 1:
3536 			mask = 0xffff00;
3537 			break;
3538 		case 2:
3539 			mask = 0xff0000;
3540 			break;
3541 		default:
3542 			mask = 0x0;
3543 			d_id.b24 = 0;
3544 			vha->rscn_out_ptr = vha->rscn_in_ptr;
3545 			break;
3546 		}
3547 
3548 		rval = QLA_SUCCESS;
3549 
3550 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3551 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3552 			    (fcport->d_id.b24 & mask) != d_id.b24 ||
3553 			    fcport->port_type == FCT_BROADCAST)
3554 				continue;
3555 
3556 			if (atomic_read(&fcport->state) == FCS_ONLINE) {
3557 				if (format != 3 ||
3558 				    fcport->port_type != FCT_INITIATOR) {
3559 					qla2x00_mark_device_lost(vha, fcport,
3560 					    0, 0);
3561 				}
3562 			}
3563 		}
3564 	}
3565 	return (rval);
3566 }
3567 
3568 /*
3569  * qla2x00_fabric_dev_login
3570  *	Login fabric target device and update FC port database.
3571  *
3572  * Input:
3573  *	vha:		adapter state pointer.
3574  *	fcport:		port structure list pointer.
3575  *	next_loopid:	contains value of a new loop ID that can be used
3576  *			by the next login attempt.
3577  *
3578  * Returns:
3579  *	qla2x00 local function return status code.
3580  *
3581  * Context:
3582  *	Kernel context.
3583  */
3584 static int
3585 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3586     uint16_t *next_loopid)
3587 {
3588 	int	rval;
3589 	int	retry;
3590 	uint8_t opts;
3591 	struct qla_hw_data *ha = vha->hw;
3592 
3593 	rval = QLA_SUCCESS;
3594 	retry = 0;
3595 
3596 	if (IS_ALOGIO_CAPABLE(ha)) {
3597 		if (fcport->flags & FCF_ASYNC_SENT)
3598 			return rval;
3599 		fcport->flags |= FCF_ASYNC_SENT;
3600 		rval = qla2x00_post_async_login_work(vha, fcport, NULL);
3601 		if (!rval)
3602 			return rval;
3603 	}
3604 
3605 	fcport->flags &= ~FCF_ASYNC_SENT;
3606 	rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3607 	if (rval == QLA_SUCCESS) {
3608 		/* Send an ADISC to FCP2 devices.*/
3609 		opts = 0;
3610 		if (fcport->flags & FCF_FCP2_DEVICE)
3611 			opts |= BIT_1;
3612 		rval = qla2x00_get_port_database(vha, fcport, opts);
3613 		if (rval != QLA_SUCCESS) {
3614 			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3615 			    fcport->d_id.b.domain, fcport->d_id.b.area,
3616 			    fcport->d_id.b.al_pa);
3617 			qla2x00_mark_device_lost(vha, fcport, 1, 0);
3618 		} else {
3619 			qla2x00_update_fcport(vha, fcport);
3620 		}
3621 	}
3622 
3623 	return (rval);
3624 }
3625 
3626 /*
3627  * qla2x00_fabric_login
3628  *	Issue fabric login command.
3629  *
3630  * Input:
3631  *	ha = adapter block pointer.
3632  *	device = pointer to FC device type structure.
3633  *
3634  * Returns:
3635  *      0 - Login successfully
3636  *      1 - Login failed
3637  *      2 - Initiator device
3638  *      3 - Fatal error
3639  */
3640 int
3641 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3642     uint16_t *next_loopid)
3643 {
3644 	int	rval;
3645 	int	retry;
3646 	uint16_t tmp_loopid;
3647 	uint16_t mb[MAILBOX_REGISTER_COUNT];
3648 	struct qla_hw_data *ha = vha->hw;
3649 
3650 	retry = 0;
3651 	tmp_loopid = 0;
3652 
3653 	for (;;) {
3654 		DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
3655  		    "for port %02x%02x%02x.\n",
3656 		    vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
3657 		    fcport->d_id.b.area, fcport->d_id.b.al_pa));
3658 
3659 		/* Login fcport on switch. */
3660 		ha->isp_ops->fabric_login(vha, fcport->loop_id,
3661 		    fcport->d_id.b.domain, fcport->d_id.b.area,
3662 		    fcport->d_id.b.al_pa, mb, BIT_0);
3663 		if (mb[0] == MBS_PORT_ID_USED) {
3664 			/*
3665 			 * Device has another loop ID.  The firmware team
3666 			 * recommends the driver perform an implicit login with
3667 			 * the specified ID again. The ID we just used is saved
3668 			 * here so we return with an ID that can be tried by
3669 			 * the next login.
3670 			 */
3671 			retry++;
3672 			tmp_loopid = fcport->loop_id;
3673 			fcport->loop_id = mb[1];
3674 
3675 			DEBUG(printk("Fabric Login: port in use - next "
3676  			    "loop id=0x%04x, port Id=%02x%02x%02x.\n",
3677 			    fcport->loop_id, fcport->d_id.b.domain,
3678 			    fcport->d_id.b.area, fcport->d_id.b.al_pa));
3679 
3680 		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
3681 			/*
3682 			 * Login succeeded.
3683 			 */
3684 			if (retry) {
3685 				/* A retry occurred before. */
3686 				*next_loopid = tmp_loopid;
3687 			} else {
3688 				/*
3689 				 * No retry occurred before. Just increment the
3690 				 * ID value for next login.
3691 				 */
3692 				*next_loopid = (fcport->loop_id + 1);
3693 			}
3694 
3695 			if (mb[1] & BIT_0) {
3696 				fcport->port_type = FCT_INITIATOR;
3697 			} else {
3698 				fcport->port_type = FCT_TARGET;
3699 				if (mb[1] & BIT_1) {
3700 					fcport->flags |= FCF_FCP2_DEVICE;
3701 				}
3702 			}
3703 
3704 			if (mb[10] & BIT_0)
3705 				fcport->supported_classes |= FC_COS_CLASS2;
3706 			if (mb[10] & BIT_1)
3707 				fcport->supported_classes |= FC_COS_CLASS3;
3708 
3709 			rval = QLA_SUCCESS;
3710 			break;
3711 		} else if (mb[0] == MBS_LOOP_ID_USED) {
3712 			/*
3713 			 * Loop ID already used, try next loop ID.
3714 			 */
3715 			fcport->loop_id++;
3716 			rval = qla2x00_find_new_loop_id(vha, fcport);
3717 			if (rval != QLA_SUCCESS) {
3718 				/* Ran out of loop IDs to use */
3719 				break;
3720 			}
3721 		} else if (mb[0] == MBS_COMMAND_ERROR) {
3722 			/*
3723 			 * Firmware possibly timed out during login. If no
3724 			 * retries are left, the device is declared dead.
3726 			 */
3727 			*next_loopid = fcport->loop_id;
3728 			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3729 			    fcport->d_id.b.domain, fcport->d_id.b.area,
3730 			    fcport->d_id.b.al_pa);
3731 			qla2x00_mark_device_lost(vha, fcport, 1, 0);
3732 
3733 			rval = 1;
3734 			break;
3735 		} else {
3736 			/*
3737 			 * unrecoverable / not handled error
3738 			 */
3739 			DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
3740  			    "loop_id=%x jiffies=%lx.\n",
3741 			    __func__, vha->host_no, mb[0],
3742 			    fcport->d_id.b.domain, fcport->d_id.b.area,
3743 			    fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
3744 
3745 			*next_loopid = fcport->loop_id;
3746 			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3747 			    fcport->d_id.b.domain, fcport->d_id.b.area,
3748 			    fcport->d_id.b.al_pa);
3749 			fcport->loop_id = FC_NO_LOOP_ID;
3750 			fcport->login_retry = 0;
3751 
3752 			rval = 3;
3753 			break;
3754 		}
3755 	}
3756 
3757 	return (rval);
3758 }
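
/*
 * Illustrative sketch (not part of the original driver): how a caller
 * might act on the qla2x00_fabric_login() return codes documented
 * above (0 = success, 1 = failed, 2 = initiator device, 3 = fatal
 * error).  fabric_login_example() is a hypothetical wrapper added only
 * for illustration.
 */
static int
fabric_login_example(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	switch (qla2x00_fabric_login(vha, fcport, next_loopid)) {
	case 0:		/* Login succeeded -- port database usable. */
	case 2:		/* Remote port is an initiator -- nothing to scan. */
		return QLA_SUCCESS;
	case 1:		/* Login failed -- may be retried later. */
	case 3:		/* Fatal error -- loop ID has been released. */
	default:
		return QLA_FUNCTION_FAILED;
	}
}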
3759 
3760 /*
3761  * qla2x00_local_device_login
3762  *	Issue local device login command.
3763  *
3764  * Input:
3765  *	ha = adapter block pointer.
3766  *	loop_id = loop id of device to login to.
3767  *
3768  * Returns (Where's the #define!!!!):
3769  *      0 - Login successfully
3770  *      1 - Login failed
3771  *      3 - Fatal error
3772  */
3773 int
3774 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3775 {
3776 	int		rval;
3777 	uint16_t	mb[MAILBOX_REGISTER_COUNT];
3778 
3779 	memset(mb, 0, sizeof(mb));
3780 	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3781 	if (rval == QLA_SUCCESS) {
3782 		/* Interrogate mailbox registers for any errors */
3783 		if (mb[0] == MBS_COMMAND_ERROR)
3784 			rval = 1;
3785 		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
3786 			/* device not in PCB table */
3787 			rval = 3;
3788 	}
3789 
3790 	return (rval);
3791 }
3792 
3793 /*
3794  *  qla2x00_loop_resync
3795  *      Resync with fibre channel devices.
3796  *
3797  * Input:
3798  *      ha = adapter block pointer.
3799  *
3800  * Returns:
3801  *      0 = success
3802  */
3803 int
3804 qla2x00_loop_resync(scsi_qla_host_t *vha)
3805 {
3806 	int rval = QLA_SUCCESS;
3807 	uint32_t wait_time;
3808 	struct req_que *req;
3809 	struct rsp_que *rsp;
3810 
3811 	if (vha->hw->flags.cpu_affinity_enabled)
3812 		req = vha->hw->req_q_map[0];
3813 	else
3814 		req = vha->req;
3815 	rsp = req->rsp;
3816 
3817 	atomic_set(&vha->loop_state, LOOP_UPDATE);
3818 	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3819 	if (vha->flags.online) {
3820 		if (!(rval = qla2x00_fw_ready(vha))) {
3821 			/* Wait at most MAX_TARGET RSCNs for a stable link. */
3822 			wait_time = 256;
3823 			do {
3824 				atomic_set(&vha->loop_state, LOOP_UPDATE);
3825 
3826 				/* Issue a marker after FW becomes ready. */
3827 				qla2x00_marker(vha, req, rsp, 0, 0,
3828 					MK_SYNC_ALL);
3829 				vha->marker_needed = 0;
3830 
3831 				/* Remap devices on Loop. */
3832 				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3833 
3834 				qla2x00_configure_loop(vha);
3835 				wait_time--;
3836 			} while (!atomic_read(&vha->loop_down_timer) &&
3837 				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3838 				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3839 				&vha->dpc_flags)));
3840 		}
3841 	}
3842 
3843 	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3844 		return (QLA_FUNCTION_FAILED);
3845 
3846 	if (rval)
3847 		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
3848 
3849 	return (rval);
3850 }
3851 
3852 /*
3853 * qla2x00_perform_loop_resync
3854 * Description: This function will set the appropriate flags and call
3855 *              qla2x00_loop_resync. If successful, the loop will be resynced.
3856 * Arguments : scsi_qla_host_t pointer
3857 * return    : Success or Failure
3858 */
3859 
3860 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
3861 {
3862 	int32_t rval = 0;
3863 
3864 	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
3865 		/*Configure the flags so that resync happens properly*/
3866 		atomic_set(&ha->loop_down_timer, 0);
3867 		if (!(ha->device_flags & DFLG_NO_CABLE)) {
3868 			atomic_set(&ha->loop_state, LOOP_UP);
3869 			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
3870 			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
3871 			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
3872 
3873 			rval = qla2x00_loop_resync(ha);
3874 		} else
3875 			atomic_set(&ha->loop_state, LOOP_DEAD);
3876 
3877 		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
3878 	}
3879 
3880 	return rval;
3881 }
3882 
3883 void
3884 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3885 {
3886 	fc_port_t *fcport;
3887 	struct scsi_qla_host *vha;
3888 	struct qla_hw_data *ha = base_vha->hw;
3889 	unsigned long flags;
3890 
3891 	spin_lock_irqsave(&ha->vport_slock, flags);
3892 	/* Go with deferred removal of rport references. */
3893 	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
3894 		atomic_inc(&vha->vref_count);
3895 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3896 			if (fcport->drport &&
3897 			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
3898 				spin_unlock_irqrestore(&ha->vport_slock, flags);
3899 
3900 				qla2x00_rport_del(fcport);
3901 
3902 				spin_lock_irqsave(&ha->vport_slock, flags);
3903 			}
3904 		}
3905 		atomic_dec(&vha->vref_count);
3906 	}
3907 	spin_unlock_irqrestore(&ha->vport_slock, flags);
3908 }
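
/*
 * Illustrative sketch (not part of the original driver): the pattern
 * used in qla2x00_update_fcports() above -- pin the vport with
 * vref_count, drop the spinlock around the blocking call, then
 * re-acquire the lock and drop the reference -- generalizes to any
 * walk of ha->vp_list that must sleep.  vp_list_walk_example() is a
 * hypothetical helper shown only to make the locking shape explicit.
 */
static void
vp_list_walk_example(struct qla_hw_data *ha,
    void (*blocking_op)(struct scsi_qla_host *))
{
	struct scsi_qla_host *vp;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);		/* pin the vport */
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		blocking_op(vp);			/* may sleep */

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);		/* unpin */
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}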
3909 
3910 /*
3911 * qla82xx_quiescent_state_cleanup
3912 * Description: This function will block new I/Os.
3913 *              It does not abort any I/Os, as the context
3914 *              is not destroyed during quiescence.
3915 * Arguments: scsi_qla_host_t
3916 * return   : void
3917 */
3918 void
3919 qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
3920 {
3921 	struct qla_hw_data *ha = vha->hw;
3922 	struct scsi_qla_host *vp;
3923 
3924 	qla_printk(KERN_INFO, ha,
3925 			"Performing ISP error recovery - ha= %p.\n", ha);
3926 
3927 	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3928 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3929 		atomic_set(&vha->loop_state, LOOP_DOWN);
3930 		qla2x00_mark_all_devices_lost(vha, 0);
3931 		list_for_each_entry(vp, &ha->vp_list, list)
3932 			qla2x00_mark_all_devices_lost(vp, 0);
3933 	} else {
3934 		if (!atomic_read(&vha->loop_down_timer))
3935 			atomic_set(&vha->loop_down_timer,
3936 					LOOP_DOWN_TIME);
3937 	}
3938 	/* Wait for pending cmds to complete */
3939 	qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
3940 }
3941 
3942 void
3943 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3944 {
3945 	struct qla_hw_data *ha = vha->hw;
3946 	struct scsi_qla_host *vp;
3947 	unsigned long flags;
3948 	fc_port_t *fcport;
3949 
3950 	/* For ISP82XX, the driver waits for completion of the commands,
3951 	 * so the online flag should remain set.
3952 	 */
3953 	if (!IS_QLA82XX(ha))
3954 		vha->flags.online = 0;
3955 	ha->flags.chip_reset_done = 0;
3956 	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3957 	ha->qla_stats.total_isp_aborts++;
3958 
3959 	qla_printk(KERN_INFO, ha,
3960 	    "Performing ISP error recovery - ha= %p.\n", ha);
3961 
3962 	/* For ISP82XX, reset_chip would just disable interrupts.
3963 	 * Since the driver waits for the completion of the commands,
3964 	 * the interrupts need to stay enabled, so the reset is skipped.
3965 	 */
3966 	if (!IS_QLA82XX(ha))
3967 		ha->isp_ops->reset_chip(vha);
3968 
3969 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3970 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3971 		atomic_set(&vha->loop_state, LOOP_DOWN);
3972 		qla2x00_mark_all_devices_lost(vha, 0);
3973 
3974 		spin_lock_irqsave(&ha->vport_slock, flags);
3975 		list_for_each_entry(vp, &ha->vp_list, list) {
3976 			atomic_inc(&vp->vref_count);
3977 			spin_unlock_irqrestore(&ha->vport_slock, flags);
3978 
3979 			qla2x00_mark_all_devices_lost(vp, 0);
3980 
3981 			spin_lock_irqsave(&ha->vport_slock, flags);
3982 			atomic_dec(&vp->vref_count);
3983 		}
3984 		spin_unlock_irqrestore(&ha->vport_slock, flags);
3985 	} else {
3986 		if (!atomic_read(&vha->loop_down_timer))
3987 			atomic_set(&vha->loop_down_timer,
3988 			    LOOP_DOWN_TIME);
3989 	}
3990 
3991 	/* Clear all async request states across all VPs. */
3992 	list_for_each_entry(fcport, &vha->vp_fcports, list)
3993 		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
3994 	spin_lock_irqsave(&ha->vport_slock, flags);
3995 	list_for_each_entry(vp, &ha->vp_list, list) {
3996 		atomic_inc(&vp->vref_count);
3997 		spin_unlock_irqrestore(&ha->vport_slock, flags);
3998 
3999 		list_for_each_entry(fcport, &vp->vp_fcports, list)
4000 			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4001 
4002 		spin_lock_irqsave(&ha->vport_slock, flags);
4003 		atomic_dec(&vp->vref_count);
4004 	}
4005 	spin_unlock_irqrestore(&ha->vport_slock, flags);
4006 
4007 	if (!ha->flags.eeh_busy) {
4008 		/* Make sure for ISP 82XX IO DMA is complete */
4009 		if (IS_QLA82XX(ha)) {
4010 			qla82xx_chip_reset_cleanup(vha);
4011 
4012 			/* Done waiting for pending commands.
4013 			 * Reset the online flag.
4014 			 */
4015 			vha->flags.online = 0;
4016 		}
4017 
4018 		/* Requeue all commands in outstanding command list. */
4019 		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
4020 	}
4021 }
4022 
4023 /*
4024 *  qla2x00_abort_isp
4025 *      Resets ISP and aborts all outstanding commands.
4026 *
4027 * Input:
4028 *      ha           = adapter block pointer.
4029 *
4030 * Returns:
4031 *      0 = success
4032 */
4033 int
4034 qla2x00_abort_isp(scsi_qla_host_t *vha)
4035 {
4036 	int rval;
4037 	uint8_t        status = 0;
4038 	struct qla_hw_data *ha = vha->hw;
4039 	struct scsi_qla_host *vp;
4040 	struct req_que *req = ha->req_q_map[0];
4041 	unsigned long flags;
4042 
4043 	if (vha->flags.online) {
4044 		qla2x00_abort_isp_cleanup(vha);
4045 
4046 		if (unlikely(pci_channel_offline(ha->pdev) &&
4047 		    ha->flags.pci_channel_io_perm_failure)) {
4048 			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4049 			status = 0;
4050 			return status;
4051 		}
4052 
4053 		ha->isp_ops->get_flash_version(vha, req->ring);
4054 
4055 		ha->isp_ops->nvram_config(vha);
4056 
4057 		if (!qla2x00_restart_isp(vha)) {
4058 			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4059 
4060 			if (!atomic_read(&vha->loop_down_timer)) {
4061 				/*
4062 				 * Issue marker command only when we are going
4063 				 * to start the I/O.
4064 				 */
4065 				vha->marker_needed = 1;
4066 			}
4067 
4068 			vha->flags.online = 1;
4069 
4070 			ha->isp_ops->enable_intrs(ha);
4071 
4072 			ha->isp_abort_cnt = 0;
4073 			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4074 
4075 			if (IS_QLA81XX(ha))
4076 				qla2x00_get_fw_version(vha,
4077 				    &ha->fw_major_version,
4078 				    &ha->fw_minor_version,
4079 				    &ha->fw_subminor_version,
4080 				    &ha->fw_attributes, &ha->fw_memory_size,
4081 				    ha->mpi_version, &ha->mpi_capabilities,
4082 				    ha->phy_version);
4083 
4084 			if (ha->fce) {
4085 				ha->flags.fce_enabled = 1;
4086 				memset(ha->fce, 0,
4087 				    fce_calc_size(ha->fce_bufs));
4088 				rval = qla2x00_enable_fce_trace(vha,
4089 				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
4090 				    &ha->fce_bufs);
4091 				if (rval) {
4092 					qla_printk(KERN_WARNING, ha,
4093 					    "Unable to reinitialize FCE "
4094 					    "(%d).\n", rval);
4095 					ha->flags.fce_enabled = 0;
4096 				}
4097 			}
4098 
4099 			if (ha->eft) {
4100 				memset(ha->eft, 0, EFT_SIZE);
4101 				rval = qla2x00_enable_eft_trace(vha,
4102 				    ha->eft_dma, EFT_NUM_BUFFERS);
4103 				if (rval) {
4104 					qla_printk(KERN_WARNING, ha,
4105 					    "Unable to reinitialize EFT "
4106 					    "(%d).\n", rval);
4107 				}
4108 			}
4109 		} else {	/* failed the ISP abort */
4110 			vha->flags.online = 1;
4111 			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
4112 				if (ha->isp_abort_cnt == 0) {
4113  					qla_printk(KERN_WARNING, ha,
4114 					    "ISP error recovery failed - "
4115 					    "board disabled\n");
4116 					/*
4117 					 * The next call disables the board
4118 					 * completely.
4119 					 */
4120 					ha->isp_ops->reset_adapter(vha);
4121 					vha->flags.online = 0;
4122 					clear_bit(ISP_ABORT_RETRY,
4123 					    &vha->dpc_flags);
4124 					status = 0;
4125 				} else { /* schedule another ISP abort */
4126 					ha->isp_abort_cnt--;
4127 					DEBUG(printk("qla%ld: ISP abort - "
4128 					    "retry remaining %d\n",
4129 					    vha->host_no, ha->isp_abort_cnt));
4130 					status = 1;
4131 				}
4132 			} else {
4133 				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
4134 				DEBUG(printk("qla2x00(%ld): ISP error recovery "
4135 				    "- retrying (%d) more times\n",
4136 				    vha->host_no, ha->isp_abort_cnt));
4137 				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4138 				status = 1;
4139 			}
4140 		}
4141 
4142 	}
4143 
4144 	if (!status) {
4145 		DEBUG(printk(KERN_INFO
4146 				"qla2x00_abort_isp(%ld): succeeded.\n",
4147 				vha->host_no));
4148 
4149 		spin_lock_irqsave(&ha->vport_slock, flags);
4150 		list_for_each_entry(vp, &ha->vp_list, list) {
4151 			if (vp->vp_idx) {
4152 				atomic_inc(&vp->vref_count);
4153 				spin_unlock_irqrestore(&ha->vport_slock, flags);
4154 
4155 				qla2x00_vp_abort_isp(vp);
4156 
4157 				spin_lock_irqsave(&ha->vport_slock, flags);
4158 				atomic_dec(&vp->vref_count);
4159 			}
4160 		}
4161 		spin_unlock_irqrestore(&ha->vport_slock, flags);
4162 
4163 	} else {
4164 		qla_printk(KERN_INFO, ha,
4165 			"qla2x00_abort_isp: **** FAILED ****\n");
4166 	}
4167 
4168 	return(status);
4169 }
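
/*
 * Illustrative sketch (not part of the original driver): the retry
 * budget used on the failure path of qla2x00_abort_isp() above.  The
 * first failed abort primes isp_abort_cnt from MAX_RETRIES_OF_ISP_ABORT
 * and sets ISP_ABORT_RETRY; each later failure consumes one retry, and
 * once the count reaches zero the adapter is reset and taken offline.
 * isp_abort_retry_example() is a hypothetical condensed form of that
 * bookkeeping (returns 1 to request another abort, 0 when giving up).
 */
static int
isp_abort_retry_example(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
		set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
		return 1;				/* retry scheduled */
	}
	if (ha->isp_abort_cnt == 0) {
		ha->isp_ops->reset_adapter(vha);	/* board disabled */
		vha->flags.online = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
		return 0;
	}
	ha->isp_abort_cnt--;
	return 1;					/* retries remaining */
}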
4170 
4171 /*
4172 *  qla2x00_restart_isp
4173 *      restarts the ISP after a reset
4174 *
4175 * Input:
4176 *      ha = adapter block pointer.
4177 *
4178 * Returns:
4179 *      0 = success
4180 */
4181 static int
4182 qla2x00_restart_isp(scsi_qla_host_t *vha)
4183 {
4184 	int status = 0;
4185 	uint32_t wait_time;
4186 	struct qla_hw_data *ha = vha->hw;
4187 	struct req_que *req = ha->req_q_map[0];
4188 	struct rsp_que *rsp = ha->rsp_q_map[0];
4189 
4190 	/* If firmware needs to be loaded */
4191 	if (qla2x00_isp_firmware(vha)) {
4192 		vha->flags.online = 0;
4193 		status = ha->isp_ops->chip_diag(vha);
4194 		if (!status)
4195 			status = qla2x00_setup_chip(vha);
4196 	}
4197 
4198 	if (!status && !(status = qla2x00_init_rings(vha))) {
4199 		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4200 		ha->flags.chip_reset_done = 1;
4201 		/* Initialize the queues in use */
4202 		qla25xx_init_queues(ha);
4203 
4204 		status = qla2x00_fw_ready(vha);
4205 		if (!status) {
4206 			DEBUG(printk("%s(): Start configure loop, "
4207 			    "status = %d\n", __func__, status));
4208 
4209 			/* Issue a marker after FW becomes ready. */
4210 			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4211 
4212 			vha->flags.online = 1;
4213 			/* Wait at most MAX_TARGET RSCNs for a stable link. */
4214 			wait_time = 256;
4215 			do {
4216 				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4217 				qla2x00_configure_loop(vha);
4218 				wait_time--;
4219 			} while (!atomic_read(&vha->loop_down_timer) &&
4220 				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4221 				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
4222 				&vha->dpc_flags)));
4223 		}
4224 
4225 		/* if no cable then assume it's good */
4226 		if ((vha->device_flags & DFLG_NO_CABLE))
4227 			status = 0;
4228 
4229 		DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
4230 				__func__,
4231 				status));
4232 	}
4233 	return (status);
4234 }
4235 
4236 static int
4237 qla25xx_init_queues(struct qla_hw_data *ha)
4238 {
4239 	struct rsp_que *rsp = NULL;
4240 	struct req_que *req = NULL;
4241 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4242 	int ret = -1;
4243 	int i;
4244 
4245 	for (i = 1; i < ha->max_rsp_queues; i++) {
4246 		rsp = ha->rsp_q_map[i];
4247 		if (rsp) {
4248 			rsp->options &= ~BIT_0;
4249 			ret = qla25xx_init_rsp_que(base_vha, rsp);
4250 			if (ret != QLA_SUCCESS)
4251 				DEBUG2_17(printk(KERN_WARNING
4252 					"%s Rsp que:%d init failed\n", __func__,
4253 						rsp->id));
4254 			else
4255 				DEBUG2_17(printk(KERN_INFO
4256 					"%s Rsp que:%d inited\n", __func__,
4257 						rsp->id));
4258 		}
4259 	}
4260 	for (i = 1; i < ha->max_req_queues; i++) {
4261 		req = ha->req_q_map[i];
4262 		if (req) {
4263 		/* Clear outstanding commands array. */
4264 			req->options &= ~BIT_0;
4265 			ret = qla25xx_init_req_que(base_vha, req);
4266 			if (ret != QLA_SUCCESS)
4267 				DEBUG2_17(printk(KERN_WARNING
4268 					"%s Req que:%d init failed\n", __func__,
4269 						req->id));
4270 			else
4271 				DEBUG2_17(printk(KERN_INFO
4272 					"%s Req que:%d inited\n", __func__,
4273 						req->id));
4274 		}
4275 	}
4276 	return ret;
4277 }
4278 
4279 /*
4280 * qla2x00_reset_adapter
4281 *      Reset adapter.
4282 *
4283 * Input:
4284 *      ha = adapter block pointer.
4285 */
4286 void
4287 qla2x00_reset_adapter(scsi_qla_host_t *vha)
4288 {
4289 	unsigned long flags = 0;
4290 	struct qla_hw_data *ha = vha->hw;
4291 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4292 
4293 	vha->flags.online = 0;
4294 	ha->isp_ops->disable_intrs(ha);
4295 
4296 	spin_lock_irqsave(&ha->hardware_lock, flags);
4297 	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
4298 	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
4299 	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
4300 	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
4301 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4302 }
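
/*
 * Illustrative sketch (not part of the original driver): the "PCI
 * Posting" reads above flush posted writes -- a write to hccr may sit
 * in a PCI bridge buffer until a read from the same device forces it
 * out, so each HCCR command is followed by a dummy read before the
 * lock is dropped.  hccr_cmd_example() is a hypothetical helper
 * showing that write-then-read-back shape for the 2xxx register
 * layout.
 */
static inline void
hccr_cmd_example(struct device_reg_2xxx __iomem *reg, uint16_t cmd)
{
	WRT_REG_WORD(&reg->hccr, cmd);
	RD_REG_WORD(&reg->hccr);	/* flush the posted write */
}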
4303 
4304 void
4305 qla24xx_reset_adapter(scsi_qla_host_t *vha)
4306 {
4307 	unsigned long flags = 0;
4308 	struct qla_hw_data *ha = vha->hw;
4309 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4310 
4311 	if (IS_QLA82XX(ha))
4312 		return;
4313 
4314 	vha->flags.online = 0;
4315 	ha->isp_ops->disable_intrs(ha);
4316 
4317 	spin_lock_irqsave(&ha->hardware_lock, flags);
4318 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
4319 	RD_REG_DWORD(&reg->hccr);
4320 	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
4321 	RD_REG_DWORD(&reg->hccr);
4322 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4323 
4324 	if (IS_NOPOLLING_TYPE(ha))
4325 		ha->isp_ops->enable_intrs(ha);
4326 }
4327 
4328 /* On sparc systems, obtain port and node WWN from firmware
4329  * properties.
4330  */
4331 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
4332 	struct nvram_24xx *nv)
4333 {
4334 #ifdef CONFIG_SPARC
4335 	struct qla_hw_data *ha = vha->hw;
4336 	struct pci_dev *pdev = ha->pdev;
4337 	struct device_node *dp = pci_device_to_OF_node(pdev);
4338 	const u8 *val;
4339 	int len;
4340 
4341 	val = of_get_property(dp, "port-wwn", &len);
4342 	if (val && len >= WWN_SIZE)
4343 		memcpy(nv->port_name, val, WWN_SIZE);
4344 
4345 	val = of_get_property(dp, "node-wwn", &len);
4346 	if (val && len >= WWN_SIZE)
4347 		memcpy(nv->node_name, val, WWN_SIZE);
4348 #endif
4349 }
4350 
4351 int
4352 qla24xx_nvram_config(scsi_qla_host_t *vha)
4353 {
4354 	int   rval;
4355 	struct init_cb_24xx *icb;
4356 	struct nvram_24xx *nv;
4357 	uint32_t *dptr;
4358 	uint8_t  *dptr1, *dptr2;
4359 	uint32_t chksum;
4360 	uint16_t cnt;
4361 	struct qla_hw_data *ha = vha->hw;
4362 
4363 	rval = QLA_SUCCESS;
4364 	icb = (struct init_cb_24xx *)ha->init_cb;
4365 	nv = ha->nvram;
4366 
4367 	/* Determine NVRAM starting address. */
4368 	if (ha->flags.port0) {
4369 		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4370 		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4371 	} else {
4372 		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
4373 		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4374 	}
4375 	ha->nvram_size = sizeof(struct nvram_24xx);
4376 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
4377 	if (IS_QLA82XX(ha))
4378 		ha->vpd_size = FA_VPD_SIZE_82XX;
4379 
4380 	/* Get VPD data into cache */
4381 	ha->vpd = ha->nvram + VPD_OFFSET;
4382 	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
4383 	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
4384 
4385 	/* Get NVRAM data into cache and calculate checksum. */
4386 	dptr = (uint32_t *)nv;
4387 	ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
4388 	    ha->nvram_size);
4389 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4390 		chksum += le32_to_cpu(*dptr++);
4391 
4392 	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4393 	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4394 
4395 	/* Bad NVRAM data, set default parameters. */
4396 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4397 	    || nv->id[3] != ' ' ||
4398 	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4399 		/* Reset NVRAM data. */
4400 		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4401 		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4402 		    le16_to_cpu(nv->nvram_version));
4403 		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4404 		    "invalid -- WWPN) defaults.\n");
4405 
4406 		/*
4407 		 * Set default initialization control block.
4408 		 */
4409 		memset(nv, 0, ha->nvram_size);
4410 		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4411 		nv->version = __constant_cpu_to_le16(ICB_VERSION);
4412 		nv->frame_payload_size = __constant_cpu_to_le16(2048);
4413 		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4414 		nv->exchange_count = __constant_cpu_to_le16(0);
4415 		nv->hard_address = __constant_cpu_to_le16(124);
4416 		nv->port_name[0] = 0x21;
4417 		nv->port_name[1] = 0x00 + ha->port_no;
4418 		nv->port_name[2] = 0x00;
4419 		nv->port_name[3] = 0xe0;
4420 		nv->port_name[4] = 0x8b;
4421 		nv->port_name[5] = 0x1c;
4422 		nv->port_name[6] = 0x55;
4423 		nv->port_name[7] = 0x86;
4424 		nv->node_name[0] = 0x20;
4425 		nv->node_name[1] = 0x00;
4426 		nv->node_name[2] = 0x00;
4427 		nv->node_name[3] = 0xe0;
4428 		nv->node_name[4] = 0x8b;
4429 		nv->node_name[5] = 0x1c;
4430 		nv->node_name[6] = 0x55;
4431 		nv->node_name[7] = 0x86;
4432 		qla24xx_nvram_wwn_from_ofw(vha, nv);
4433 		nv->login_retry_count = __constant_cpu_to_le16(8);
4434 		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4435 		nv->login_timeout = __constant_cpu_to_le16(0);
4436 		nv->firmware_options_1 =
4437 		    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4438 		nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4439 		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4440 		nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4441 		nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4442 		nv->efi_parameters = __constant_cpu_to_le32(0);
4443 		nv->reset_delay = 5;
4444 		nv->max_luns_per_target = __constant_cpu_to_le16(128);
4445 		nv->port_down_retry_count = __constant_cpu_to_le16(30);
4446 		nv->link_down_timeout = __constant_cpu_to_le16(30);
4447 
4448 		rval = 1;
4449 	}
4450 
4451 	/* Reset Initialization control block */
4452 	memset(icb, 0, ha->init_cb_size);
4453 
4454 	/* Copy 1st segment. */
4455 	dptr1 = (uint8_t *)icb;
4456 	dptr2 = (uint8_t *)&nv->version;
4457 	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4458 	while (cnt--)
4459 		*dptr1++ = *dptr2++;
4460 
4461 	icb->login_retry_count = nv->login_retry_count;
4462 	icb->link_down_on_nos = nv->link_down_on_nos;
4463 
4464 	/* Copy 2nd segment. */
4465 	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4466 	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4467 	cnt = (uint8_t *)&icb->reserved_3 -
4468 	    (uint8_t *)&icb->interrupt_delay_timer;
4469 	while (cnt--)
4470 		*dptr1++ = *dptr2++;
4471 
4472 	/*
4473 	 * Setup driver NVRAM options.
4474 	 */
4475 	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4476 	    "QLA2462");
4477 
4478 	/* Use alternate WWN? */
4479 	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4480 		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4481 		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4482 	}
4483 
4484 	/* Prepare nodename */
4485 	if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
4486 		/*
4487 		 * Firmware will apply the following mask if the nodename was
4488 		 * not provided.
4489 		 */
4490 		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4491 		icb->node_name[0] &= 0xF0;
4492 	}
4493 
4494 	/* Set host adapter parameters. */
4495 	ha->flags.disable_risc_code_load = 0;
4496 	ha->flags.enable_lip_reset = 0;
4497 	ha->flags.enable_lip_full_login =
4498 	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4499 	ha->flags.enable_target_reset =
4500 	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
4501 	ha->flags.enable_led_scheme = 0;
4502 	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
4503 
4504 	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4505 	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
4506 
4507 	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
4508 	    sizeof(ha->fw_seriallink_options24));
4509 
4510 	/* save HBA serial number */
4511 	ha->serial0 = icb->port_name[5];
4512 	ha->serial1 = icb->port_name[6];
4513 	ha->serial2 = icb->port_name[7];
4514 	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4515 	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4516 
4517 	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4518 
4519 	ha->retry_count = le16_to_cpu(nv->login_retry_count);
4520 
4521 	/* Set minimum login_timeout to 4 seconds. */
4522 	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4523 		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4524 	if (le16_to_cpu(nv->login_timeout) < 4)
4525 		nv->login_timeout = __constant_cpu_to_le16(4);
4526 	ha->login_timeout = le16_to_cpu(nv->login_timeout);
4527 	icb->login_timeout = nv->login_timeout;
4528 
4529 	/* Set minimum RATOV to 100 tenths of a second. */
4530 	ha->r_a_tov = 100;
4531 
4532 	ha->loop_reset_delay = nv->reset_delay;
4533 
4534 	/* Link Down Timeout = 0:
4535 	 *
4536 	 * 	When Port Down timer expires we will start returning
4537 	 *	I/O's to OS with "DID_NO_CONNECT".
4538 	 *
4539 	 * Link Down Timeout != 0:
4540 	 *
4541 	 *	 The driver waits for the link to come up after link down
4542 	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
4543 	 */
4544 	if (le16_to_cpu(nv->link_down_timeout) == 0) {
4545 		ha->loop_down_abort_time =
4546 		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4547 	} else {
4548 		ha->link_down_timeout =	le16_to_cpu(nv->link_down_timeout);
4549 		ha->loop_down_abort_time =
4550 		    (LOOP_DOWN_TIME - ha->link_down_timeout);
4551 	}
4552 
4553 	/* Need enough time to try and get the port back. */
4554 	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4555 	if (qlport_down_retry)
4556 		ha->port_down_retry_count = qlport_down_retry;
4557 
4558 	/* Set login_retry_count */
4559 	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
4560 	if (ha->port_down_retry_count ==
4561 	    le16_to_cpu(nv->port_down_retry_count) &&
4562 	    ha->port_down_retry_count > 3)
4563 		ha->login_retry_count = ha->port_down_retry_count;
4564 	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4565 		ha->login_retry_count = ha->port_down_retry_count;
4566 	if (ql2xloginretrycount)
4567 		ha->login_retry_count = ql2xloginretrycount;
4568 
4569 	/* Enable ZIO. */
4570 	if (!vha->flags.init_done) {
4571 		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
4572 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4573 		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
4574 		    le16_to_cpu(icb->interrupt_delay_timer): 2;
4575 	}
4576 	icb->firmware_options_2 &= __constant_cpu_to_le32(
4577 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4578 	vha->flags.process_response_queue = 0;
4579 	if (ha->zio_mode != QLA_ZIO_DISABLED) {
4580 		ha->zio_mode = QLA_ZIO_MODE_6;
4581 
4582 		DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
4583 		    "(%d us).\n", vha->host_no, ha->zio_mode,
4584 		    ha->zio_timer * 100));
4585 		qla_printk(KERN_INFO, ha,
4586 		    "ZIO mode %d enabled; timer delay (%d us).\n",
4587 		    ha->zio_mode, ha->zio_timer * 100);
4588 
4589 		icb->firmware_options_2 |= cpu_to_le32(
4590 		    (uint32_t)ha->zio_mode);
4591 		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4592 		vha->flags.process_response_queue = 1;
4593 	}
4594 
4595 	if (rval) {
4596 		DEBUG2_3(printk(KERN_WARNING
4597 		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
4598 	}
4599 	return (rval);
4600 }
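
/*
 * Illustrative sketch (not part of the original driver): the NVRAM
 * image read above is considered valid only when the little-endian
 * 32-bit words of the whole region sum to zero (the stored checksum
 * word is chosen to bring the sum to zero), the "ISP " signature is
 * present and the version is at least ICB_VERSION.
 * nvram_24xx_sane_example() is a hypothetical restatement of that
 * check.
 */
static int
nvram_24xx_sane_example(struct nvram_24xx *nv, uint32_t nvram_size)
{
	uint32_t *dptr = (uint32_t *)nv;
	uint32_t chksum = 0;
	uint16_t cnt;

	for (cnt = 0; cnt < nvram_size >> 2; cnt++)
		chksum += le32_to_cpu(*dptr++);

	return chksum == 0 &&
	    nv->id[0] == 'I' && nv->id[1] == 'S' &&
	    nv->id[2] == 'P' && nv->id[3] == ' ' &&
	    le16_to_cpu(nv->nvram_version) >= ICB_VERSION;
}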
4601 
4602 static int
4603 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4604     uint32_t faddr)
4605 {
4606 	int	rval = QLA_SUCCESS;
4607 	int	segments, fragment;
4608 	uint32_t *dcode, dlen;
4609 	uint32_t risc_addr;
4610 	uint32_t risc_size;
4611 	uint32_t i;
4612 	struct qla_hw_data *ha = vha->hw;
4613 	struct req_que *req = ha->req_q_map[0];
4614 
4615 	qla_printk(KERN_INFO, ha,
4616 	    "FW: Loading from flash (%x)...\n", faddr);
4617 
4618 	rval = QLA_SUCCESS;
4619 
4620 	segments = FA_RISC_CODE_SEGMENTS;
4621 	dcode = (uint32_t *)req->ring;
4622 	*srisc_addr = 0;
4623 
4624 	/* Validate firmware image by checking version. */
4625 	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
4626 	for (i = 0; i < 4; i++)
4627 		dcode[i] = be32_to_cpu(dcode[i]);
4628 	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4629 	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4630 	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4631 		dcode[3] == 0)) {
4632 		qla_printk(KERN_WARNING, ha,
4633 		    "Unable to verify integrity of flash firmware image!\n");
4634 		qla_printk(KERN_WARNING, ha,
4635 		    "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
4636 		    dcode[1], dcode[2], dcode[3]);
4637 
4638 		return QLA_FUNCTION_FAILED;
4639 	}
4640 
4641 	while (segments && rval == QLA_SUCCESS) {
4642 		/* Read segment's load information. */
4643 		qla24xx_read_flash_data(vha, dcode, faddr, 4);
4644 
4645 		risc_addr = be32_to_cpu(dcode[2]);
4646 		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4647 		risc_size = be32_to_cpu(dcode[3]);
4648 
4649 		fragment = 0;
4650 		while (risc_size > 0 && rval == QLA_SUCCESS) {
4651 			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4652 			if (dlen > risc_size)
4653 				dlen = risc_size;
4654 
4655 			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4656 			    "addr %x, number of dwords 0x%x, offset 0x%x.\n",
4657 			    vha->host_no, risc_addr, dlen, faddr));
4658 
4659 			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
4660 			for (i = 0; i < dlen; i++)
4661 				dcode[i] = swab32(dcode[i]);
4662 
4663 			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4664 			    dlen);
4665 			if (rval) {
4666 				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4667 				    "segment %d of firmware\n", vha->host_no,
4668 				    fragment));
4669 				qla_printk(KERN_WARNING, ha,
4670 				    "[ERROR] Failed to load segment %d of "
4671 				    "firmware\n", fragment);
4672 				break;
4673 			}
4674 
4675 			faddr += dlen;
4676 			risc_addr += dlen;
4677 			risc_size -= dlen;
4678 			fragment++;
4679 		}
4680 
4681 		/* Next segment. */
4682 		segments--;
4683 	}
4684 
4685 	return rval;
4686 }
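
/*
 * Illustrative worked example (not part of the original driver): the
 * inner loop above downloads each firmware segment in chunks of at
 * most ha->fw_transfer_size / 4 dwords, advancing the flash offset,
 * the RISC address and the remaining size together.  For a
 * hypothetical segment of 0x5000 dwords with fw_transfer_size of
 * 0x8000 bytes:
 *
 *	dlen = 0x8000 >> 2 = 0x2000 dwords per chunk
 *	chunk 1: 0x2000 dwords, chunk 2: 0x2000 dwords, chunk 3: 0x1000
 *	(faddr and risc_addr each advance by dlen after every load)
 */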
4687 
4688 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
4689 
4690 int
4691 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4692 {
4693 	int	rval;
4694 	int	i, fragment;
4695 	uint16_t *wcode, *fwcode;
4696 	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
4697 	struct fw_blob *blob;
4698 	struct qla_hw_data *ha = vha->hw;
4699 	struct req_que *req = ha->req_q_map[0];
4700 
4701 	/* Load firmware blob. */
4702 	blob = qla2x00_request_firmware(vha);
4703 	if (!blob) {
4704 		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
4705 		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4706 		    "from: " QLA_FW_URL ".\n");
4707 		return QLA_FUNCTION_FAILED;
4708 	}
4709 
4710 	rval = QLA_SUCCESS;
4711 
4712 	wcode = (uint16_t *)req->ring;
4713 	*srisc_addr = 0;
4714 	fwcode = (uint16_t *)blob->fw->data;
4715 	fwclen = 0;
4716 
4717 	/* Validate firmware image by checking version. */
4718 	if (blob->fw->size < 8 * sizeof(uint16_t)) {
4719 		qla_printk(KERN_WARNING, ha,
4720 		    "Unable to verify integrity of firmware image (%Zd)!\n",
4721 		    blob->fw->size);
4722 		goto fail_fw_integrity;
4723 	}
4724 	for (i = 0; i < 4; i++)
4725 		wcode[i] = be16_to_cpu(fwcode[i + 4]);
4726 	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
4727 	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
4728 		wcode[2] == 0 && wcode[3] == 0)) {
4729 		qla_printk(KERN_WARNING, ha,
4730 		    "Unable to verify integrity of firmware image!\n");
4731 		qla_printk(KERN_WARNING, ha,
4732 		    "Firmware data: %04x %04x %04x %04x!\n", wcode[0],
4733 		    wcode[1], wcode[2], wcode[3]);
4734 		goto fail_fw_integrity;
4735 	}
4736 
4737 	seg = blob->segs;
4738 	while (*seg && rval == QLA_SUCCESS) {
4739 		risc_addr = *seg;
4740 		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
4741 		risc_size = be16_to_cpu(fwcode[3]);
4742 
4743 		/* Validate firmware image size. */
4744 		fwclen += risc_size * sizeof(uint16_t);
4745 		if (blob->fw->size < fwclen) {
4746 			qla_printk(KERN_WARNING, ha,
4747 			    "Unable to verify integrity of firmware image "
4748 			    "(%Zd)!\n", blob->fw->size);
4749 			goto fail_fw_integrity;
4750 		}
4751 
4752 		fragment = 0;
4753 		while (risc_size > 0 && rval == QLA_SUCCESS) {
4754 			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4755 			if (wlen > risc_size)
4756 				wlen = risc_size;
4757 
4758 			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4759 			    "addr %x, number of words 0x%x.\n", vha->host_no,
4760 			    risc_addr, wlen));
4761 
4762 			for (i = 0; i < wlen; i++)
4763 				wcode[i] = swab16(fwcode[i]);
4764 
4765 			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4766 			    wlen);
4767 			if (rval) {
4768 				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4769 				    "segment %d of firmware\n", vha->host_no,
4770 				    fragment));
4771 				qla_printk(KERN_WARNING, ha,
4772 				    "[ERROR] Failed to load segment %d of "
4773 				    "firmware\n", fragment);
4774 				break;
4775 			}
4776 
4777 			fwcode += wlen;
4778 			risc_addr += wlen;
4779 			risc_size -= wlen;
4780 			fragment++;
4781 		}
4782 
4783 		/* Next segment. */
4784 		seg++;
4785 	}
4786 	return rval;
4787 
4788 fail_fw_integrity:
4789 	return QLA_FUNCTION_FAILED;
4790 }
4791 
4792 static int
4793 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4794 {
4795 	int	rval;
4796 	int	segments, fragment;
4797 	uint32_t *dcode, dlen;
4798 	uint32_t risc_addr;
4799 	uint32_t risc_size;
4800 	uint32_t i;
4801 	struct fw_blob *blob;
4802 	uint32_t *fwcode, fwclen;
4803 	struct qla_hw_data *ha = vha->hw;
4804 	struct req_que *req = ha->req_q_map[0];
4805 
4806 	/* Load firmware blob. */
4807 	blob = qla2x00_request_firmware(vha);
4808 	if (!blob) {
4809 		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
4810 		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4811 		    "from: " QLA_FW_URL ".\n");
4812 
4813 		return QLA_FUNCTION_FAILED;
4814 	}
4815 
4816 	qla_printk(KERN_INFO, ha,
4817 	    "FW: Loading via request-firmware...\n");
4818 
4819 	rval = QLA_SUCCESS;
4820 
4821 	segments = FA_RISC_CODE_SEGMENTS;
4822 	dcode = (uint32_t *)req->ring;
4823 	*srisc_addr = 0;
4824 	fwcode = (uint32_t *)blob->fw->data;
4825 	fwclen = 0;
4826 
4827 	/* Validate firmware image by checking version. */
4828 	if (blob->fw->size < 8 * sizeof(uint32_t)) {
4829 		qla_printk(KERN_WARNING, ha,
4830 		    "Unable to verify integrity of firmware image (%Zd)!\n",
4831 		    blob->fw->size);
4832 		goto fail_fw_integrity;
4833 	}
4834 	for (i = 0; i < 4; i++)
4835 		dcode[i] = be32_to_cpu(fwcode[i + 4]);
4836 	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4837 	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4838 	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4839 		dcode[3] == 0)) {
4840 		qla_printk(KERN_WARNING, ha,
4841 		    "Unable to verify integrity of firmware image!\n");
4842 		qla_printk(KERN_WARNING, ha,
4843 		    "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
4844 		    dcode[1], dcode[2], dcode[3]);
4845 		goto fail_fw_integrity;
4846 	}
4847 
4848 	while (segments && rval == QLA_SUCCESS) {
4849 		risc_addr = be32_to_cpu(fwcode[2]);
4850 		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4851 		risc_size = be32_to_cpu(fwcode[3]);
4852 
4853 		/* Validate firmware image size. */
4854 		fwclen += risc_size * sizeof(uint32_t);
4855 		if (blob->fw->size < fwclen) {
4856 			qla_printk(KERN_WARNING, ha,
4857 			    "Unable to verify integrity of firmware image "
4858 			    "(%Zd)!\n", blob->fw->size);
4859 
4860 			goto fail_fw_integrity;
4861 		}
4862 
4863 		fragment = 0;
4864 		while (risc_size > 0 && rval == QLA_SUCCESS) {
4865 			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4866 			if (dlen > risc_size)
4867 				dlen = risc_size;
4868 
4869 			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4870 			    "addr %x, number of dwords 0x%x.\n", vha->host_no,
4871 			    risc_addr, dlen));
4872 
4873 			for (i = 0; i < dlen; i++)
4874 				dcode[i] = swab32(fwcode[i]);
4875 
4876 			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4877 			    dlen);
4878 			if (rval) {
4879 				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4880 				    "segment %d of firmware\n", vha->host_no,
4881 				    fragment));
4882 				qla_printk(KERN_WARNING, ha,
4883 				    "[ERROR] Failed to load segment %d of "
4884 				    "firmware\n", fragment);
4885 				break;
4886 			}
4887 
4888 			fwcode += dlen;
4889 			risc_addr += dlen;
4890 			risc_size -= dlen;
4891 			fragment++;
4892 		}
4893 
4894 		/* Next segment. */
4895 		segments--;
4896 	}
4897 	return rval;
4898 
4899 fail_fw_integrity:
4900 	return QLA_FUNCTION_FAILED;
4901 }
4902 
4903 int
4904 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4905 {
4906 	int rval;
4907 
4908 	if (ql2xfwloadbin == 1)
4909 		return qla81xx_load_risc(vha, srisc_addr);
4910 
4911 	/*
4912 	 * FW Load priority:
4913 	 * 1) Firmware via request-firmware interface (.bin file).
4914 	 * 2) Firmware residing in flash.
4915 	 */
4916 	rval = qla24xx_load_risc_blob(vha, srisc_addr);
4917 	if (rval == QLA_SUCCESS)
4918 		return rval;
4919 
4920 	return qla24xx_load_risc_flash(vha, srisc_addr,
4921 	    vha->hw->flt_region_fw);
4922 }
4923 
4924 int
4925 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4926 {
4927 	int rval;
4928 	struct qla_hw_data *ha = vha->hw;
4929 
4930 	if (ql2xfwloadbin == 2)
4931 		goto try_blob_fw;
4932 
4933 	/*
4934 	 * FW Load priority:
4935 	 * 1) Firmware residing in flash.
4936 	 * 2) Firmware via request-firmware interface (.bin file).
4937 	 * 3) Golden-Firmware residing in flash -- limited operation.
4938 	 */
4939 	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4940 	if (rval == QLA_SUCCESS)
4941 		return rval;
4942 
4943 try_blob_fw:
4944 	rval = qla24xx_load_risc_blob(vha, srisc_addr);
4945 	if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4946 		return rval;
4947 
4948 	qla_printk(KERN_ERR, ha,
4949 	    "FW: Attempting to fallback to golden firmware...\n");
4950 	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4951 	if (rval != QLA_SUCCESS)
4952 		return rval;
4953 
4954 	qla_printk(KERN_ERR, ha,
4955 	    "FW: Please update operational firmware...\n");
4956 	ha->flags.running_gold_fw = 1;
4957 
4958 	return rval;
4959 }
4960 
4961 void
4962 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4963 {
4964 	int ret, retries;
4965 	struct qla_hw_data *ha = vha->hw;
4966 
4967 	if (ha->flags.pci_channel_io_perm_failure)
4968 		return;
4969 	if (!IS_FWI2_CAPABLE(ha))
4970 		return;
4971 	if (!ha->fw_major_version)
4972 		return;
4973 
4974 	ret = qla2x00_stop_firmware(vha);
4975 	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4976 	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
4977 		ha->isp_ops->reset_chip(vha);
4978 		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4979 			continue;
4980 		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4981 			continue;
4982 		qla_printk(KERN_INFO, ha,
4983 		    "Attempting retry of stop-firmware command...\n");
4984 		ret = qla2x00_stop_firmware(vha);
4985 	}
4986 }
4987 
4988 int
4989 qla24xx_configure_vhba(scsi_qla_host_t *vha)
4990 {
4991 	int rval = QLA_SUCCESS;
4992 	uint16_t mb[MAILBOX_REGISTER_COUNT];
4993 	struct qla_hw_data *ha = vha->hw;
4994 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4995 	struct req_que *req;
4996 	struct rsp_que *rsp;
4997 
4998 	if (!vha->vp_idx)
4999 		return -EINVAL;
5000 
5001 	rval = qla2x00_fw_ready(base_vha);
5002 	if (ha->flags.cpu_affinity_enabled)
5003 		req = ha->req_q_map[0];
5004 	else
5005 		req = vha->req;
5006 	rsp = req->rsp;
5007 
5008 	if (rval == QLA_SUCCESS) {
5009 		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5010 		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5011 	}
5012 
5013 	vha->flags.management_server_logged_in = 0;
5014 
5015 	/* Login to SNS first */
5016 	ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
5017 	if (mb[0] != MBS_COMMAND_COMPLETE) {
5018 		DEBUG15(qla_printk(KERN_INFO, ha,
5019 		    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
5020 		    "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
5021 		    mb[0], mb[1], mb[2], mb[6], mb[7]));
5022 		return (QLA_FUNCTION_FAILED);
5023 	}
5024 
5025 	atomic_set(&vha->loop_down_timer, 0);
5026 	atomic_set(&vha->loop_state, LOOP_UP);
5027 	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5028 	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5029 	rval = qla2x00_loop_resync(base_vha);
5030 
5031 	return rval;
5032 }
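
/*
 * Illustrative note (not part of the original driver): the
 * fabric_login() call above targets the directory/name server at the
 * Fibre Channel well-known address 0xFFFFFC, passed as separate
 * domain/area/al_pa bytes.  sns_d_id_example() is a hypothetical
 * helper showing how those bytes compose the 24-bit port ID.
 */
static inline uint32_t
sns_d_id_example(void)
{
	port_id_t d_id = {};

	d_id.b.domain = 0xff;
	d_id.b.area = 0xff;
	d_id.b.al_pa = 0xfc;

	return d_id.b24;		/* 0xFFFFFC */
}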
5033 
5034 /* 84XX Support **************************************************************/
5035 
5036 static LIST_HEAD(qla_cs84xx_list);
5037 static DEFINE_MUTEX(qla_cs84xx_mutex);
5038 
5039 static struct qla_chip_state_84xx *
5040 qla84xx_get_chip(struct scsi_qla_host *vha)
5041 {
5042 	struct qla_chip_state_84xx *cs84xx;
5043 	struct qla_hw_data *ha = vha->hw;
5044 
5045 	mutex_lock(&qla_cs84xx_mutex);
5046 
5047 	/* Find any shared 84xx chip. */
5048 	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
5049 		if (cs84xx->bus == ha->pdev->bus) {
5050 			kref_get(&cs84xx->kref);
5051 			goto done;
5052 		}
5053 	}
5054 
5055 	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
5056 	if (!cs84xx)
5057 		goto done;
5058 
5059 	kref_init(&cs84xx->kref);
5060 	spin_lock_init(&cs84xx->access_lock);
5061 	mutex_init(&cs84xx->fw_update_mutex);
5062 	cs84xx->bus = ha->pdev->bus;
5063 
5064 	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
5065 done:
5066 	mutex_unlock(&qla_cs84xx_mutex);
5067 	return cs84xx;
5068 }
5069 
5070 static void
5071 __qla84xx_chip_release(struct kref *kref)
5072 {
5073 	struct qla_chip_state_84xx *cs84xx =
5074 	    container_of(kref, struct qla_chip_state_84xx, kref);
5075 
5076 	mutex_lock(&qla_cs84xx_mutex);
5077 	list_del(&cs84xx->list);
5078 	mutex_unlock(&qla_cs84xx_mutex);
5079 	kfree(cs84xx);
5080 }
5081 
5082 void
5083 qla84xx_put_chip(struct scsi_qla_host *vha)
5084 {
5085 	struct qla_hw_data *ha = vha->hw;
5086 	if (ha->cs84xx)
5087 		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
5088 }
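
/*
 * Illustrative sketch (not part of the original driver): lifetime of
 * the shared 84xx chip state above.  Each adapter on the same PCI bus
 * grabs a reference through qla84xx_get_chip() (kref_get on an
 * existing entry, or a fresh kzalloc'd one) and releases it through
 * qla84xx_put_chip(); the final kref_put() runs
 * __qla84xx_chip_release(), which unlinks and frees the object.
 * cs84xx_lifetime_example() is a hypothetical walk-through of that
 * pairing.
 */
static void
cs84xx_lifetime_example(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	ha->cs84xx = qla84xx_get_chip(vha);	/* reference held from here */
	if (!ha->cs84xx)
		return;				/* allocation failed */

	/* ... use ha->cs84xx->fw_update_mutex, access_lock, ... */

	qla84xx_put_chip(vha);			/* drops the reference */
}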
5089 
5090 static int
5091 qla84xx_init_chip(scsi_qla_host_t *vha)
5092 {
5093 	int rval;
5094 	uint16_t status[2];
5095 	struct qla_hw_data *ha = vha->hw;
5096 
5097 	mutex_lock(&ha->cs84xx->fw_update_mutex);
5098 
5099 	rval = qla84xx_verify_chip(vha, status);
5100 
5101 	mutex_unlock(&ha->cs84xx->fw_update_mutex);
5102 
5103 	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
5104 	    QLA_SUCCESS;
5105 }
5106 
5107 /* 81XX Support **************************************************************/
5108 
5109 int
5110 qla81xx_nvram_config(scsi_qla_host_t *vha)
5111 {
5112 	int   rval;
5113 	struct init_cb_81xx *icb;
5114 	struct nvram_81xx *nv;
5115 	uint32_t *dptr;
5116 	uint8_t  *dptr1, *dptr2;
5117 	uint32_t chksum;
5118 	uint16_t cnt;
5119 	struct qla_hw_data *ha = vha->hw;
5120 
5121 	rval = QLA_SUCCESS;
5122 	icb = (struct init_cb_81xx *)ha->init_cb;
5123 	nv = ha->nvram;
5124 
5125 	/* Determine NVRAM starting address. */
5126 	ha->nvram_size = sizeof(struct nvram_81xx);
5127 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
5128 
5129 	/* Get VPD data into cache */
5130 	ha->vpd = ha->nvram + VPD_OFFSET;
5131 	ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
5132 	    ha->vpd_size);
5133 
5134 	/* Get NVRAM data into cache and calculate checksum. */
5135 	ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
5136 	    ha->nvram_size);
5137 	dptr = (uint32_t *)nv;
5138 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
5139 		chksum += le32_to_cpu(*dptr++);
5140 
5141 	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
5142 	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
5143 
5144 	/* Bad NVRAM data, set default parameters. */
5145 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5146 	    || nv->id[3] != ' ' ||
5147 	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5148 		/* Reset NVRAM data. */
5149 		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
5150 		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
5151 		    le16_to_cpu(nv->nvram_version));
5152 		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
5153 		    "invalid -- WWPN) defaults.\n");
5154 
5155 		/*
5156 		 * Set default initialization control block.
5157 		 */
5158 		memset(nv, 0, ha->nvram_size);
5159 		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
5160 		nv->version = __constant_cpu_to_le16(ICB_VERSION);
5161 		nv->frame_payload_size = __constant_cpu_to_le16(2048);
5162 		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5163 		nv->exchange_count = __constant_cpu_to_le16(0);
5164 		nv->port_name[0] = 0x21;
5165 		nv->port_name[1] = 0x00 + ha->port_no;
5166 		nv->port_name[2] = 0x00;
5167 		nv->port_name[3] = 0xe0;
5168 		nv->port_name[4] = 0x8b;
5169 		nv->port_name[5] = 0x1c;
5170 		nv->port_name[6] = 0x55;
5171 		nv->port_name[7] = 0x86;
5172 		nv->node_name[0] = 0x20;
5173 		nv->node_name[1] = 0x00;
5174 		nv->node_name[2] = 0x00;
5175 		nv->node_name[3] = 0xe0;
5176 		nv->node_name[4] = 0x8b;
5177 		nv->node_name[5] = 0x1c;
5178 		nv->node_name[6] = 0x55;
5179 		nv->node_name[7] = 0x86;
5180 		nv->login_retry_count = __constant_cpu_to_le16(8);
5181 		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
5182 		nv->login_timeout = __constant_cpu_to_le16(0);
5183 		nv->firmware_options_1 =
5184 		    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
5185 		nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
5186 		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
5187 		nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
5188 		nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
5189 		nv->efi_parameters = __constant_cpu_to_le32(0);
5190 		nv->reset_delay = 5;
5191 		nv->max_luns_per_target = __constant_cpu_to_le16(128);
5192 		nv->port_down_retry_count = __constant_cpu_to_le16(30);
5193 		nv->link_down_timeout = __constant_cpu_to_le16(30);
5194 		nv->enode_mac[0] = 0x00;
5195 		nv->enode_mac[1] = 0x02;
5196 		nv->enode_mac[2] = 0x03;
5197 		nv->enode_mac[3] = 0x04;
5198 		nv->enode_mac[4] = 0x05;
5199 		nv->enode_mac[5] = 0x06 + ha->port_no;
5200 
5201 		rval = 1;
5202 	}
5203 
5204 	/* Reset Initialization control block */
5205 	memset(icb, 0, sizeof(struct init_cb_81xx));
5206 
5207 	/* Copy 1st segment. */
5208 	dptr1 = (uint8_t *)icb;
5209 	dptr2 = (uint8_t *)&nv->version;
5210 	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
5211 	while (cnt--)
5212 		*dptr1++ = *dptr2++;
5213 
5214 	icb->login_retry_count = nv->login_retry_count;
5215 
5216 	/* Copy 2nd segment. */
5217 	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
5218 	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
5219 	cnt = (uint8_t *)&icb->reserved_5 -
5220 	    (uint8_t *)&icb->interrupt_delay_timer;
5221 	while (cnt--)
5222 		*dptr1++ = *dptr2++;
5223 
5224 	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
5225 	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
5226 	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
5227 		icb->enode_mac[0] = 0x01;
5228 		icb->enode_mac[1] = 0x02;
5229 		icb->enode_mac[2] = 0x03;
5230 		icb->enode_mac[3] = 0x04;
5231 		icb->enode_mac[4] = 0x05;
5232 		icb->enode_mac[5] = 0x06 + ha->port_no;
5233 	}
5234 
5235 	/* Use extended-initialization control block. */
5236 	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
5237 
5238 	/*
5239 	 * Setup driver NVRAM options.
5240 	 */
5241 	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
5242 	    "QLE8XXX");
5243 
5244 	/* Use alternate WWN? */
5245 	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
5246 		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5247 		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5248 	}
5249 
5250 	/* Prepare nodename */
5251 	if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
5252 		/*
5253 		 * Firmware will apply the following mask if the nodename was
5254 		 * not provided.
5255 		 */
5256 		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5257 		icb->node_name[0] &= 0xF0;
5258 	}
5259 
5260 	/* Set host adapter parameters. */
5261 	ha->flags.disable_risc_code_load = 0;
5262 	ha->flags.enable_lip_reset = 0;
5263 	ha->flags.enable_lip_full_login =
5264 	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
5265 	ha->flags.enable_target_reset =
5266 	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
5267 	ha->flags.enable_led_scheme = 0;
5268 	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
5269 
5270 	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
5271 	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
5272 
5273 	/* save HBA serial number */
5274 	ha->serial0 = icb->port_name[5];
5275 	ha->serial1 = icb->port_name[6];
5276 	ha->serial2 = icb->port_name[7];
5277 	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5278 	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5279 
5280 	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5281 
5282 	ha->retry_count = le16_to_cpu(nv->login_retry_count);
5283 
5284 	/* Set minimum login_timeout to 4 seconds. */
5285 	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
5286 		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
5287 	if (le16_to_cpu(nv->login_timeout) < 4)
5288 		nv->login_timeout = __constant_cpu_to_le16(4);
5289 	ha->login_timeout = le16_to_cpu(nv->login_timeout);
5290 	icb->login_timeout = nv->login_timeout;
5291 
5292 	/* Set minimum RATOV to 100 tenths of a second. */
5293 	ha->r_a_tov = 100;
5294 
5295 	ha->loop_reset_delay = nv->reset_delay;
5296 
5297 	/* Link Down Timeout = 0:
5298 	 *
5299 	 * 	When Port Down timer expires we will start returning
5300 	 *	I/O's to OS with "DID_NO_CONNECT".
5301 	 *
5302 	 * Link Down Timeout != 0:
5303 	 *
5304 	 *	 The driver waits for the link to come up after link down
5305 	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
5306 	 */
5307 	if (le16_to_cpu(nv->link_down_timeout) == 0) {
5308 		ha->loop_down_abort_time =
5309 		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5310 	} else {
5311 		ha->link_down_timeout =	le16_to_cpu(nv->link_down_timeout);
5312 		ha->loop_down_abort_time =
5313 		    (LOOP_DOWN_TIME - ha->link_down_timeout);
5314 	}
5315 
5316 	/* Need enough time to try and get the port back. */
5317 	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
5318 	if (qlport_down_retry)
5319 		ha->port_down_retry_count = qlport_down_retry;
5320 
5321 	/* Set login_retry_count */
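	/*
	 * The port-down retry count replaces the NVRAM login retry count
	 * when it is larger, or when it still holds the NVRAM default and
	 * exceeds 3; the ql2xloginretrycount module parameter, if set,
	 * overrides both.
	 */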
5322 	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
5323 	if (ha->port_down_retry_count ==
5324 	    le16_to_cpu(nv->port_down_retry_count) &&
5325 	    ha->port_down_retry_count > 3)
5326 		ha->login_retry_count = ha->port_down_retry_count;
5327 	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5328 		ha->login_retry_count = ha->port_down_retry_count;
5329 	if (ql2xloginretrycount)
5330 		ha->login_retry_count = ql2xloginretrycount;
5331 
5332 	/* Enable ZIO. */
5333 	if (!vha->flags.init_done) {
5334 		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
5335 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5336 		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
5337 		    le16_to_cpu(icb->interrupt_delay_timer): 2;
5338 	}
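	/*
	 * Clear the ZIO mode bits (3:0) in firmware_options_2; they are
	 * re-applied below only when a ZIO mode is being enabled.
	 */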
5339 	icb->firmware_options_2 &= __constant_cpu_to_le32(
5340 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
5341 	vha->flags.process_response_queue = 0;
5342 	if (ha->zio_mode != QLA_ZIO_DISABLED) {
5343 		ha->zio_mode = QLA_ZIO_MODE_6;
5344 
5345 		DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
5346 		    "(%d us).\n", vha->host_no, ha->zio_mode,
5347 		    ha->zio_timer * 100));
5348 		qla_printk(KERN_INFO, ha,
5349 		    "ZIO mode %d enabled; timer delay (%d us).\n",
5350 		    ha->zio_mode, ha->zio_timer * 100);
5351 
5352 		icb->firmware_options_2 |= cpu_to_le32(
5353 		    (uint32_t)ha->zio_mode);
5354 		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
5355 		vha->flags.process_response_queue = 1;
5356 	}
5357 
5358 	if (rval) {
5359 		DEBUG2_3(printk(KERN_WARNING
5360 		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
5361 	}
5362 	return (rval);
5363 }
5364 
5365 int
5366 qla82xx_restart_isp(scsi_qla_host_t *vha)
5367 {
5368 	int status, rval;
5369 	uint32_t wait_time;
5370 	struct qla_hw_data *ha = vha->hw;
5371 	struct req_que *req = ha->req_q_map[0];
5372 	struct rsp_que *rsp = ha->rsp_q_map[0];
5373 	struct scsi_qla_host *vp;
5374 	unsigned long flags;
5375 
5376 	status = qla2x00_init_rings(vha);
5377 	if (!status) {
5378 		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5379 		ha->flags.chip_reset_done = 1;
5380 
5381 		status = qla2x00_fw_ready(vha);
5382 		if (!status) {
5383 			qla_printk(KERN_INFO, ha,
5384 			"%s(): Start configure loop, "
5385 			"status = %d\n", __func__, status);
5386 
5387 			/* Issue a marker after FW becomes ready. */
5388 			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5389 
5390 			vha->flags.online = 1;
5391 			/* Wait at most MAX_TARGET RSCNs for a stable link. */
5392 			wait_time = 256;
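			/*
			 * Keep re-running loop configuration while RSCNs
			 * continue to set LOOP_RESYNC_NEEDED; stop early if
			 * the loop goes down, an ISP abort is requested, or
			 * the retry budget (wait_time) is exhausted.
			 */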
5393 			do {
5394 				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5395 				qla2x00_configure_loop(vha);
5396 				wait_time--;
5397 			} while (!atomic_read(&vha->loop_down_timer) &&
5398 			    !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
5399 			    wait_time &&
5400 			    (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
5401 		}
5402 
5403 		/* if no cable then assume it's good */
5404 		if ((vha->device_flags & DFLG_NO_CABLE))
5405 			status = 0;
5406 
5407 		qla_printk(KERN_INFO, ha,
5408 			"%s(): Configure loop done, status = 0x%x\n",
5409 			__func__, status);
5410 	}
5411 
5412 	if (!status) {
5413 		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5414 
5415 		if (!atomic_read(&vha->loop_down_timer)) {
5416 			/*
5417 			 * Issue marker command only when we are going
5418 			 * to start the I/O.
5419 			 */
5420 			vha->marker_needed = 1;
5421 		}
5422 
5423 		vha->flags.online = 1;
5424 
5425 		ha->isp_ops->enable_intrs(ha);
5426 
5427 		ha->isp_abort_cnt = 0;
5428 		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5429 
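		/*
		 * Re-initialize the FCE and EFT firmware trace buffers now
		 * that the ISP has been restarted.
		 */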
5430 		if (ha->fce) {
5431 			ha->flags.fce_enabled = 1;
5432 			memset(ha->fce, 0,
5433 			    fce_calc_size(ha->fce_bufs));
5434 			rval = qla2x00_enable_fce_trace(vha,
5435 			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5436 			    &ha->fce_bufs);
5437 			if (rval) {
5438 				qla_printk(KERN_WARNING, ha,
5439 				    "Unable to reinitialize FCE "
5440 				    "(%d).\n", rval);
5441 				ha->flags.fce_enabled = 0;
5442 			}
5443 		}
5444 
5445 		if (ha->eft) {
5446 			memset(ha->eft, 0, EFT_SIZE);
5447 			rval = qla2x00_enable_eft_trace(vha,
5448 			    ha->eft_dma, EFT_NUM_BUFFERS);
5449 			if (rval) {
5450 				qla_printk(KERN_WARNING, ha,
5451 				    "Unable to reinitialize EFT "
5452 				    "(%d).\n", rval);
5453 			}
5454 		}
5455 	}
5456 
5457 	if (!status) {
5458 		DEBUG(printk(KERN_INFO
5459 			"qla82xx_restart_isp(%ld): succeeded.\n",
5460 			vha->host_no));
5461 
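		/*
		 * Walk the vport list: take a reference and drop the
		 * vport_slock around qla2x00_vp_abort_isp() so the vport
		 * cannot go away while it is being reset.
		 */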
5462 		spin_lock_irqsave(&ha->vport_slock, flags);
5463 		list_for_each_entry(vp, &ha->vp_list, list) {
5464 			if (vp->vp_idx) {
5465 				atomic_inc(&vp->vref_count);
5466 				spin_unlock_irqrestore(&ha->vport_slock, flags);
5467 
5468 				qla2x00_vp_abort_isp(vp);
5469 
5470 				spin_lock_irqsave(&ha->vport_slock, flags);
5471 				atomic_dec(&vp->vref_count);
5472 			}
5473 		}
5474 		spin_unlock_irqrestore(&ha->vport_slock, flags);
5475 
5476 	} else {
5477 		qla_printk(KERN_INFO, ha,
5478 			"qla82xx_restart_isp: **** FAILED ****\n");
5479 	}
5480 
5481 	return status;
5482 }
5483 
5484 void
5485 qla81xx_update_fw_options(scsi_qla_host_t *vha)
5486 {
5487 	struct qla_hw_data *ha = vha->hw;
5488 
5489 	if (!ql2xetsenable)
5490 		return;
5491 
5492 	/* Enable ETS Burst. */
5493 	memset(ha->fw_options, 0, sizeof(ha->fw_options));
5494 	ha->fw_options[2] |= BIT_9;
5495 	qla2x00_set_fw_options(vha, ha->fw_options);
5496 }
5497 
5498 /*
5499  * qla24xx_get_fcp_prio
5500  *	Gets the FCP command priority value for the logged-in port.
5501  *	Looks for a match of the port descriptors within
5502  *	each of the fcp prio config entries. If a match is found,
5503  *	the tag (priority) value is returned.
5504  *
5505  * Input:
5506  *	vha = scsi host structure pointer.
5507  *	fcport = port structure pointer.
5508  *
5509  * Return:
5510  *	non-zero (if found)
5511  * 	0 (if not found)
5512  *
5513  * Context:
5514  * 	Kernel context
5515  */
5516 uint8_t
5517 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5518 {
5519 	int i, entries;
5520 	uint8_t pid_match, wwn_match;
5521 	uint8_t priority;
5522 	uint32_t pid1, pid2;
5523 	uint64_t wwn1, wwn2;
5524 	struct qla_fcp_prio_entry *pri_entry;
5525 	struct qla_hw_data *ha = vha->hw;
5526 
5527 	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
5528 		return 0;
5529 
5530 	priority = 0;
5531 	entries = ha->fcp_prio_cfg->num_entries;
5532 	pri_entry = &ha->fcp_prio_cfg->entry[0];
5533 
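	/*
	 * Scan the priority table: an entry applies only when both its
	 * source and destination descriptors match (pid_match == 2 or
	 * wwn_match == 2).  A source/destination PID of INVALID_PORT_ID,
	 * or an all-ones WWN, acts as a wildcard.
	 */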
5534 	for (i = 0; i < entries; i++) {
5535 		pid_match = wwn_match = 0;
5536 
5537 		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
5538 			pri_entry++;
5539 			continue;
5540 		}
5541 
5542 		/* check source pid for a match */
5543 		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
5544 			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
5545 			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
5546 			if (pid1 == INVALID_PORT_ID)
5547 				pid_match++;
5548 			else if (pid1 == pid2)
5549 				pid_match++;
5550 		}
5551 
5552 		/* check destination pid for a match */
5553 		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
5554 			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
5555 			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
5556 			if (pid1 == INVALID_PORT_ID)
5557 				pid_match++;
5558 			else if (pid1 == pid2)
5559 				pid_match++;
5560 		}
5561 
5562 		/* check source WWN for a match */
5563 		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
5564 			wwn1 = wwn_to_u64(vha->port_name);
5565 			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
5566 			if (wwn2 == (uint64_t)-1)
5567 				wwn_match++;
5568 			else if (wwn1 == wwn2)
5569 				wwn_match++;
5570 		}
5571 
5572 		/* check destination WWN for a match */
5573 		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
5574 			wwn1 = wwn_to_u64(fcport->port_name);
5575 			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
5576 			if (wwn2 == (uint64_t)-1)
5577 				wwn_match++;
5578 			else if (wwn1 == wwn2)
5579 				wwn_match++;
5580 		}
5581 
5582 		if (pid_match == 2 || wwn_match == 2) {
5583 			/* Found a matching entry */
5584 			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
5585 				priority = pri_entry->tag;
5586 			break;
5587 		}
5588 
5589 		pri_entry++;
5590 	}
5591 
5592 	return priority;
5593 }
5594 
5595 /*
5596  * qla24xx_update_fcport_fcp_prio
5597  *	Activates FCP priority for the logged-in FC port
5598  *
5599  * Input:
5600  *	vha = scsi host structure pointer.
5601  *	fcport = port structure pointer.
5602  *
5603  * Return:
5604  *	QLA_SUCCESS or QLA_FUNCTION_FAILED
5605  *
5606  * Context:
5607  *	Kernel context.
5608  */
5609 int
5610 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5611 {
5612 	int ret;
5613 	uint8_t priority;
5614 	uint16_t mb[5];
5615 
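	/* FCP priority applies only to logged-in target ports. */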
5616 	if (fcport->port_type != FCT_TARGET ||
5617 	    fcport->loop_id == FC_NO_LOOP_ID)
5618 		return QLA_FUNCTION_FAILED;
5619 
5620 	priority = qla24xx_get_fcp_prio(vha, fcport);
5621 	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
5622 	if (ret == QLA_SUCCESS)
5623 		fcport->fcp_prio = priority;
5624 	else
5625 		DEBUG2(printk(KERN_WARNING
5626 			"scsi(%ld): Unable to activate fcp priority, "
5627 			"ret=0x%x\n", vha->host_no, ret));
5628 
5629 	return  ret;
5630 }
5631 
5632 /*
5633  * qla24xx_update_all_fcp_prio
5634  *	Activates FCP priority for all logged-in ports
5635  *
5636  * Input:
5637  *	vha = scsi host structure pointer.
5638  *
5639  * Return:
5640  *	QLA_SUCCESS or QLA_FUNCTION_FAILED
5641  *
5642  * Context:
5643  *	Kernel context.
5644  */
5645 int
5646 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
5647 {
5648 	int ret;
5649 	fc_port_t *fcport;
5650 
5651 	ret = QLA_FUNCTION_FAILED;
5652 	/* We need to set the priority for all logged-in ports */
5653 	list_for_each_entry(fcport, &vha->vp_fcports, list)
5654 		ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
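	/* Note: ret reflects the status of the last port processed. */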
5655 
5656 	return ret;
5657 }
5658