1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 #include "qla_def.h"
7 #include "qla_gbl.h"
8 
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
12 
13 #include "qla_devtbl.h"
14 
15 #ifdef CONFIG_SPARC
16 #include <asm/prom.h>
17 #endif
18 
19 #include "qla_target.h"
20 
21 /*
22 *  QLogic ISP2x00 Hardware Support Function Prototypes.
23 */
24 static int qla2x00_isp_firmware(scsi_qla_host_t *);
25 static int qla2x00_setup_chip(scsi_qla_host_t *);
26 static int qla2x00_fw_ready(scsi_qla_host_t *);
27 static int qla2x00_configure_hba(scsi_qla_host_t *);
28 static int qla2x00_configure_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30 static int qla2x00_configure_fabric(scsi_qla_host_t *);
31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
32 static int qla2x00_restart_isp(scsi_qla_host_t *);
33 
34 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
35 static int qla84xx_init_chip(scsi_qla_host_t *);
36 static int qla25xx_init_queues(struct qla_hw_data *);
37 static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
38 				      struct event_arg *ea);
39 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
40     struct event_arg *);
41 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
42 
43 /* SRB Extensions ---------------------------------------------------------- */
44 
45 void
qla2x00_sp_timeout(struct timer_list * t)46 qla2x00_sp_timeout(struct timer_list *t)
47 {
48 	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
49 	struct srb_iocb *iocb;
50 	scsi_qla_host_t *vha = sp->vha;
51 
52 	WARN_ON(irqs_disabled());
53 	iocb = &sp->u.iocb_cmd;
54 	iocb->timeout(sp);
55 
56 	/* ref: TMR */
57 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
58 
59 	if (vha && qla2x00_isp_reg_stat(vha->hw)) {
60 		ql_log(ql_log_info, vha, 0x9008,
61 		    "PCI/Register disconnect.\n");
62 		qla_pci_set_eeh_busy(vha);
63 	}
64 }
65 
/* Stop the SRB's IOCB timer and return the SRB to its pool. */
void qla2x00_sp_free(srb_t *sp)
{
	del_timer(&sp->u.iocb_cmd.timer);
	qla2x00_rel_sp(sp);
}
73 
/*
 * Debug trap installed as the done() hook of a released SRB; if it ever
 * fires, a caller invoked done() after the SRB was freed (use-after-free).
 */
void qla2xxx_rel_done_warning(srb_t *sp, int res)
{
	WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
}
78 
/*
 * Debug trap installed as the free() hook of a released SRB; if it ever
 * fires, a caller invoked free() after the SRB was freed (double free).
 */
void qla2xxx_rel_free_warning(srb_t *sp)
{
	WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
}
83 
84 /* Asynchronous Login/Logout Routines -------------------------------------- */
85 
86 unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host * vha)87 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
88 {
89 	unsigned long tmo;
90 	struct qla_hw_data *ha = vha->hw;
91 
92 	/* Firmware should use switch negotiated r_a_tov for timeout. */
93 	tmo = ha->r_a_tov / 10 * 2;
94 	if (IS_QLAFX00(ha)) {
95 		tmo = FX00_DEF_RATOV * 2;
96 	} else if (!IS_FWI2_CAPABLE(ha)) {
97 		/*
98 		 * Except for earlier ISPs where the timeout is seeded from the
99 		 * initialization control block.
100 		 */
101 		tmo = ha->login_timeout;
102 	}
103 	return tmo;
104 }
105 
/*
 * qla24xx_abort_iocb_timeout - timeout handler for an outstanding ABTS IOCB.
 * @data: the abort srb_t; sp->cmd_sp (if set) is the command being aborted.
 *
 * Firmware never completed the abort.  Remove both the abort SRB and, if
 * still queued, the original command SRB from the queue pair's
 * outstanding_cmds[] table, release their firmware resources, and complete
 * each with QLA_OS_TIMER_EXPIRED.
 */
static void qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	struct qla_qpair *qpair = sp->qpair;
	u32 handle;
	unsigned long flags;
	int sp_found = 0, cmdsp_found = 0;

	if (sp->cmd_sp)
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
		    sp->cmd_sp->handle, sp->cmd_sp->type,
		    sp->handle, sp->type);
	else
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout 2 - hdl=%x, type=%x\n",
		    sp->handle, sp->type);

	/* Scan the outstanding table under the qpair lock; handle 0 is unused. */
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
		/* Drop the original command being aborted, if still present. */
		if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
		    sp->cmd_sp)) {
			qpair->req->outstanding_cmds[handle] = NULL;
			cmdsp_found = 1;
			qla_put_fw_resources(qpair, &sp->cmd_sp->iores);
		}

		/* removing the abort */
		if (qpair->req->outstanding_cmds[handle] == sp) {
			qpair->req->outstanding_cmds[handle] = NULL;
			sp_found = 1;
			qla_put_fw_resources(qpair, &sp->iores);
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (cmdsp_found && sp->cmd_sp) {
		/*
		 * This done function should take care of
		 * original command ref: INIT
		 */
		sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
	}

	if (sp_found) {
		/* Report the timeout to any waiter via the completion status. */
		abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
		sp->done(sp, QLA_OS_TIMER_EXPIRED);
	}
}
157 
/*
 * qla24xx_abort_sp_done - completion callback for an ABTS SRB.
 * @sp: the abort SRB.
 * @res: completion result (unused here).
 *
 * Waits out any NVMe reference on the aborted command, then either wakes
 * the synchronous waiter or drops the SRB's initial reference itself.
 */
static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
	srb_t *aborted_sp = sp->cmd_sp;

	if (aborted_sp)
		qla_wait_nvme_release_cmd_kref(aborted_sp);

	if (sp->flags & SRB_WAKEUP_ON_COMP) {
		/* The waiter in qla24xx_async_abort_cmd() owns the final put. */
		complete(&sp->u.iocb_cmd.u.abt.comp);
	} else {
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
	}
}
172 
/*
 * qla24xx_async_abort_cmd - issue an ABTS IOCB to abort @cmd_sp.
 * @cmd_sp: the outstanding command SRB to abort.
 * @wait: true to block until the abort completes and report its status;
 *        false to fire-and-forget (the done callback drops the reference).
 *
 * Returns QLA_SUCCESS or QLA_ERR_FROM_FW when waiting,
 * QLA_MEMORY_ALLOC_FAILED when no SRB could be allocated, or the
 * qla2x00_start_sp() failure code.
 */
int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	/* ref: INIT for ABTS command */
	sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
				  GFP_ATOMIC);
	if (!sp)
		return QLA_MEMORY_ALLOC_FAILED;

	qla_vha_mark_busy(vha);
	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	/* Route the abort on the same queue pair as the command it targets. */
	sp->qpair = cmd_sp->qpair;
	sp->cmd_sp = cmd_sp;
	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	init_completion(&abt_iocb->u.abt.comp);
	/* FW can send 2 x ABTS's timeout/20s */
	qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
	sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;

	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);

	ql_dbg(ql_dbg_async, vha, 0x507c,
	       "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
	       cmd_sp->type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
		return rval;
	}

	if (wait) {
		/* Woken by qla24xx_abort_sp_done(); waiter drops the ref. */
		wait_for_completion(&abt_iocb->u.abt.comp);
		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
			QLA_SUCCESS : QLA_ERR_FROM_FW;
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
	}

	return rval;
}
224 
225 void
qla2x00_async_iocb_timeout(void * data)226 qla2x00_async_iocb_timeout(void *data)
227 {
228 	srb_t *sp = data;
229 	fc_port_t *fcport = sp->fcport;
230 	struct srb_iocb *lio = &sp->u.iocb_cmd;
231 	int rc, h;
232 	unsigned long flags;
233 
234 	if (fcport) {
235 		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
236 		    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
237 		    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
238 
239 		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
240 	} else {
241 		pr_info("Async-%s timeout - hdl=%x.\n",
242 		    sp->name, sp->handle);
243 	}
244 
245 	switch (sp->type) {
246 	case SRB_LOGIN_CMD:
247 		rc = qla24xx_async_abort_cmd(sp, false);
248 		if (rc) {
249 			/* Retry as needed. */
250 			lio->u.logio.data[0] = MBS_COMMAND_ERROR;
251 			lio->u.logio.data[1] =
252 				lio->u.logio.flags & SRB_LOGIN_RETRIED ?
253 				QLA_LOGIO_LOGIN_RETRIED : 0;
254 			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
255 			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
256 			    h++) {
257 				if (sp->qpair->req->outstanding_cmds[h] ==
258 				    sp) {
259 					sp->qpair->req->outstanding_cmds[h] =
260 					    NULL;
261 					break;
262 				}
263 			}
264 			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
265 			sp->done(sp, QLA_FUNCTION_TIMEOUT);
266 		}
267 		break;
268 	case SRB_LOGOUT_CMD:
269 	case SRB_CT_PTHRU_CMD:
270 	case SRB_MB_IOCB:
271 	case SRB_NACK_PLOGI:
272 	case SRB_NACK_PRLI:
273 	case SRB_NACK_LOGO:
274 	case SRB_CTRL_VP:
275 	default:
276 		rc = qla24xx_async_abort_cmd(sp, false);
277 		if (rc) {
278 			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
279 			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
280 			    h++) {
281 				if (sp->qpair->req->outstanding_cmds[h] ==
282 				    sp) {
283 					sp->qpair->req->outstanding_cmds[h] =
284 					    NULL;
285 					break;
286 				}
287 			}
288 			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
289 			sp->done(sp, QLA_FUNCTION_TIMEOUT);
290 		}
291 		break;
292 	}
293 }
294 
/*
 * qla2x00_async_login_sp_done - completion callback for an async PLOGI SRB.
 * @sp: the login SRB.
 * @res: driver-level completion result; non-zero forces MBS_COMMAND_ERROR.
 *
 * Clears the async flags, forwards the login result to the PLOGI-done
 * state machine (unless the adapter is unloading), and drops the SRB's
 * initial reference.
 */
static void qla2x00_async_login_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		struct event_arg ea;

		memset(&ea, 0, sizeof(ea));
		ea.sp = sp;
		ea.fcport = sp->fcport;
		ea.data[0] = res ? MBS_COMMAND_ERROR : lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		qla24xx_handle_plogi_done_event(vha, &ea);
	}

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
322 
/*
 * qla2x00_async_login - kick off an asynchronous PLOGI/PRLI to @fcport.
 * @vha: adapter state pointer.
 * @fcport: remote port to log into; must already hold a loop ID.
 * @data: login parameters (unused directly here; kept for interface parity
 *        with the other async entry points).
 *
 * Returns QLA_SUCCESS when the IOCB was queued, QLA_FUNCTION_FAILED
 * otherwise.  On failure the port's disc_state is pushed back to
 * DSC_DELETED so the relogin machinery can retry.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	/* Bail if offline, a login is already in flight, or no loop ID yet. */
	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Snapshot generations so completion can detect RSCN/login changes. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_login_sp_done);

	lio = &sp->u.iocb_cmd;
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
		/* N2N with a bigger remote WWPN: remote did PLOGI, we PRLI. */
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	} else {
		if (vha->hw->flags.edif_enabled &&
		    DBELL_ACTIVE(vha)) {
			/* Secure login (EDIF): defer PRLI to the app. */
			lio->u.logio.flags |=
				(SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
		} else {
			lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
		}
	}

	/* NVMe targets get their PRLI via the NVMe path, not here. */
	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	rval = qla2x00_start_sp(sp);

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	       "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
	       fcport->port_name, sp->handle, fcport->loop_id,
	       fcport->d_id.b24, fcport->login_retry,
	       lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : "");

	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;

	/*
	 * async login failed. Could be due to iocb/exchange resource
	 * being low. Set state DELETED for re-login process to start again.
	 */
	qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
	return rval;
}
401 
/*
 * qla2x00_async_logout_sp_done - completion callback for an async LOGO SRB.
 * @sp: the logout SRB.
 * @res: completion result (unused; the mailbox status in logio.data[0] is
 *       what the target-mode handler consumes).
 */
static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
	fc_port_t *fcport = sp->fcport;

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	fcport->login_gen++;
	qlt_logo_completion_handler(fcport, sp->u.iocb_cmd.u.logio.data[0]);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
410 
411 int
qla2x00_async_logout(struct scsi_qla_host * vha,fc_port_t * fcport)412 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
413 {
414 	srb_t *sp;
415 	int rval = QLA_FUNCTION_FAILED;
416 
417 	fcport->flags |= FCF_ASYNC_SENT;
418 	/* ref: INIT */
419 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
420 	if (!sp)
421 		goto done;
422 
423 	sp->type = SRB_LOGOUT_CMD;
424 	sp->name = "logout";
425 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
426 			      qla2x00_async_logout_sp_done),
427 
428 	ql_dbg(ql_dbg_disc, vha, 0x2070,
429 	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
430 	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
431 		fcport->d_id.b.area, fcport->d_id.b.al_pa,
432 		fcport->port_name, fcport->explicit_logout);
433 
434 	rval = qla2x00_start_sp(sp);
435 	if (rval != QLA_SUCCESS)
436 		goto done_free_sp;
437 	return rval;
438 
439 done_free_sp:
440 	/* ref: INIT */
441 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
442 done:
443 	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
444 	return rval;
445 }
446 
/*
 * qla2x00_async_prlo_done - process-context completion for an async PRLO.
 * @vha: adapter state pointer.
 * @fcport: remote port the PRLO was sent to.
 * @data: data[0] carries the completion status forwarded to the
 *        target-mode logout handler.
 *
 * Marks the device lost (initiator mode only) and notifies target mode.
 */
void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1);
	qlt_logo_completion_handler(fcport, data[0]);
}
457 
/*
 * qla2x00_async_prlo_sp_done - completion callback for an async PRLO SRB.
 * @sp: the PRLO SRB.
 * @res: completion result (unused).
 *
 * Defers the real completion work to process context via
 * qla2x00_post_async_prlo_done_work(), then drops the initial reference.
 */
static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;

	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    sp->u.iocb_cmd.u.logio.data);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
470 
471 int
qla2x00_async_prlo(struct scsi_qla_host * vha,fc_port_t * fcport)472 qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
473 {
474 	srb_t *sp;
475 	int rval;
476 
477 	rval = QLA_FUNCTION_FAILED;
478 	/* ref: INIT */
479 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
480 	if (!sp)
481 		goto done;
482 
483 	sp->type = SRB_PRLO_CMD;
484 	sp->name = "prlo";
485 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
486 			      qla2x00_async_prlo_sp_done);
487 
488 	ql_dbg(ql_dbg_disc, vha, 0x2070,
489 	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
490 	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
491 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
492 
493 	rval = qla2x00_start_sp(sp);
494 	if (rval != QLA_SUCCESS)
495 		goto done_free_sp;
496 
497 	return rval;
498 
499 done_free_sp:
500 	/* ref: INIT */
501 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
502 done:
503 	fcport->flags &= ~FCF_ASYNC_ACTIVE;
504 	return rval;
505 }
506 
/*
 * qla24xx_handle_adisc_event - process the result of an async ADISC.
 * @vha: adapter state pointer.
 * @ea: event arguments; ea->data[0] is the mailbox status, ea->sp the
 *      originating SRB (gen1/gen2 hold the RSCN/login generation snapshots).
 *
 * On ADISC failure the session is scheduled for deletion (forcing firmware
 * cleanup); on success, stale generations trigger a replay/relogin, and
 * only a current result is forwarded to the GPDB-done handler.
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);

		spin_lock_irqsave(&vha->work_lock, flags);
		/* deleted = 0 & logout_on_delete = force fw cleanup */
		if (fcport->deleted == QLA_SESS_DELETED)
			fcport->deleted = 0;

		fcport->logout_on_delete = 1;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	/* A deletion already in progress wins over this result. */
	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		/* An RSCN arrived while the ADISC was in flight; redo discovery. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}
556 
/*
 * qla_post_els_plogi_work - queue a QLA_EVT_ELS_PLOGI work item for @fcport.
 *
 * Marks the port async-active and login-pending before posting.
 * Returns QLA_FUNCTION_FAILED when no work event could be allocated,
 * otherwise the qla2x00_post_work() result.
 */
static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);

	if (!evt)
		return QLA_FUNCTION_FAILED;

	evt->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	return qla2x00_post_work(vha, evt);
}
570 
/*
 * qla2x00_async_adisc_sp_done - completion callback for an async ADISC SRB.
 * @sp: the ADISC SRB.
 * @res: driver-level result; non-zero forces MBS_COMMAND_ERROR into the
 *       event before it is handed to the ADISC state machine.
 */
static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.fcport = sp->fcport;
	ea.rc = res;
	ea.data[0] = res ? MBS_COMMAND_ERROR : lio->u.logio.data[0];
	ea.data[1] = lio->u.logio.data[1];
	ea.iop[0] = lio->u.logio.iop[0];
	ea.iop[1] = lio->u.logio.iop[1];

	qla24xx_handle_adisc_event(vha, &ea);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
598 
/*
 * qla2x00_async_adisc - issue an asynchronous ADISC to verify @fcport.
 * @vha: adapter state pointer.
 * @fcport: remote port to validate.
 * @data: data[1] may carry QLA_LOGIO_LOGIN_RETRIED to mark a retried login.
 *
 * Returns QLA_SUCCESS when the IOCB was queued, QLA_FUNCTION_FAILED
 * otherwise.  On failure the work is re-posted via
 * qla2x00_post_async_adisc_work() for a later attempt.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (IS_SESSION_DELETED(fcport)) {
		ql_log(ql_log_warn, vha, 0xffff,
		       "%s: %8phC is being delete - not sending command.\n",
		       __func__, fcport->port_name);
		fcport->flags &= ~FCF_ASYNC_ACTIVE;
		return rval;
	}

	/* Skip when offline or when an async op is already in flight. */
	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";
	/* Snapshot generations so completion can detect RSCN/login changes. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_adisc_sp_done);

	if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
		lio = &sp->u.iocb_cmd;
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	}

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	/* Requeue the ADISC so it can be retried from process context. */
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}
654 
qla2x00_is_reserved_id(scsi_qla_host_t * vha,uint16_t loop_id)655 static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
656 {
657 	struct qla_hw_data *ha = vha->hw;
658 
659 	if (IS_FWI2_CAPABLE(ha))
660 		return loop_id > NPH_LAST_HANDLE;
661 
662 	return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
663 		loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
664 }
665 
666 /**
667  * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
668  * @vha: adapter state pointer.
669  * @dev: port structure pointer.
670  *
671  * Returns:
672  *	qla2x00 local function return status code.
673  *
674  * Context:
675  *	Kernel context.
676  */
qla2x00_find_new_loop_id(scsi_qla_host_t * vha,fc_port_t * dev)677 static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
678 {
679 	int	rval;
680 	struct qla_hw_data *ha = vha->hw;
681 	unsigned long flags = 0;
682 
683 	rval = QLA_SUCCESS;
684 
685 	spin_lock_irqsave(&ha->vport_slock, flags);
686 
687 	dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
688 	if (dev->loop_id >= LOOPID_MAP_SIZE ||
689 	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
690 		dev->loop_id = FC_NO_LOOP_ID;
691 		rval = QLA_FUNCTION_FAILED;
692 	} else {
693 		set_bit(dev->loop_id, ha->loop_id_map);
694 	}
695 	spin_unlock_irqrestore(&ha->vport_slock, flags);
696 
697 	if (rval == QLA_SUCCESS)
698 		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
699 		       "Assigning new loopid=%x, portid=%x.\n",
700 		       dev->loop_id, dev->d_id.b24);
701 	else
702 		ql_log(ql_log_warn, dev->vha, 0x2087,
703 		       "No loop_id's available, portid=%x.\n",
704 		       dev->d_id.b24);
705 
706 	return rval;
707 }
708 
/*
 * qla2x00_clear_loop_id - release @fcport's loop ID back to the bitmap.
 *
 * Unassigned and reserved handles are never tracked in loop_id_map, so
 * there is nothing to release for them.
 */
void qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;
	uint16_t loop_id = fcport->loop_id;

	if (loop_id == FC_NO_LOOP_ID ||
	    qla2x00_is_reserved_id(fcport->vha, loop_id))
		return;

	clear_bit(loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}
720 
/*
 * qla24xx_handle_gnl_done_event - process a completed Get Name List (GNL).
 * @vha: adapter state pointer.
 * @ea: event arguments; ea->data[0] is the number of bytes transferred,
 *      ea->rc the GNL return status, ea->fcport the port being resolved.
 *
 * Walks the firmware-reported name list in vha->gnl.l, matches
 * ea->fcport by WWPN, reconciles loop ID / port ID / FC4 type with the
 * firmware's view, resolves loop-ID conflicts with other sessions, and
 * advances the port's login state machine according to the current
 * topology (fabric, N2N, loop).  If the port is absent from the list,
 * topology-specific recovery (relogin, link reset, chip reset) is kicked
 * off instead.
 */
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state, nvme_cls;

	fcport = ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);

	/* A deletion already in progress wins over this result. */
	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) { /* rval */
		if (fcport->login_retry == 0) {
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	/* Stale generations: events arrived while the GNL was in flight. */
	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	/* Number of name-list entries firmware returned. */
	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);
		/* port_id[] is little-endian: al_pa first. */
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		/* Only the entry matching this fcport's WWPN is of interest. */
		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		if (IS_SW_RESV_ADDR(id))
			continue;

		found = 1;

		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);
		/* High nibble carries the NVMe login state, low the FCP one. */
		nvme_cls = e->current_login_state >> 4;
		current_login_state = e->current_login_state & 0xf;

		if (PRLI_PHASE(nvme_cls)) {
			current_login_state = nvme_cls;
			fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			fcport->fc4_type |= FS_FC4TYPE_NVME;
		} else if (PRLI_PHASE(current_login_state)) {
			fcport->fc4_type |= FS_FC4TYPE_FCP;
			fcport->fc4_type &= ~FS_FC4TYPE_NVME;
		}

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    fcport->fc4_type, id.b24, fcport->d_id.b24,
		    loop_id, fcport->loop_id);

		switch (fcport->disc_state) {
		case DSC_DELETE_PEND:
		case DSC_DELETED:
			break;
		default:
			/*
			 * Firmware reports a different port ID or loop ID
			 * than we have on record: tear the session down and
			 * rediscover.
			 */
			if ((id.b24 != fcport->d_id.b24 &&
			    fcport->d_id.b24 &&
			    fcport->loop_id != FC_NO_LOOP_ID) ||
			    (fcport->loop_id != FC_NO_LOOP_ID &&
				fcport->loop_id != loop_id)) {
				ql_dbg(ql_dbg_disc, vha, 0x20e3,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, fcport->port_name);
				if (fcport->n2n_flag)
					fcport->d_id.b24 = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
			break;
		}

		fcport->loop_id = loop_id;
		if (fcport->n2n_flag)
			fcport->d_id.b24 = id.b24;

		/* Evict any other session holding the same loop/port ID. */
		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
			id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another share fcport share the same loop_id &
			 * nport id. Conflict fcport needs to finish
			 * cleanup before this fcport can proceed to login.
			 */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		switch (vha->hw->current_topology) {
		default:
			/* Fabric/loop topologies. */
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				ql_dbg(ql_dbg_disc,
				    vha, 0x20e4, "%s %d %8phC post gpdb\n",
				    __func__, __LINE__, fcport->port_name);

				/* BIT_4 of word 3 distinguishes target ports. */
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (vha->hw->flags.edif_enabled) {
					/* check to see if App support Secure */
					qla24xx_post_gpdb_work(vha, fcport, 0);
					break;
				}
				fallthrough;
			case DSC_LS_PORT_UNAVAIL:
			default:
				if (fcport->loop_id == FC_NO_LOOP_ID) {
					qla2x00_find_new_loop_id(vha, fcport);
					fcport->fw_login_state =
					    DSC_LS_PORT_UNAVAIL;
				}
				ql_dbg(ql_dbg_disc, vha, 0x20e5,
				    "%s %d %8phC\n", __func__, __LINE__,
				    fcport->port_name);
				qla24xx_fcport_handle_login(vha, fcport);
				break;
			}
			break;
		case ISP_CFG_N:
			/* N2N (point-to-point) topology. */
			fcport->fw_login_state = current_login_state;
			fcport->d_id = id;
			switch (current_login_state) {
			case DSC_LS_PRLI_PEND:
				/*
				 * In the middle of PRLI. Let it finish.
				 * Allow relogin code to recheck state again
				 * with GNL. Push disc_state back to DELETED
				 * so GNL can go out again
				 */
				qla2x00_set_fcport_disc_state(fcport,
				    DSC_DELETED);
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			case DSC_LS_PRLI_COMP:
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;

				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (vha->hw->flags.edif_enabled &&
				    DBELL_ACTIVE(vha)) {
					/* check to see if App support secure or not */
					qla24xx_post_gpdb_work(vha, fcport, 0);
					break;
				}
				if (fcport_is_bigger(fcport)) {
					/* local adapter is smaller */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
					break;
				}
				fallthrough;
			default:
				if (fcport_is_smaller(fcport)) {
					/* local adapter is bigger */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
				}
				break;
			}
			break;
		} /* switch (ha->current_topology) */
	}

	if (!found) {
		/* The port did not appear in the firmware's name list. */
		switch (vha->hw->current_topology) {
		case ISP_CFG_F:
		case ISP_CFG_FL:
			for (i = 0; i < n; i++) {
				e = &vha->gnl.l[i];
				/*
				 * NOTE(review): byte order here (domain from
				 * port_id[0]) is the reverse of the matching
				 * loop above — confirm intentional.
				 */
				id.b.domain = e->port_id[0];
				id.b.area = e->port_id[1];
				id.b.al_pa = e->port_id[2];
				id.b.rsvd_1 = 0;
				loop_id = le16_to_cpu(e->nport_handle);

				if (fcport->d_id.b24 == id.b24) {
					conflict_fcport =
					    qla2x00_find_fcport_by_wwpn(vha,
						e->port_name, 0);
					if (conflict_fcport) {
						ql_dbg(ql_dbg_disc + ql_dbg_verbose,
						    vha, 0x20e5,
						    "%s %d %8phC post del sess\n",
						    __func__, __LINE__,
						    conflict_fcport->port_name);
						qlt_schedule_sess_for_deletion
							(conflict_fcport);
					}
				}
				/*
				 * FW already picked this loop id for
				 * another fcport
				 */
				if (fcport->loop_id == loop_id)
					fcport->loop_id = FC_NO_LOOP_ID;
			}
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		case ISP_CFG_N:
			qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
			if (time_after_eq(jiffies, fcport->dm_login_expire)) {
				if (fcport->n2n_link_reset_cnt < 2) {
					fcport->n2n_link_reset_cnt++;
					/*
					 * remote port is not sending PLOGI.
					 * Reset link to kick start his state
					 * machine
					 */
					set_bit(N2N_LINK_RESET,
					    &vha->dpc_flags);
				} else {
					if (fcport->n2n_chip_reset < 1) {
						ql_log(ql_log_info, vha, 0x705d,
						    "Chip reset to bring laser down");
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						fcport->n2n_chip_reset++;
					} else {
						ql_log(ql_log_info, vha, 0x705d,
						    "Remote port %8ph is not coming back\n",
						    fcport->port_name);
						fcport->scan_state = 0;
					}
				}
				qla2xxx_wake_dpc(vha);
			} else {
				/*
				 * report port suppose to do PLOGI. Give him
				 * more time. FW will catch it.
				 */
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			}
			break;
		case ISP_CFG_NL:
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		default:
			break;
		}
	}
} /* gnl_event */
1022 
/*
 * Completion callback for the async Get Name List (GNL) mailbox command
 * (MBC_PORT_NODE_NAME_LIST).
 *
 * Firmware has filled vha->gnl.l with one get_name_list_extended entry
 * per port it knows about.  This routine:
 *   1. marks each returned loop id as in-use in the hw loop_id_map,
 *   2. runs the GNL-done event handler for every fcport that queued
 *      itself on vha->gnl.fcports while this command was outstanding,
 *   3. posts "new session" work for any WWPN firmware reports that the
 *      driver has no fcport for,
 *   4. clears gnl.sent and, if more fcports queued up meanwhile,
 *      re-triggers another GNL for one of them.
 */
static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);


	sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;

	/* mb[1] = number of bytes transferred; derive the entry count */
	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	/* Reserve every loop id firmware reported as in use. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
		    __func__, &wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	/*
	 * Detach the list of fcports waiting on this GNL under the session
	 * lock, then process the detached list; each node is unlinked under
	 * the lock again before its event handler runs (which may sleep-free
	 * of the lock).
	 */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		list_del_init(&fcport->gnl_entry);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla24xx_handle_gnl_done_event(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		/* Note: firmware reports the port id bytes in reverse order. */
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, 0);
		}
	}

	/*
	 * Allow a new GNL to be issued; if more waiters arrived while this
	 * one ran, kick off the next GNL on behalf of one of them.
	 */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	if (!list_empty(&vha->gnl.fcports)) {
		/* retrigger gnl */
		list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
		    gnl_entry) {
			list_del_init(&fcport->gnl_entry);
			fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
			if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
				break;
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
1135 
/*
 * Issue an async Get Name List (GNL) mailbox command on behalf of
 * @fcport.  The fcport is queued on vha->gnl.fcports; if a GNL is
 * already outstanding (vha->gnl.sent) the fcport simply rides on it
 * and this returns QLA_SUCCESS without issuing a new command.  The
 * completion (qla24xx_async_gnl_sp_done) services every queued fcport.
 *
 * Returns QLA_SUCCESS if the command was started (or piggybacked),
 * QLA_FUNCTION_FAILED otherwise.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		goto done;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already in flight; its completion covers us. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		/*
		 * NOTE(review): on this failure the fcport stays on
		 * vha->gnl.fcports and gnl.sent stays set — presumably
		 * recovered by a later relogin/reset path; verify.
		 */
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla24xx_async_gnl_sp_done);

	/* Mailbox setup: name-list options and DMA address of gnl buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~(FCF_ASYNC_SENT);
done:
	fcport->flags &= ~(FCF_ASYNC_ACTIVE);
	return rval;
}
1203 
/*
 * Queue a deferred Get Name List (GNL) request for @fcport on the
 * driver work queue.  The port is flagged FCF_ASYNC_ACTIVE so the
 * login state machine will not start another async op on it.
 *
 * Returns QLA_FUNCTION_FAILED if no work element could be allocated,
 * otherwise the result of posting the work.
 */
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_GNL);

	if (!evt)
		return QLA_FUNCTION_FAILED;

	fcport->flags |= FCF_ASYNC_ACTIVE;
	evt->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, evt);
}
1216 
/*
 * Completion callback for the async Get Port Database (GPDB) mailbox
 * command.  On normal completion, hands the retrieved port database to
 * qla24xx_handle_gpdb_event(); on driver-side timeout the event is
 * skipped.  Always frees the DMA'd port-database buffer and drops the
 * INIT reference on the srb.
 */
static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	/* On timeout the pd buffer content is unreliable; skip the event. */
	if (res == QLA_FUNCTION_TIMEOUT)
		goto done;

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;

	qla24xx_handle_gpdb_event(vha, &ea);

done:
	/* Free the port database buffer allocated in qla24xx_async_gpdb(). */
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
		sp->u.iocb_cmd.u.mbx.in_dma);

	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
1246 
/*
 * Queue a deferred PRLI (process login) for @fcport.  Pure target mode
 * never initiates PRLI, so the request is refused outright there.
 *
 * Returns QLA_FUNCTION_FAILED in target mode or on allocation failure,
 * otherwise the result of posting the work element.
 */
int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *evt;

	if (vha->host->active_mode == MODE_TARGET)
		return QLA_FUNCTION_FAILED;

	evt = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!evt)
		return QLA_FUNCTION_FAILED;

	evt->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, evt);
}
1262 
/*
 * Completion callback for an async PRLI IOCB.  Translates the IOCB
 * result into an event_arg (mapping driver-side timeout / generic
 * failure onto mailbox-style status codes) and feeds it to the PRLI
 * done-event handler, unless the driver is unloading.
 */
static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %x\n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		/* Normalize error results into data[0] for the handler. */
		if (res == QLA_OS_TIMER_EXPIRED)
			ea.data[0] = QLA_OS_TIMER_EXPIRED;
		else if (res)
			ea.data[0] = MBS_COMMAND_ERROR;

		qla24xx_handle_prli_done_event(vha, &ea);
	}

	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
1293 
/*
 * Issue an async PRLI (process login) to @fcport.  Skipped when the
 * adapter is offline, or in dual mode while firmware already has a
 * PLOGI/PRLI pending for the port (the firmware-driven login will
 * complete on its own).  Selects the NVMe PRLI variant for NVMe
 * targets.
 *
 * Returns QLA_SUCCESS if the IOCB was started; on start failure the
 * port is flagged for relogin and QLA_FUNCTION_FAILED (or the start
 * error) is returned.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	/* In dual mode, defer to a firmware-pending PLOGI/PRLI. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
	    qla_dual_mode_enabled(vha)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_prli_sp_done);

	lio = &sp->u.iocb_cmd;
	lio->u.logio.flags = 0;

	/* NVMe targets need the NVMe-flavored PRLI payload. */
	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
	    NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Let the DPC thread retry the login later. */
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
1354 
/*
 * Queue a deferred Get Port Database (GPDB) request for @fcport with
 * mailbox option byte @opt.  Flags the port FCF_ASYNC_ACTIVE so no
 * competing async op is started on it.
 *
 * Returns QLA_FUNCTION_FAILED on allocation failure, otherwise the
 * result of posting the work.
 */
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_GPDB);

	if (!evt)
		return QLA_FUNCTION_FAILED;

	evt->u.fcport.fcport = fcport;
	evt->u.fcport.opt = opt;
	fcport->flags |= FCF_ASYNC_ACTIVE;

	return qla2x00_post_work(vha, evt);
}
1368 
/*
 * Issue an async Get Port Database (MBC_GET_PORT_DATABASE) mailbox
 * command for @fcport.  A port_database_24xx buffer is DMA-allocated
 * here and freed by the completion callback (or on the error path
 * below).  @opt is passed through in mb[10].
 *
 * Returns QLA_SUCCESS if the command was started.  On any failure the
 * async flags are cleared and the GPDB is re-posted as deferred work
 * so the request is retried rather than lost.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (IS_SESSION_DELETED(fcport)) {
		ql_log(ql_log_warn, vha, 0xffff,
		       "%s: %8phC is being delete - not sending command.\n",
		       __func__, fcport->port_name);
		fcport->flags &= ~FCF_ASYNC_ACTIVE;
		return rval;
	}

	if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC online %d flags %x - not sending command.\n",
		    __func__, fcport->port_name, vha->flags.online, fcport->flags);
		goto done;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	/* Snapshot generations so completion can detect RSCN/login churn. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla24xx_async_gpdb_sp_done);

	/* Buffer the firmware will DMA the port database into. */
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	/* Stash buffer/handle so the completion can use and free it. */
	mbx = &sp->u.iocb_cmd;
	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Retry via deferred work so a transient failure is not fatal. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}
1449 
/*
 * Final stage of GPDB processing: mark the session login complete.
 * On the first successful login of a real (non-reserved) port, bump
 * vha->fcport_count and schedule fcport registration with the upper
 * layers; the session lock is dropped around that call because the
 * scheduling path may not be invoked under it.  An already-logged-in
 * port is just revalidated.
 */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		/* Drop the lock for the upcall, then retake it. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		qla24xx_sched_upd_fcport(ea->fcport);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
1479 
qla_chk_secure_login(scsi_qla_host_t * vha,fc_port_t * fcport,struct port_database_24xx * pd)1480 static int	qla_chk_secure_login(scsi_qla_host_t	*vha, fc_port_t *fcport,
1481 	struct port_database_24xx *pd)
1482 {
1483 	int rc = 0;
1484 
1485 	if (pd->secure_login) {
1486 		ql_dbg(ql_dbg_disc, vha, 0x104d,
1487 		    "Secure Login established on %8phC\n",
1488 		    fcport->port_name);
1489 		fcport->flags |= FCF_FCSP_DEVICE;
1490 	} else {
1491 		ql_dbg(ql_dbg_disc, vha, 0x104d,
1492 		    "non-Secure Login %8phC",
1493 		    fcport->port_name);
1494 		fcport->flags &= ~FCF_FCSP_DEVICE;
1495 	}
1496 	if (vha->hw->flags.edif_enabled) {
1497 		if (fcport->flags & FCF_FCSP_DEVICE) {
1498 			qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
1499 			/* Start edif prli timer & ring doorbell for app */
1500 			fcport->edif.rx_sa_set = 0;
1501 			fcport->edif.tx_sa_set = 0;
1502 			fcport->edif.rx_sa_pending = 0;
1503 			fcport->edif.tx_sa_pending = 0;
1504 
1505 			qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
1506 			    fcport->d_id.b24);
1507 
1508 			if (DBELL_ACTIVE(vha)) {
1509 				ql_dbg(ql_dbg_disc, vha, 0x20ef,
1510 				    "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
1511 				    __func__, __LINE__, fcport->port_name);
1512 				fcport->edif.app_sess_online = 1;
1513 
1514 				qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
1515 				    fcport->d_id.b24, 0, fcport);
1516 			}
1517 
1518 			rc = 1;
1519 		} else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
1520 			ql_dbg(ql_dbg_disc, vha, 0x2117,
1521 			    "%s %d %8phC post prli\n",
1522 			    __func__, __LINE__, fcport->port_name);
1523 			qla24xx_post_prli_work(vha, fcport);
1524 			rc = 1;
1525 		}
1526 	}
1527 	return rc;
1528 }
1529 
/*
 * Process a completed Get Port Database command: validate that the
 * session/RSCN generations have not changed since the command was
 * issued, then dispatch on the firmware's current login state for the
 * port — completing the login, retrying it, or tearing the session
 * down.  For NVMe targets the login state lives in the upper nibble
 * of pd->current_login_state.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t	ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__,
	    fcport->port_name, fcport->disc_state, pd->current_login_state,
	    fcport->fc4_type, ea->rc);

	/* Session is already being torn down; nothing to do. */
	if (fcport->disc_state == DSC_DELETE_PEND) {
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
		       __func__, __LINE__, fcport->port_name);
		return;
	}

	if (NVME_TARGET(vha->hw, fcport))
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */

		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN arrived mid-flight: replay it and redo discovery. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
		       __func__, __LINE__, fcport->port_name, ls);
		return;
	}

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_COMPLETE:
		/* Secure-login handling may take over the flow entirely. */
		if (qla_chk_secure_login(vha, fcport, pd)) {
			ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
			       __func__, __LINE__, fcport->port_name, ls);
			return;
		}
		fallthrough;
	case PDS_PLOGI_PENDING:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
		       __func__, __LINE__, fcport->port_name, ls);
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
1606 
qla_chk_n2n_b4_login(struct scsi_qla_host * vha,fc_port_t * fcport)1607 static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1608 {
1609 	u8 login = 0;
1610 	int rc;
1611 
1612 	ql_dbg(ql_dbg_disc, vha, 0x307b,
1613 	    "%s %8phC DS %d LS %d lid %d retries=%d\n",
1614 	    __func__, fcport->port_name, fcport->disc_state,
1615 	    fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
1616 
1617 	if (qla_tgt_mode_enabled(vha))
1618 		return;
1619 
1620 	if (qla_dual_mode_enabled(vha)) {
1621 		if (N2N_TOPO(vha->hw)) {
1622 			u64 mywwn, wwn;
1623 
1624 			mywwn = wwn_to_u64(vha->port_name);
1625 			wwn = wwn_to_u64(fcport->port_name);
1626 			if (mywwn > wwn)
1627 				login = 1;
1628 			else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1629 			    && time_after_eq(jiffies,
1630 				    fcport->plogi_nack_done_deadline))
1631 				login = 1;
1632 		} else {
1633 			login = 1;
1634 		}
1635 	} else {
1636 		/* initiator mode */
1637 		login = 1;
1638 	}
1639 
1640 	if (login && fcport->login_retry) {
1641 		fcport->login_retry--;
1642 		if (fcport->loop_id == FC_NO_LOOP_ID) {
1643 			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
1644 			rc = qla2x00_find_new_loop_id(vha, fcport);
1645 			if (rc) {
1646 				ql_dbg(ql_dbg_disc, vha, 0x20e6,
1647 				    "%s %d %8phC post del sess - out of loopid\n",
1648 				    __func__, __LINE__, fcport->port_name);
1649 				fcport->scan_state = 0;
1650 				qlt_schedule_sess_for_deletion(fcport);
1651 				return;
1652 			}
1653 		}
1654 		ql_dbg(ql_dbg_disc, vha, 0x20bf,
1655 		    "%s %d %8phC post login\n",
1656 		    __func__, __LINE__, fcport->port_name);
1657 		qla2x00_post_async_login_work(vha, fcport, NULL);
1658 	}
1659 }
1660 
qla24xx_fcport_handle_login(struct scsi_qla_host * vha,fc_port_t * fcport)1661 int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1662 {
1663 	u16 data[2];
1664 	u16 sec;
1665 
1666 	ql_dbg(ql_dbg_disc, vha, 0x20d8,
1667 	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n",
1668 	    __func__, fcport->port_name, fcport->disc_state,
1669 	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
1670 	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
1671 	    fcport->login_gen, fcport->loop_id, fcport->scan_state,
1672 	    fcport->fc4_type);
1673 
1674 	if (fcport->scan_state != QLA_FCPORT_FOUND ||
1675 	    fcport->disc_state == DSC_DELETE_PEND)
1676 		return 0;
1677 
1678 	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
1679 	    qla_dual_mode_enabled(vha) &&
1680 	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1681 	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
1682 		return 0;
1683 
1684 	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
1685 	    !N2N_TOPO(vha->hw)) {
1686 		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
1687 			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1688 			return 0;
1689 		}
1690 	}
1691 
1692 	/* Target won't initiate port login if fabric is present */
1693 	if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
1694 		return 0;
1695 
1696 	if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
1697 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1698 		return 0;
1699 	}
1700 
1701 	switch (fcport->disc_state) {
1702 	case DSC_DELETED:
1703 		switch (vha->hw->current_topology) {
1704 		case ISP_CFG_N:
1705 			if (fcport_is_smaller(fcport)) {
1706 				/* this adapter is bigger */
1707 				if (fcport->login_retry) {
1708 					if (fcport->loop_id == FC_NO_LOOP_ID) {
1709 						qla2x00_find_new_loop_id(vha,
1710 						    fcport);
1711 						fcport->fw_login_state =
1712 						    DSC_LS_PORT_UNAVAIL;
1713 					}
1714 					fcport->login_retry--;
1715 					qla_post_els_plogi_work(vha, fcport);
1716 				} else {
1717 					ql_log(ql_log_info, vha, 0x705d,
1718 					    "Unable to reach remote port %8phC",
1719 					    fcport->port_name);
1720 				}
1721 			} else {
1722 				qla24xx_post_gnl_work(vha, fcport);
1723 			}
1724 			break;
1725 		default:
1726 			if (fcport->loop_id == FC_NO_LOOP_ID) {
1727 				ql_dbg(ql_dbg_disc, vha, 0x20bd,
1728 				    "%s %d %8phC post gnl\n",
1729 				    __func__, __LINE__, fcport->port_name);
1730 				qla24xx_post_gnl_work(vha, fcport);
1731 			} else {
1732 				qla_chk_n2n_b4_login(vha, fcport);
1733 			}
1734 			break;
1735 		}
1736 		break;
1737 
1738 	case DSC_GNL:
1739 		switch (vha->hw->current_topology) {
1740 		case ISP_CFG_N:
1741 			if ((fcport->current_login_state & 0xf) == 0x6) {
1742 				ql_dbg(ql_dbg_disc, vha, 0x2118,
1743 				    "%s %d %8phC post GPDB work\n",
1744 				    __func__, __LINE__, fcport->port_name);
1745 				fcport->chip_reset =
1746 					vha->hw->base_qpair->chip_reset;
1747 				qla24xx_post_gpdb_work(vha, fcport, 0);
1748 			}  else {
1749 				ql_dbg(ql_dbg_disc, vha, 0x2118,
1750 				    "%s %d %8phC post %s PRLI\n",
1751 				    __func__, __LINE__, fcport->port_name,
1752 				    NVME_TARGET(vha->hw, fcport) ? "NVME" :
1753 				    "FC");
1754 				qla24xx_post_prli_work(vha, fcport);
1755 			}
1756 			break;
1757 		default:
1758 			if (fcport->login_pause) {
1759 				ql_dbg(ql_dbg_disc, vha, 0x20d8,
1760 				    "%s %d %8phC exit\n",
1761 				    __func__, __LINE__,
1762 				    fcport->port_name);
1763 				fcport->last_rscn_gen = fcport->rscn_gen;
1764 				fcport->last_login_gen = fcport->login_gen;
1765 				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1766 				break;
1767 			}
1768 			qla_chk_n2n_b4_login(vha, fcport);
1769 			break;
1770 		}
1771 		break;
1772 
1773 	case DSC_LOGIN_FAILED:
1774 		if (N2N_TOPO(vha->hw))
1775 			qla_chk_n2n_b4_login(vha, fcport);
1776 		else
1777 			qlt_schedule_sess_for_deletion(fcport);
1778 		break;
1779 
1780 	case DSC_LOGIN_COMPLETE:
1781 		/* recheck login state */
1782 		data[0] = data[1] = 0;
1783 		qla2x00_post_async_adisc_work(vha, fcport, data);
1784 		break;
1785 
1786 	case DSC_LOGIN_PEND:
1787 		if (vha->hw->flags.edif_enabled)
1788 			break;
1789 
1790 		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
1791 			ql_dbg(ql_dbg_disc, vha, 0x2118,
1792 			       "%s %d %8phC post %s PRLI\n",
1793 			       __func__, __LINE__, fcport->port_name,
1794 			       NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC");
1795 			qla24xx_post_prli_work(vha, fcport);
1796 		}
1797 		break;
1798 
1799 	case DSC_UPD_FCPORT:
1800 		sec =  jiffies_to_msecs(jiffies -
1801 		    fcport->jiffies_at_registration)/1000;
1802 		if (fcport->sec_since_registration < sec && sec &&
1803 		    !(sec % 60)) {
1804 			fcport->sec_since_registration = sec;
1805 			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
1806 			    "%s %8phC - Slow Rport registration(%d Sec)\n",
1807 			    __func__, fcport->port_name, sec);
1808 		}
1809 
1810 		if (fcport->next_disc_state != DSC_DELETE_PEND)
1811 			fcport->next_disc_state = DSC_ADISC;
1812 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1813 		break;
1814 
1815 	default:
1816 		break;
1817 	}
1818 
1819 	return 0;
1820 }
1821 
qla24xx_post_newsess_work(struct scsi_qla_host * vha,port_id_t * id,u8 * port_name,u8 * node_name,void * pla,u8 fc4_type)1822 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1823     u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
1824 {
1825 	struct qla_work_evt *e;
1826 
1827 	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1828 	if (!e)
1829 		return QLA_FUNCTION_FAILED;
1830 
1831 	e->u.new_sess.id = *id;
1832 	e->u.new_sess.pla = pla;
1833 	e->u.new_sess.fc4_type = fc4_type;
1834 	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1835 	if (node_name)
1836 		memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
1837 
1838 	return qla2x00_post_work(vha, e);
1839 }
1840 
qla2x00_handle_rscn(scsi_qla_host_t * vha,struct event_arg * ea)1841 void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
1842 {
1843 	fc_port_t *fcport;
1844 	unsigned long flags;
1845 
1846 	switch (ea->id.b.rsvd_1) {
1847 	case RSCN_PORT_ADDR:
1848 		fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
1849 		if (fcport) {
1850 			if (ql2xfc2target &&
1851 			    fcport->flags & FCF_FCP2_DEVICE &&
1852 			    atomic_read(&fcport->state) == FCS_ONLINE) {
1853 				ql_dbg(ql_dbg_disc, vha, 0x2115,
1854 				       "Delaying session delete for FCP2 portid=%06x %8phC ",
1855 					fcport->d_id.b24, fcport->port_name);
1856 				return;
1857 			}
1858 
1859 			if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
1860 				/*
1861 				 * On ipsec start by remote port, Target port
1862 				 * may use RSCN to trigger initiator to
1863 				 * relogin. If driver is already in the
1864 				 * process of a relogin, then ignore the RSCN
1865 				 * and allow the current relogin to continue.
1866 				 * This reduces thrashing of the connection.
1867 				 */
1868 				if (atomic_read(&fcport->state) == FCS_ONLINE) {
1869 					/*
1870 					 * If state = online, then set scan_needed=1 to do relogin.
1871 					 * Otherwise we're already in the middle of a relogin
1872 					 */
1873 					fcport->scan_needed = 1;
1874 					fcport->rscn_gen++;
1875 				}
1876 			} else {
1877 				fcport->scan_needed = 1;
1878 				fcport->rscn_gen++;
1879 			}
1880 		}
1881 		break;
1882 	case RSCN_AREA_ADDR:
1883 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
1884 			if (fcport->flags & FCF_FCP2_DEVICE &&
1885 			    atomic_read(&fcport->state) == FCS_ONLINE)
1886 				continue;
1887 
1888 			if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
1889 				fcport->scan_needed = 1;
1890 				fcport->rscn_gen++;
1891 			}
1892 		}
1893 		break;
1894 	case RSCN_DOM_ADDR:
1895 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
1896 			if (fcport->flags & FCF_FCP2_DEVICE &&
1897 			    atomic_read(&fcport->state) == FCS_ONLINE)
1898 				continue;
1899 
1900 			if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
1901 				fcport->scan_needed = 1;
1902 				fcport->rscn_gen++;
1903 			}
1904 		}
1905 		break;
1906 	case RSCN_FAB_ADDR:
1907 	default:
1908 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
1909 			if (fcport->flags & FCF_FCP2_DEVICE &&
1910 			    atomic_read(&fcport->state) == FCS_ONLINE)
1911 				continue;
1912 
1913 			fcport->scan_needed = 1;
1914 			fcport->rscn_gen++;
1915 		}
1916 		break;
1917 	}
1918 
1919 	spin_lock_irqsave(&vha->work_lock, flags);
1920 	if (vha->scan.scan_flags == 0) {
1921 		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
1922 		vha->scan.scan_flags |= SF_QUEUED;
1923 		schedule_delayed_work(&vha->scan.scan_work, 5);
1924 	}
1925 	spin_unlock_irqrestore(&vha->work_lock, flags);
1926 }
1927 
qla24xx_handle_relogin_event(scsi_qla_host_t * vha,struct event_arg * ea)1928 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1929 	struct event_arg *ea)
1930 {
1931 	fc_port_t *fcport = ea->fcport;
1932 
1933 	if (test_bit(UNLOADING, &vha->dpc_flags))
1934 		return;
1935 
1936 	ql_dbg(ql_dbg_disc, vha, 0x2102,
1937 	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1938 	    __func__, fcport->port_name, fcport->disc_state,
1939 	    fcport->fw_login_state, fcport->login_pause,
1940 	    fcport->deleted, fcport->conflict,
1941 	    fcport->last_rscn_gen, fcport->rscn_gen,
1942 	    fcport->last_login_gen, fcport->login_gen,
1943 	    fcport->flags);
1944 
1945 	if (fcport->last_rscn_gen != fcport->rscn_gen) {
1946 		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
1947 		    __func__, __LINE__, fcport->port_name);
1948 		qla24xx_post_gnl_work(vha, fcport);
1949 		return;
1950 	}
1951 
1952 	qla24xx_fcport_handle_login(vha, fcport);
1953 }
1954 
qla_handle_els_plogi_done(scsi_qla_host_t * vha,struct event_arg * ea)1955 void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
1956 				      struct event_arg *ea)
1957 {
1958 	if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
1959 	    vha->hw->flags.edif_enabled) {
1960 		/* check to see if App support Secure */
1961 		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1962 		return;
1963 	}
1964 
1965 	/* for pure Target Mode, PRLI will not be initiated */
1966 	if (vha->host->active_mode == MODE_TARGET)
1967 		return;
1968 
1969 	ql_dbg(ql_dbg_disc, vha, 0x2118,
1970 	    "%s %d %8phC post PRLI\n",
1971 	    __func__, __LINE__, ea->fcport->port_name);
1972 	qla24xx_post_prli_work(vha, ea->fcport);
1973 }
1974 
1975 /*
1976  * RSCN(s) came in for this fcport, but the RSCN(s) was not able
1977  * to be consumed by the fcport
1978  */
void qla_rscn_replay(fc_port_t *fcport)
{
	struct event_arg ea;

	/* A port being torn down must not re-enter RSCN handling. */
	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* Only replay when an unconsumed RSCN is actually pending. */
	if (!fcport->scan_needed)
		return;

	memset(&ea, 0, sizeof(ea));
	ea.id = fcport->d_id;
	ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
	qla2x00_handle_rscn(fcport->vha, &ea);
}
1997 
/*
 * Timeout handler for TMF and marker SRBs.
 *
 * A marker has nothing to abort in firmware, so its timeout is
 * treated as an immediate failure; for a TMF an abort IOCB is
 * attempted first.  If no abort was issued, the SRB is reaped from
 * the qpair's outstanding-command table here and the waiter in
 * __qla2x00_async_tm_cmd()/qla26xx_marker() is woken with
 * CS_TIMEOUT.
 */
static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	if (sp->type == SRB_MARKER)
		rc = QLA_FUNCTION_FAILED;
	else
		rc = qla24xx_async_abort_cmd(sp, false);

	if (rc) {
		/* No abort in flight: clear our handle under the qpair
		 * lock and release the firmware resources it held. */
		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
			if (sp->qpair->req->outstanding_cmds[h] == sp) {
				sp->qpair->req->outstanding_cmds[h] = NULL;
				qla_put_fw_resources(sp->qpair, &sp->iores);
				break;
			}
		}
		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
		tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
		tmf->u.tmf.data = QLA_FUNCTION_FAILED;
		complete(&tmf->u.tmf.comp);
	}
}
2026 
/* SRB completion callback for marker IOCBs: record the result and
 * wake the thread waiting in qla26xx_marker(). */
static void qla_marker_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	if (res != QLA_SUCCESS)
		ql_dbg(ql_dbg_taskm, sp->vha, 0x8004,
		    "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n",
		    sp->handle, sp->fcport->d_id.b24, iocb->u.tmf.flags,
		    iocb->u.tmf.lun, sp->qpair->id);

	iocb->u.tmf.data = res;
	complete(&iocb->u.tmf.comp);
}
2040 
/*
 * Start an SRB, retrying up to 5 times while the request queue is
 * busy (EAGAIN).  Bail out with EINVAL if a chip reset or a relogin
 * bumped the generation counters snapshot by the caller, since the
 * SRB would then target a stale session.
 *
 * Hygiene fix: the expansion previously referenced the caller's
 * 'sp' variable directly instead of the _sp parameter, and used a
 * bare brace block; it now uses _sp throughout and the conventional
 * do { } while (0) wrapper.
 */
#define  START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \
do { \
	int cnt = 5; \
	do { \
		if (_chip_gen != (_sp)->vha->hw->chip_reset || \
		    _login_gen != (_sp)->fcport->login_gen) { \
			_rval = EINVAL; \
			break; \
		} \
		_rval = qla2x00_start_sp(_sp); \
		if (_rval == EAGAIN) \
			msleep(1); \
		else \
			break; \
		cnt--; \
	} while (cnt); \
} while (0)
2057 
2058 /**
2059  * qla26xx_marker: send marker IOCB and wait for the completion of it.
2060  * @arg: pointer to argument list.
2061  *    It is assume caller will provide an fcport pointer and modifier
2062  */
2063 static int
qla26xx_marker(struct tmf_arg * arg)2064 qla26xx_marker(struct tmf_arg *arg)
2065 {
2066 	struct scsi_qla_host *vha = arg->vha;
2067 	struct srb_iocb *tm_iocb;
2068 	srb_t *sp;
2069 	int rval = QLA_FUNCTION_FAILED;
2070 	fc_port_t *fcport = arg->fcport;
2071 	u32 chip_gen, login_gen;
2072 
2073 	if (TMF_NOT_READY(arg->fcport)) {
2074 		ql_dbg(ql_dbg_taskm, vha, 0x8039,
2075 		    "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
2076 		    fcport->loop_id, fcport->d_id.b24,
2077 		    arg->modifier, arg->lun, arg->qpair->id);
2078 		return QLA_SUSPENDED;
2079 	}
2080 
2081 	chip_gen = vha->hw->chip_reset;
2082 	login_gen = fcport->login_gen;
2083 
2084 	/* ref: INIT */
2085 	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
2086 	if (!sp)
2087 		goto done;
2088 
2089 	sp->type = SRB_MARKER;
2090 	sp->name = "marker";
2091 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done);
2092 	sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
2093 
2094 	tm_iocb = &sp->u.iocb_cmd;
2095 	init_completion(&tm_iocb->u.tmf.comp);
2096 	tm_iocb->u.tmf.modifier = arg->modifier;
2097 	tm_iocb->u.tmf.lun = arg->lun;
2098 	tm_iocb->u.tmf.loop_id = fcport->loop_id;
2099 	tm_iocb->u.tmf.vp_index = vha->vp_idx;
2100 
2101 	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
2102 
2103 	ql_dbg(ql_dbg_taskm, vha, 0x8006,
2104 	    "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
2105 	    sp->handle, fcport->loop_id, fcport->d_id.b24,
2106 	    arg->modifier, arg->lun, sp->qpair->id, rval);
2107 
2108 	if (rval != QLA_SUCCESS) {
2109 		ql_log(ql_log_warn, vha, 0x8031,
2110 		    "Marker IOCB send failure (%x).\n", rval);
2111 		goto done_free_sp;
2112 	}
2113 
2114 	wait_for_completion(&tm_iocb->u.tmf.comp);
2115 	rval = tm_iocb->u.tmf.data;
2116 
2117 	if (rval != QLA_SUCCESS) {
2118 		ql_log(ql_log_warn, vha, 0x8019,
2119 		    "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
2120 		    sp->handle, fcport->loop_id, fcport->d_id.b24,
2121 		    arg->modifier, arg->lun, sp->qpair->id, rval);
2122 	}
2123 
2124 done_free_sp:
2125 	/* ref: INIT */
2126 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
2127 done:
2128 	return rval;
2129 }
2130 
/* SRB completion callback for TM IOCBs: propagate a non-zero result
 * and wake the waiter in __qla2x00_async_tm_cmd(). */
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	if (res)
		iocb->u.tmf.data = res;
	complete(&iocb->u.tmf.comp);
}
2139 
qla_tmf_wait(struct tmf_arg * arg)2140 static int qla_tmf_wait(struct tmf_arg *arg)
2141 {
2142 	/* there are only 2 types of error handling that reaches here, lun or target reset */
2143 	if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET))
2144 		return qla2x00_eh_wait_for_pending_commands(arg->vha,
2145 		    arg->fcport->d_id.b24, arg->lun, WAIT_LUN);
2146 	else
2147 		return qla2x00_eh_wait_for_pending_commands(arg->vha,
2148 		    arg->fcport->d_id.b24, arg->lun, WAIT_TARGET);
2149 }
2150 
/*
 * __qla2x00_async_tm_cmd - issue a task-management IOCB and wait for
 * its completion.
 *
 * Sends the TM command described by @arg on its qpair and waits for
 * the completion posted by qla2x00_tmf_sp_done() (or the timeout
 * handler).  Unless the driver is unloading or this is an ISPFx00,
 * it then drains I/O still pending on the nexus and follows up with
 * a marker IOCB.
 *
 * Returns QLA_SUSPENDED if the port is not ready, otherwise a QLA_*
 * status.
 */
static int
__qla2x00_async_tm_cmd(struct tmf_arg *arg)
{
	struct scsi_qla_host *vha = arg->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	fc_port_t *fcport = arg->fcport;
	u32 chip_gen, login_gen;
	u64 jif;

	if (TMF_NOT_READY(arg->fcport)) {
		ql_dbg(ql_dbg_taskm, vha, 0x8032,
		    "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
		    fcport->loop_id, fcport->d_id.b24,
		    arg->modifier, arg->lun, arg->qpair->id);
		return QLA_SUSPENDED;
	}

	/* Snapshot generations; a later change means reset/relogin. */
	chip_gen = vha->hw->chip_reset;
	login_gen = fcport->login_gen;

	/* ref: INIT */
	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla_vha_mark_busy(vha);
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
			      qla2x00_tmf_sp_done);
	sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;

	tm_iocb = &sp->u.iocb_cmd;
	init_completion(&tm_iocb->u.tmf.comp);
	tm_iocb->u.tmf.flags = arg->flags;
	tm_iocb->u.tmf.lun = arg->lun;

	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24,
	    arg->flags, arg->lun, sp->qpair->id, rval);

	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	/* Signalled by qla2x00_tmf_sp_done() or the timeout handler. */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		jif = jiffies;
		/* Let I/O affected by the TMF drain before the marker. */
		if (qla_tmf_wait(arg)) {
			ql_log(ql_log_info, vha, 0x803e,
			       "Waited %u ms Nexus=%ld:%06x:%llu.\n",
			       jiffies_to_msecs(jiffies - jif), vha->host_no,
			       fcport->d_id.b24, arg->lun);
		}

		/* Only send the marker if no disruption occurred meanwhile. */
		if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) {
			rval = qla26xx_marker(arg);
		} else {
			ql_log(ql_log_info, vha, 0x803e,
			       "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n",
			       vha->host_no, fcport->d_id.b24, arg->lun);
			rval = QLA_FUNCTION_FAILED;
		}
	}
	/* A failure recorded by the TM completion wins over marker status. */
	if (tm_iocb->u.tmf.data)
		rval = tm_iocb->u.tmf.data;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	return rval;
}
2235 
qla_put_tmf(struct tmf_arg * arg)2236 static void qla_put_tmf(struct tmf_arg *arg)
2237 {
2238 	struct scsi_qla_host *vha = arg->vha;
2239 	struct qla_hw_data *ha = vha->hw;
2240 	unsigned long flags;
2241 
2242 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2243 	ha->active_tmf--;
2244 	list_del(&arg->tmf_elem);
2245 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2246 }
2247 
/*
 * qla_get_tmf - acquire a TMF slot for @arg, enforcing the
 * MAX_ACTIVE_TMF concurrency limit.
 *
 * Rejects a duplicate TMF already active on the same fcport/lun.
 * Otherwise queues @arg on the pending list and polls until it is
 * at the head and a slot is free (FIFO fairness), giving up if the
 * port becomes unusable meanwhile.
 *
 * Returns 0 on success, -EINVAL on duplicate, EIO on disruption.
 */
static
int qla_get_tmf(struct tmf_arg *arg)
{
	struct scsi_qla_host *vha = arg->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	fc_port_t *fcport = arg->fcport;
	int rc = 0;
	struct tmf_arg *t;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
		if (t->fcport == arg->fcport && t->lun == arg->lun) {
			/* reject duplicate TMF */
			ql_log(ql_log_warn, vha, 0x802c,
			       "found duplicate TMF.  Nexus=%ld:%06x:%llu.\n",
			       vha->host_no, fcport->d_id.b24, arg->lun);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return -EINVAL;
		}
	}

	list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
	while (ha->active_tmf >= MAX_ACTIVE_TMF) {
		/* Drop the lock while sleeping so others can make progress. */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		msleep(1);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (TMF_NOT_READY(fcport)) {
			ql_log(ql_log_warn, vha, 0x802c,
			    "Unable to acquire TM resource due to disruption.\n");
			rc = EIO;
			break;
		}
		/* Proceed only when first in line and a slot opened up. */
		if (ha->active_tmf < MAX_ACTIVE_TMF &&
		    list_is_first(&arg->tmf_elem, &ha->tmf_pending))
			break;
	}

	/* Off the pending list in all cases; active list only on success. */
	list_del(&arg->tmf_elem);

	if (!rc) {
		ha->active_tmf++;
		list_add_tail(&arg->tmf_elem, &ha->tmf_active);
	}

	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return rc;
}
2299 
2300 int
qla2x00_async_tm_cmd(fc_port_t * fcport,uint32_t flags,uint64_t lun,uint32_t tag)2301 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
2302 		     uint32_t tag)
2303 {
2304 	struct scsi_qla_host *vha = fcport->vha;
2305 	struct tmf_arg a;
2306 	int rval = QLA_SUCCESS;
2307 
2308 	if (TMF_NOT_READY(fcport))
2309 		return QLA_SUSPENDED;
2310 
2311 	a.vha = fcport->vha;
2312 	a.fcport = fcport;
2313 	a.lun = lun;
2314 	a.flags = flags;
2315 	INIT_LIST_HEAD(&a.tmf_elem);
2316 
2317 	if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
2318 		a.modifier = MK_SYNC_ID_LUN;
2319 	} else {
2320 		a.modifier = MK_SYNC_ID;
2321 	}
2322 
2323 	if (qla_get_tmf(&a))
2324 		return QLA_FUNCTION_FAILED;
2325 
2326 	a.qpair = vha->hw->base_qpair;
2327 	rval = __qla2x00_async_tm_cmd(&a);
2328 
2329 	qla_put_tmf(&a);
2330 	return rval;
2331 }
2332 
2333 int
qla24xx_async_abort_command(srb_t * sp)2334 qla24xx_async_abort_command(srb_t *sp)
2335 {
2336 	unsigned long   flags = 0;
2337 
2338 	uint32_t	handle;
2339 	fc_port_t	*fcport = sp->fcport;
2340 	struct qla_qpair *qpair = sp->qpair;
2341 	struct scsi_qla_host *vha = fcport->vha;
2342 	struct req_que *req = qpair->req;
2343 
2344 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2345 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
2346 		if (req->outstanding_cmds[handle] == sp)
2347 			break;
2348 	}
2349 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2350 
2351 	if (handle == req->num_outstanding_cmds) {
2352 		/* Command not found. */
2353 		return QLA_ERR_NOT_FOUND;
2354 	}
2355 	if (sp->type == SRB_FXIOCB_DCMD)
2356 		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
2357 		    FXDISC_ABORT_IOCTL);
2358 
2359 	return qla24xx_async_abort_cmd(sp, true);
2360 }
2361 
/*
 * Handle completion of an async PRLI.
 *
 * On success, fetch the port database (and latch NVMe first-burst
 * parameters from the PRLI payload).  On failure, flip the FCP/NVMe
 * PRLI preference for the next attempt; on N2N retry via link reset
 * until the retry budget is spent, otherwise tear the session down
 * so a relogin can be retriggered.
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	struct srb *sp;
	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		/* Cache NVMe service parameters advertised in the PRLI ACC. */
		ea->fcport->nvme_prli_service_param = ea->iop[0];
		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
			ea->fcport->nvme_first_burst_size =
			    (ea->iop[1] & 0xffff) * 512;
		else
			ea->fcport->nvme_first_burst_size = 0;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		sp = ea->sp;
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		       "%s %d %8phC priority %s, fc4type %x prev try %s\n",
		       __func__, __LINE__, ea->fcport->port_name,
		       vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
		       "FCP" : "NVMe", ea->fcport->fc4_type,
		       (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ?
			"NVME" : "FCP");

		/* Alternate between NVMe and FCP PRLI on dual-protocol ports. */
		if (NVME_FCP_TARGET(ea->fcport)) {
			if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI)
				ea->fcport->do_prli_nvme = 0;
			else
				ea->fcport->do_prli_nvme = 1;
		} else {
			ea->fcport->do_prli_nvme = 0;
		}

		if (N2N_TOPO(vha->hw)) {
			if (ea->fcport->n2n_link_reset_cnt ==
			    vha->hw->login_retry_count &&
			    ea->fcport->flags & FCF_FCSP_DEVICE) {
				/* remote authentication app just started */
				ea->fcport->n2n_link_reset_cnt = 0;
			}

			if (ea->fcport->n2n_link_reset_cnt <
			    vha->hw->login_retry_count) {
				ea->fcport->n2n_link_reset_cnt++;
				vha->relogin_jif = jiffies + 2 * HZ;
				/*
				 * PRLI failed. Reset link to kick start
				 * state machine
				 */
				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else {
				ql_log(ql_log_warn, vha, 0x2119,
				       "%s %d %8phC Unable to reconnect\n",
				       __func__, __LINE__,
				       ea->fcport->port_name);
			}
		} else {
			/*
			 * switch connect. login failed. Take connection down
			 * and allow relogin to retrigger
			 */
			ea->fcport->flags &= ~FCF_ASYNC_SENT;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
}
2441 
/*
 * Handle completion of an async PLOGI.
 *
 * Ignores the event while the remote end is mid-login or the port is
 * being deleted, and restarts discovery if the login/RSCN generations
 * moved under the command.  Otherwise dispatches on the mailbox
 * status: success continues with PRLI or GPDB; loop-id/port-id
 * conflicts are resolved by re-querying the name list or scheduling
 * the conflicting session for deletion.
 */
void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen1, fcport->rscn_gen,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if ((fcport->disc_state == DSC_DELETE_PEND) ||
	    (fcport->disc_state == DSC_DELETED)) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC RSCN generation changed\n",
		    __func__, fcport->port_name);
		/* Re-queue the missed RSCN, then recycle the session. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (vha->hw->flags.edif_enabled) {
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		} else {
			if (NVME_TARGET(vha->hw, fcport)) {
				ql_dbg(ql_dbg_disc, vha, 0x2117,
				    "%s %d %8phC post prli\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_prli_work(vha, fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ea,
				    "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
				    __func__, __LINE__, fcport->port_name,
				    fcport->loop_id, fcport->d_id.b24);

				set_bit(fcport->loop_id, vha->hw->loop_id_map);
				spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
				fcport->chip_reset = vha->hw->base_qpair->chip_reset;
				fcport->logout_on_delete = 1;
				fcport->send_els_logo = 0;
				fcport->fw_login_state = DSC_LS_PRLI_COMP;
				spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

				qla24xx_post_gpdb_work(vha, fcport, 0);
			}
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		qlt_schedule_sess_for_deletion(ea->fcport);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID  */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id, cid.b24);

		/* Give up the in-use loop id and let GNL pick a new one. */
		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
		ea->fcport->loop_id = FC_NO_LOOP_ID;
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x.\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}
2590 
2591 /****************************************************************************/
2592 /*                QLogic ISP2x00 Hardware Support Functions.                */
2593 /****************************************************************************/
2594 
/*
 * qla83xx_nic_core_fw_load - participate in ISP83xx Inter-Driver
 * Communication (IDC) and load the NIC core firmware if this function
 * is the reset owner.
 *
 * Runs entirely under the IDC lock: registers driver presence,
 * negotiates IDC major/minor versions with the other functions, and
 * finally runs the IDC state handler.  Returns a QLA_* status.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		/* Reset owner marks the device ready once the port
		 * configuration could be read back. */
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}
2667 
2668 /*
2669 * qla2x00_initialize_adapter
2670 *      Initialize board.
2671 *
2672 * Input:
2673 *      ha = adapter block pointer.
2674 *
2675 * Returns:
2676 *      0 = success
2677 */
2678 int
qla2x00_initialize_adapter(scsi_qla_host_t * vha)2679 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
2680 {
2681 	int	rval;
2682 	struct qla_hw_data *ha = vha->hw;
2683 	struct req_que *req = ha->req_q_map[0];
2684 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2685 
2686 	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2687 	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2688 
2689 	/* Clear adapter flags. */
2690 	vha->flags.online = 0;
2691 	ha->flags.chip_reset_done = 0;
2692 	vha->flags.reset_active = 0;
2693 	ha->flags.pci_channel_io_perm_failure = 0;
2694 	ha->flags.eeh_busy = 0;
2695 	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2696 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2697 	atomic_set(&vha->loop_state, LOOP_DOWN);
2698 	vha->device_flags = DFLG_NO_CABLE;
2699 	vha->dpc_flags = 0;
2700 	vha->flags.management_server_logged_in = 0;
2701 	vha->marker_needed = 0;
2702 	ha->isp_abort_cnt = 0;
2703 	ha->beacon_blink_led = 0;
2704 
2705 	set_bit(0, ha->req_qid_map);
2706 	set_bit(0, ha->rsp_qid_map);
2707 
2708 	ql_dbg(ql_dbg_init, vha, 0x0040,
2709 	    "Configuring PCI space...\n");
2710 	rval = ha->isp_ops->pci_config(vha);
2711 	if (rval) {
2712 		ql_log(ql_log_warn, vha, 0x0044,
2713 		    "Unable to configure PCI space.\n");
2714 		return (rval);
2715 	}
2716 
2717 	ha->isp_ops->reset_chip(vha);
2718 
2719 	/* Check for secure flash support */
2720 	if (IS_QLA28XX(ha)) {
2721 		if (rd_reg_word(&reg->mailbox12) & BIT_0)
2722 			ha->flags.secure_adapter = 1;
2723 		ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
2724 		    (ha->flags.secure_adapter) ? "Yes" : "No");
2725 	}
2726 
2727 
2728 	rval = qla2xxx_get_flash_info(vha);
2729 	if (rval) {
2730 		ql_log(ql_log_fatal, vha, 0x004f,
2731 		    "Unable to validate FLASH data.\n");
2732 		return rval;
2733 	}
2734 
2735 	if (IS_QLA8044(ha)) {
2736 		qla8044_read_reset_template(vha);
2737 
2738 		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
2739 		 * If DONRESET_BIT0 is set, drivers should not set dev_state
2740 		 * to NEED_RESET. But if NEED_RESET is set, drivers should
2741 		 * should honor the reset. */
2742 		if (ql2xdontresethba == 1)
2743 			qla8044_set_idc_dontreset(vha);
2744 	}
2745 
2746 	ha->isp_ops->get_flash_version(vha, req->ring);
2747 	ql_dbg(ql_dbg_init, vha, 0x0061,
2748 	    "Configure NVRAM parameters...\n");
2749 
2750 	/* Let priority default to FCP, can be overridden by nvram_config */
2751 	ha->fc4_type_priority = FC4_PRIORITY_FCP;
2752 
2753 	ha->isp_ops->nvram_config(vha);
2754 
2755 	if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
2756 	    ha->fc4_type_priority != FC4_PRIORITY_NVME)
2757 		ha->fc4_type_priority = FC4_PRIORITY_FCP;
2758 
2759 	ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
2760 	       ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
2761 
2762 	if (ha->flags.disable_serdes) {
2763 		/* Mask HBA via NVRAM settings? */
2764 		ql_log(ql_log_info, vha, 0x0077,
2765 		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
2766 		return QLA_FUNCTION_FAILED;
2767 	}
2768 
2769 	ql_dbg(ql_dbg_init, vha, 0x0078,
2770 	    "Verifying loaded RISC code...\n");
2771 
2772 	/* If smartsan enabled then require fdmi and rdp enabled */
2773 	if (ql2xsmartsan) {
2774 		ql2xfdmienable = 1;
2775 		ql2xrdpenable = 1;
2776 	}
2777 
2778 	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
2779 		rval = ha->isp_ops->chip_diag(vha);
2780 		if (rval)
2781 			return (rval);
2782 		rval = qla2x00_setup_chip(vha);
2783 		if (rval)
2784 			return (rval);
2785 	}
2786 
2787 	if (IS_QLA84XX(ha)) {
2788 		ha->cs84xx = qla84xx_get_chip(vha);
2789 		if (!ha->cs84xx) {
2790 			ql_log(ql_log_warn, vha, 0x00d0,
2791 			    "Unable to configure ISP84XX.\n");
2792 			return QLA_FUNCTION_FAILED;
2793 		}
2794 	}
2795 
2796 	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2797 		rval = qla2x00_init_rings(vha);
2798 
2799 	/* No point in continuing if firmware initialization failed. */
2800 	if (rval != QLA_SUCCESS)
2801 		return rval;
2802 
2803 	ha->flags.chip_reset_done = 1;
2804 
2805 	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
2806 		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
2807 		rval = qla84xx_init_chip(vha);
2808 		if (rval != QLA_SUCCESS) {
2809 			ql_log(ql_log_warn, vha, 0x00d4,
2810 			    "Unable to initialize ISP84XX.\n");
2811 			qla84xx_put_chip(vha);
2812 		}
2813 	}
2814 
2815 	/* Load the NIC Core f/w if we are the first protocol driver. */
2816 	if (IS_QLA8031(ha)) {
2817 		rval = qla83xx_nic_core_fw_load(vha);
2818 		if (rval)
2819 			ql_log(ql_log_warn, vha, 0x0124,
2820 			    "Error in initializing NIC Core f/w.\n");
2821 	}
2822 
2823 	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
2824 		qla24xx_read_fcp_prio_cfg(vha);
2825 
2826 	if (IS_P3P_TYPE(ha))
2827 		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
2828 	else
2829 		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
2830 
2831 	return (rval);
2832 }
2833 
2834 /**
2835  * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2836  * @vha: HA context
2837  *
2838  * Returns 0 on success.
2839  */
2840 int
qla2100_pci_config(scsi_qla_host_t * vha)2841 qla2100_pci_config(scsi_qla_host_t *vha)
2842 {
2843 	uint16_t w;
2844 	unsigned long flags;
2845 	struct qla_hw_data *ha = vha->hw;
2846 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2847 
2848 	pci_set_master(ha->pdev);
2849 	pci_try_set_mwi(ha->pdev);
2850 
2851 	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2852 	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2853 	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2854 
2855 	pci_disable_rom(ha->pdev);
2856 
2857 	/* Get PCI bus information. */
2858 	spin_lock_irqsave(&ha->hardware_lock, flags);
2859 	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
2860 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2861 
2862 	return QLA_SUCCESS;
2863 }
2864 
2865 /**
2866  * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2867  * @vha: HA context
2868  *
2869  * Returns 0 on success.
2870  */
2871 int
qla2300_pci_config(scsi_qla_host_t * vha)2872 qla2300_pci_config(scsi_qla_host_t *vha)
2873 {
2874 	uint16_t	w;
2875 	unsigned long   flags = 0;
2876 	uint32_t	cnt;
2877 	struct qla_hw_data *ha = vha->hw;
2878 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2879 
2880 	pci_set_master(ha->pdev);
2881 	pci_try_set_mwi(ha->pdev);
2882 
2883 	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2884 	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2885 
2886 	if (IS_QLA2322(ha) || IS_QLA6322(ha))
2887 		w &= ~PCI_COMMAND_INTX_DISABLE;
2888 	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2889 
2890 	/*
2891 	 * If this is a 2300 card and not 2312, reset the
2892 	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
2893 	 * the 2310 also reports itself as a 2300 so we need to get the
2894 	 * fb revision level -- a 6 indicates it really is a 2300 and
2895 	 * not a 2310.
2896 	 */
2897 	if (IS_QLA2300(ha)) {
2898 		spin_lock_irqsave(&ha->hardware_lock, flags);
2899 
2900 		/* Pause RISC. */
2901 		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
2902 		for (cnt = 0; cnt < 30000; cnt++) {
2903 			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
2904 				break;
2905 
2906 			udelay(10);
2907 		}
2908 
2909 		/* Select FPM registers. */
2910 		wrt_reg_word(&reg->ctrl_status, 0x20);
2911 		rd_reg_word(&reg->ctrl_status);
2912 
2913 		/* Get the fb rev level */
2914 		ha->fb_rev = RD_FB_CMD_REG(ha, reg);
2915 
2916 		if (ha->fb_rev == FPM_2300)
2917 			pci_clear_mwi(ha->pdev);
2918 
2919 		/* Deselect FPM registers. */
2920 		wrt_reg_word(&reg->ctrl_status, 0x0);
2921 		rd_reg_word(&reg->ctrl_status);
2922 
2923 		/* Release RISC module. */
2924 		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
2925 		for (cnt = 0; cnt < 30000; cnt++) {
2926 			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
2927 				break;
2928 
2929 			udelay(10);
2930 		}
2931 
2932 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2933 	}
2934 
2935 	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2936 
2937 	pci_disable_rom(ha->pdev);
2938 
2939 	/* Get PCI bus information. */
2940 	spin_lock_irqsave(&ha->hardware_lock, flags);
2941 	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
2942 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2943 
2944 	return QLA_SUCCESS;
2945 }
2946 
/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @vha: HA context
 *
 * Enables bus mastering and MWI, sets parity/SERR reporting, unmasks
 * INTx, programs the latency timer, tunes PCI-X/PCIe read sizes, and
 * snapshots the controller status into ha->pci_attr.
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Report parity/system errors; make sure INTx is not masked. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
2990 
/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @vha: HA context
 *
 * Like qla24xx_pci_config() but for ISP25xx: no PCI-X handling and no
 * latency-timer programming, and pci_attr is not captured here.
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Report parity/system errors; make sure INTx is not masked. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
3021 
3022 /**
3023  * qla2x00_isp_firmware() - Choose firmware image.
3024  * @vha: HA context
3025  *
3026  * Returns 0 on success.
3027  */
3028 static int
qla2x00_isp_firmware(scsi_qla_host_t * vha)3029 qla2x00_isp_firmware(scsi_qla_host_t *vha)
3030 {
3031 	int  rval;
3032 	uint16_t loop_id, topo, sw_cap;
3033 	uint8_t domain, area, al_pa;
3034 	struct qla_hw_data *ha = vha->hw;
3035 
3036 	/* Assume loading risc code */
3037 	rval = QLA_FUNCTION_FAILED;
3038 
3039 	if (ha->flags.disable_risc_code_load) {
3040 		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
3041 
3042 		/* Verify checksum of loaded RISC code. */
3043 		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
3044 		if (rval == QLA_SUCCESS) {
3045 			/* And, verify we are not in ROM code. */
3046 			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
3047 			    &area, &domain, &topo, &sw_cap);
3048 		}
3049 	}
3050 
3051 	if (rval)
3052 		ql_dbg(ql_dbg_init, vha, 0x007a,
3053 		    "**** Load RISC code ****.\n");
3054 
3055 	return (rval);
3056 }
3057 
/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 *
 * Full soft reset of an ISP2x00: with bus mastering disabled, pause the
 * RISC, soft-reset the FPM and frame-buffer blocks, issue the ISP soft
 * reset, then release the RISC and re-enable bus mastering.  Interrupts
 * are disabled here and left disabled on return.
 *
 * Returns 0 on success.
 */
int
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long   flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t	cnt;
	uint16_t	cmd;
	int rval = QLA_FUNCTION_FAILED;

	/* No register access is possible once the PCI channel is offline. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return rval;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* ISP2100 skips the pause/FPM/frame-buffer sequence entirely. */
	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			/* Poll up to 30000 * 100us for the pause to latch. */
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((rd_reg_word(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			rd_reg_word(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		wrt_reg_word(&reg->fpm_diag_config, 0x100);
		rd_reg_word(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			wrt_reg_word(&reg->fpm_diag_config, 0x0);
			rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		wrt_reg_word(&reg->ctrl_status, 0x10);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		wrt_reg_word(&reg->ctrl_status, 0);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	/* Clear any latched RISC/host interrupt before the chip reset. */
	wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
	wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to for a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((rd_reg_word(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);

	wrt_reg_word(&reg->semaphore, 0);

	/* Release RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */

	/* Older parts: wait for mailbox 0 to leave the BUSY state. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
3204 
3205 /**
3206  * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
3207  * @vha: HA context
3208  *
3209  * Returns 0 on success.
3210  */
3211 static int
qla81xx_reset_mpi(scsi_qla_host_t * vha)3212 qla81xx_reset_mpi(scsi_qla_host_t *vha)
3213 {
3214 	uint16_t mb[4] = {0x1010, 0, 1, 0};
3215 
3216 	if (!IS_QLA81XX(vha->hw))
3217 		return QLA_SUCCESS;
3218 
3219 	return qla81xx_write_mpi_register(vha, mb);
3220 }
3221 
3222 static int
qla_chk_risc_recovery(scsi_qla_host_t * vha)3223 qla_chk_risc_recovery(scsi_qla_host_t *vha)
3224 {
3225 	struct qla_hw_data *ha = vha->hw;
3226 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3227 	__le16 __iomem *mbptr = &reg->mailbox0;
3228 	int i;
3229 	u16 mb[32];
3230 	int rc = QLA_SUCCESS;
3231 
3232 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3233 		return rc;
3234 
3235 	/* this check is only valid after RISC reset */
3236 	mb[0] = rd_reg_word(mbptr);
3237 	mbptr++;
3238 	if (mb[0] == 0xf) {
3239 		rc = QLA_FUNCTION_FAILED;
3240 
3241 		for (i = 1; i < 32; i++) {
3242 			mb[i] = rd_reg_word(mbptr);
3243 			mbptr++;
3244 		}
3245 
3246 		ql_log(ql_log_warn, vha, 0x1015,
3247 		       "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
3248 		       mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
3249 		ql_log(ql_log_warn, vha, 0x1015,
3250 		       "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
3251 		       mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
3252 		       mb[15]);
3253 		ql_log(ql_log_warn, vha, 0x1015,
3254 		       "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
3255 		       mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
3256 		       mb[23]);
3257 		ql_log(ql_log_warn, vha, 0x1015,
3258 		       "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
3259 		       mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
3260 		       mb[31]);
3261 	}
3262 	return rc;
3263 }
3264 
/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: HA context
 *
 * Sequence: DMA shutdown, ISP soft reset, wait for firmware to finish
 * NVRAM accesses, optional MPI firmware reset, then RISC reset/release
 * and a wait for mailbox0 to clear.  Progress is recorded in
 * ha->fw_dump_cap_flags so a later firmware dump shows how far the
 * reset got.
 *
 * Returns 0 on success.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;
	int print = 1;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Poll up to 30000 * 10us for DMA activity to stop. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status),
	    (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	/* Issue the soft reset with DMA still shut down. */
	wrt_reg_dword(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Config-space read flushes the posted write. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	rd_reg_word(&reg->mailbox0);
	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	rd_reg_dword(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((rd_reg_dword(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			/* Retry the whole ISP abort a bounded number of
			 * times; abts_cnt is static, shared across calls. */
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	/* Pulse RISC reset: set, release pause, clear. */
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
	mdelay(10);
	rd_reg_dword(&reg->hccr);

	/* Wait (up to ~300ms) for mailbox0 to clear after the reset. */
	wd = rd_reg_word(&reg->mailbox0);
	for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt) {
			mdelay(1);
			/* Log the recovery-failure mailbox dump only once. */
			if (print && qla_chk_risc_recovery(vha))
				print = 0;

			wd = rd_reg_word(&reg->mailbox0);
		} else {
			rval = QLA_FUNCTION_TIMEOUT;

			ql_log(ql_log_warn, vha, 0x015e,
			       "RISC reset timeout\n");
		}
	}

	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	     rd_reg_word(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
3409 
/*
 * Read the RISC semaphore register through the iobase window: select the
 * RISC register bank via iobase_addr, then read at the window offset.
 * The two accesses must stay in this order.
 */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
}
3418 
/*
 * Write the RISC semaphore register through the iobase window: select the
 * RISC register bank via iobase_addr, then write at the window offset.
 * The two accesses must stay in this order.
 */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
}
3427 
/*
 * Acquire the RISC hardware semaphore before a chip reset, forcing it if
 * a peer never releases it.  Only applied to two specific subsystem IDs
 * (0x0175/0x0240) -- presumably boards that need this workaround; the
 * exact hardware rationale is not visible here (NOTE(review): confirm).
 * The RISC is paused first so firmware cannot race the manipulation.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	/* Pause the RISC before touching the semaphore. */
	wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	/* Try to take the semaphore; a read-back with RISC_SEMAPHORE set
	 * means we own it. */
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* Someone had force-acquired it: clear, wait for FORCE to drop,
	 * then retry the normal acquisition from the top. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	/* Could not get it cooperatively within the total budget. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
3488 
3489 /**
3490  * qla24xx_reset_chip() - Reset ISP24xx chip.
3491  * @vha: HA context
3492  *
3493  * Returns 0 on success.
3494  */
3495 int
qla24xx_reset_chip(scsi_qla_host_t * vha)3496 qla24xx_reset_chip(scsi_qla_host_t *vha)
3497 {
3498 	struct qla_hw_data *ha = vha->hw;
3499 	int rval = QLA_FUNCTION_FAILED;
3500 
3501 	if (pci_channel_offline(ha->pdev) &&
3502 	    ha->flags.pci_channel_io_perm_failure) {
3503 		return rval;
3504 	}
3505 
3506 	ha->isp_ops->disable_intrs(ha);
3507 
3508 	qla25xx_manipulate_risc_semaphore(vha);
3509 
3510 	/* Perform RISC reset. */
3511 	rval = qla24xx_reset_risc(vha);
3512 
3513 	return rval;
3514 }
3515 
/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Soft-resets the ISP, verifies the product ID mailboxes, sizes the
 * firmware transfer buffer from the request queue, and runs the mailbox
 * wrap test.  Most of the work runs under hardware_lock; the mailbox
 * test temporarily drops it.
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int		rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long	flags = 0;
	uint16_t	data;
	uint32_t	cnt;
	uint16_t	mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
	       &reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	/* Poll until the soft-reset bit clears (up to 6000000 * 5us). */
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = rd_reg_word(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to leave the BUSY state. */
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	/* The mailbox test issues commands, so it runs without the lock. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}
3639 
3640 /**
3641  * qla24xx_chip_diag() - Test ISP24xx for proper operation.
3642  * @vha: HA context
3643  *
3644  * Returns 0 on success.
3645  */
3646 int
qla24xx_chip_diag(scsi_qla_host_t * vha)3647 qla24xx_chip_diag(scsi_qla_host_t *vha)
3648 {
3649 	int rval;
3650 	struct qla_hw_data *ha = vha->hw;
3651 	struct req_que *req = ha->req_q_map[0];
3652 
3653 	if (IS_P3P_TYPE(ha))
3654 		return QLA_SUCCESS;
3655 
3656 	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
3657 
3658 	rval = qla2x00_mbx_reg_test(vha);
3659 	if (rval) {
3660 		ql_log(ql_log_warn, vha, 0x0082,
3661 		    "Failed mailbox send register test.\n");
3662 	} else {
3663 		/* Flag a successful rval */
3664 		rval = QLA_SUCCESS;
3665 	}
3666 
3667 	return rval;
3668 }
3669 
3670 static void
qla2x00_init_fce_trace(scsi_qla_host_t * vha)3671 qla2x00_init_fce_trace(scsi_qla_host_t *vha)
3672 {
3673 	int rval;
3674 	dma_addr_t tc_dma;
3675 	void *tc;
3676 	struct qla_hw_data *ha = vha->hw;
3677 
3678 	if (!IS_FWI2_CAPABLE(ha))
3679 		return;
3680 
3681 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3682 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3683 		return;
3684 
3685 	if (ha->fce) {
3686 		ql_dbg(ql_dbg_init, vha, 0x00bd,
3687 		       "%s: FCE Mem is already allocated.\n",
3688 		       __func__);
3689 		return;
3690 	}
3691 
3692 	/* Allocate memory for Fibre Channel Event Buffer. */
3693 	tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3694 				GFP_KERNEL);
3695 	if (!tc) {
3696 		ql_log(ql_log_warn, vha, 0x00be,
3697 		       "Unable to allocate (%d KB) for FCE.\n",
3698 		       FCE_SIZE / 1024);
3699 		return;
3700 	}
3701 
3702 	rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3703 					ha->fce_mb, &ha->fce_bufs);
3704 	if (rval) {
3705 		ql_log(ql_log_warn, vha, 0x00bf,
3706 		       "Unable to initialize FCE (%d).\n", rval);
3707 		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
3708 		return;
3709 	}
3710 
3711 	ql_dbg(ql_dbg_init, vha, 0x00c0,
3712 	       "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
3713 
3714 	ha->flags.fce_enabled = 1;
3715 	ha->fce_dma = tc_dma;
3716 	ha->fce = tc;
3717 }
3718 
3719 static void
qla2x00_init_eft_trace(scsi_qla_host_t * vha)3720 qla2x00_init_eft_trace(scsi_qla_host_t *vha)
3721 {
3722 	int rval;
3723 	dma_addr_t tc_dma;
3724 	void *tc;
3725 	struct qla_hw_data *ha = vha->hw;
3726 
3727 	if (!IS_FWI2_CAPABLE(ha))
3728 		return;
3729 
3730 	if (ha->eft) {
3731 		ql_dbg(ql_dbg_init, vha, 0x00bd,
3732 		    "%s: EFT Mem is already allocated.\n",
3733 		    __func__);
3734 		return;
3735 	}
3736 
3737 	/* Allocate memory for Extended Trace Buffer. */
3738 	tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3739 				GFP_KERNEL);
3740 	if (!tc) {
3741 		ql_log(ql_log_warn, vha, 0x00c1,
3742 		       "Unable to allocate (%d KB) for EFT.\n",
3743 		       EFT_SIZE / 1024);
3744 		return;
3745 	}
3746 
3747 	rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
3748 	if (rval) {
3749 		ql_log(ql_log_warn, vha, 0x00c2,
3750 		       "Unable to initialize EFT (%d).\n", rval);
3751 		dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
3752 		return;
3753 	}
3754 
3755 	ql_dbg(ql_dbg_init, vha, 0x00c3,
3756 	       "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3757 
3758 	ha->eft_dma = tc_dma;
3759 	ha->eft = tc;
3760 }
3761 
/*
 * Set up both firmware trace facilities (FCE then EFT); each helper is
 * a no-op on unsupported hardware or when already allocated.
 */
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	qla2x00_init_fce_trace(vha);
	qla2x00_init_eft_trace(vha);
}
3768 
/*
 * Compute the firmware-dump size for the current adapter family and
 * (re)allocate ha->fw_dump accordingly.  For pre-27xx parts the dump is
 * a fixed layout whose header fields are filled in here; for 27xx/28xx
 * the size comes from the firmware dump templates and includes a spare
 * MPI dump region.  Reallocation is serialized by optrom_mutex, and an
 * existing captured dump is preserved across the reallocation.
 */
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct qla2xxx_fw_dump *fw_dump;

	if (ha->fw_dump) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "Firmware dump already allocated.\n");
		return;
	}

	ha->fw_dumped = 0;
	ha->fw_dump_cap_flags = 0;
	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	req_q_size = rsp_q_size = 0;

	/* Per-family fixed-region and firmware-memory sizing. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA83XX(ha))
			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
		else if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues - Q0.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += (ha->max_req_queues - 1) *
			    (req->length * sizeof(request_t));
			mq_size += (ha->max_rsp_queues - 1) *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);

		/* Trace buffers are allocated here so their sizes can be
		 * folded into the dump size. */
		qla2x00_init_fce_trace(vha);
		if (ha->fce)
			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
		qla2x00_init_eft_trace(vha);
		if (ha->eft)
			eft_size = EFT_SIZE;
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		/* Template-driven sizing: sum both fwdt templates. */
		struct fwdt *fwdt = ha->fwdt;
		uint j;

		for (j = 0; j < 2; j++, fwdt++) {
			if (!fwdt->template) {
				ql_dbg(ql_dbg_init, vha, 0x00ba,
				    "-> fwdt%u no template\n", j);
				continue;
			}
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculating fwdump size...\n", j);
			fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
			    vha, fwdt->template);
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculated fwdump size = %#lx bytes\n",
			    j, fwdt->dump_size);
			dump_size += fwdt->dump_size;
		}
		/* Add space for spare MPI fw dump. */
		dump_size += ha->fwdt[1].dump_size;
	} else {
		/* Legacy layout: header + fixed + fw memory + queues +
		 * traces, with chains appended after chain_offset. */
		req_q_size = req->length * sizeof(request_t);
		rsp_q_size = rsp->length * sizeof(response_t);
		dump_size = offsetof(struct qla2xxx_fw_dump, isp);
		dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
			+ eft_size;
		ha->chain_offset = dump_size;
		dump_size += mq_size + fce_size;
		if (ha->exchoffld_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
				ha->exchoffld_size;
		if (ha->exlogin_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
				ha->exlogin_size;
	}

	/* (Re)allocate only when there is no buffer yet or it is too small. */
	if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {

		ql_dbg(ql_dbg_init, vha, 0x00c5,
		    "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
		    __func__, dump_size, ha->fw_dump_len,
		    ha->fw_dump_alloc_len);

		fw_dump = vmalloc(dump_size);
		if (!fw_dump) {
			ql_log(ql_log_warn, vha, 0x00c4,
			    "Unable to allocate (%d KB) for firmware dump.\n",
			    dump_size / 1024);
		} else {
			mutex_lock(&ha->optrom_mutex);
			if (ha->fw_dumped) {
				/* Preserve an already-captured dump. */
				memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;
				ha->fw_dump_alloc_len =  dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Re-Allocated (%d KB) and save firmware dump.\n",
				    dump_size / 1024);
			} else {
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;

				ha->fw_dump_len = ha->fw_dump_alloc_len =
				    dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Allocated (%d KB) for firmware dump.\n",
				    dump_size / 1024);

				if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
					/* 27xx/28xx: no legacy header; MPI
					 * dump area sits past fwdt[1]. */
					ha->mpi_fw_dump = (char *)fw_dump +
						ha->fwdt[1].dump_size;
					mutex_unlock(&ha->optrom_mutex);
					return;
				}

				/* Legacy parts: initialize the dump header. */
				ha->fw_dump->signature[0] = 'Q';
				ha->fw_dump->signature[1] = 'L';
				ha->fw_dump->signature[2] = 'G';
				ha->fw_dump->signature[3] = 'C';
				ha->fw_dump->version = htonl(1);

				ha->fw_dump->fixed_size = htonl(fixed_size);
				ha->fw_dump->mem_size = htonl(mem_size);
				ha->fw_dump->req_q_size = htonl(req_q_size);
				ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

				ha->fw_dump->eft_size = htonl(eft_size);
				ha->fw_dump->eft_addr_l =
				    htonl(LSD(ha->eft_dma));
				ha->fw_dump->eft_addr_h =
				    htonl(MSD(ha->eft_dma));

				ha->fw_dump->header_size =
					htonl(offsetof
					    (struct qla2xxx_fw_dump, isp));
			}
			mutex_unlock(&ha->optrom_mutex);
		}
	}
}
3932 
/*
 * Synchronize the MPS bits between PCI config space and RISC RAM on
 * ISP81xx: take the semaphore at RAM word 0x7c00, copy the MPS field
 * (mask 0xe0) from PCI config offset 0x54 into RAM word 0x7a15 if they
 * differ, then release the semaphore.  No-op on other chips.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK	0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire the firmware semaphore guarding the RAM word. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	/* Already in sync?  Then just release. */
	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	/* Splice the config-space MPS field into the RAM word. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}
3979 
3980 int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data * ha,struct req_que * req)3981 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3982 {
3983 	/* Don't try to reallocate the array */
3984 	if (req->outstanding_cmds)
3985 		return QLA_SUCCESS;
3986 
3987 	if (!IS_FWI2_CAPABLE(ha))
3988 		req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3989 	else {
3990 		if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3991 			req->num_outstanding_cmds = ha->cur_fw_xcb_count;
3992 		else
3993 			req->num_outstanding_cmds = ha->cur_fw_iocb_count;
3994 	}
3995 
3996 	req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3997 					sizeof(srb_t *),
3998 					GFP_KERNEL);
3999 
4000 	if (!req->outstanding_cmds) {
4001 		/*
4002 		 * Try to allocate a minimal size just so we can get through
4003 		 * initialization.
4004 		 */
4005 		req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
4006 		req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
4007 						sizeof(srb_t *),
4008 						GFP_KERNEL);
4009 
4010 		if (!req->outstanding_cmds) {
4011 			ql_log(ql_log_fatal, NULL, 0x0126,
4012 			    "Failed to allocate memory for "
4013 			    "outstanding_cmds for req_que %p.\n", req);
4014 			req->num_outstanding_cmds = 0;
4015 			return QLA_FUNCTION_FAILED;
4016 		}
4017 	}
4018 
4019 	return QLA_SUCCESS;
4020 }
4021 
/*
 * PRINT_FIELD() - If bit(s) @_flag are set in a0->@_field, append @_str
 * at the write cursor, separating consecutive entries with '|'.
 *
 * Relies on locals in the calling scope: a0 (decoded SFP page), ptr
 * (write cursor into the output buffer), p (non-zero once a first entry
 * has been printed), leftover (bytes remaining), len (snprintf scratch).
 */
#define PRINT_FIELD(_field, _flag, _str) {		\
	if (a0->_field & _flag) {\
		if (p) {\
			strcat(ptr, "|");\
			ptr++;\
			leftover--;\
		} \
		len = snprintf(ptr, leftover, "%s", _str);	\
		p = 1;\
		leftover -= len;\
		ptr += len; \
	} \
}
4035 
qla2xxx_print_sfp_info(struct scsi_qla_host * vha)4036 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
4037 {
4038 #define STR_LEN 64
4039 	struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
4040 	u8 str[STR_LEN], *ptr, p;
4041 	int leftover, len;
4042 
4043 	memset(str, 0, STR_LEN);
4044 	snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
4045 	ql_dbg(ql_dbg_init, vha, 0x015a,
4046 	    "SFP MFG Name: %s\n", str);
4047 
4048 	memset(str, 0, STR_LEN);
4049 	snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
4050 	ql_dbg(ql_dbg_init, vha, 0x015c,
4051 	    "SFP Part Name: %s\n", str);
4052 
4053 	/* media */
4054 	memset(str, 0, STR_LEN);
4055 	ptr = str;
4056 	leftover = STR_LEN;
4057 	p = len = 0;
4058 	PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
4059 	PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
4060 	PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
4061 	PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
4062 	PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
4063 	PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
4064 	PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
4065 	ql_dbg(ql_dbg_init, vha, 0x0160,
4066 	    "SFP Media: %s\n", str);
4067 
4068 	/* link length */
4069 	memset(str, 0, STR_LEN);
4070 	ptr = str;
4071 	leftover = STR_LEN;
4072 	p = len = 0;
4073 	PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
4074 	PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
4075 	PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
4076 	PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
4077 	PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
4078 	ql_dbg(ql_dbg_init, vha, 0x0196,
4079 	    "SFP Link Length: %s\n", str);
4080 
4081 	memset(str, 0, STR_LEN);
4082 	ptr = str;
4083 	leftover = STR_LEN;
4084 	p = len = 0;
4085 	PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
4086 	PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
4087 	PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
4088 	PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
4089 	PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
4090 	ql_dbg(ql_dbg_init, vha, 0x016e,
4091 	    "SFP FC Link Tech: %s\n", str);
4092 
4093 	if (a0->length_km)
4094 		ql_dbg(ql_dbg_init, vha, 0x016f,
4095 		    "SFP Distant: %d km\n", a0->length_km);
4096 	if (a0->length_100m)
4097 		ql_dbg(ql_dbg_init, vha, 0x0170,
4098 		    "SFP Distant: %d m\n", a0->length_100m*100);
4099 	if (a0->length_50um_10m)
4100 		ql_dbg(ql_dbg_init, vha, 0x0189,
4101 		    "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
4102 	if (a0->length_62um_10m)
4103 		ql_dbg(ql_dbg_init, vha, 0x018a,
4104 		  "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
4105 	if (a0->length_om4_10m)
4106 		ql_dbg(ql_dbg_init, vha, 0x0194,
4107 		    "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
4108 	if (a0->length_om3_10m)
4109 		ql_dbg(ql_dbg_init, vha, 0x0195,
4110 		    "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
4111 }
4112 
4113 
4114 /**
4115  * qla24xx_detect_sfp()
4116  *
4117  * @vha: adapter state pointer.
4118  *
4119  * @return
4120  *	0 -- Configure firmware to use short-range settings -- normal
4121  *	     buffer-to-buffer credits.
4122  *
4123  *	1 -- Configure firmware to use long-range settings -- extra
4124  *	     buffer-to-buffer credits should be allocated with
4125  *	     ha->lr_distance containing distance settings from NVRAM or SFP
4126  *	     (if supported).
4127  */
4128 int
qla24xx_detect_sfp(scsi_qla_host_t * vha)4129 qla24xx_detect_sfp(scsi_qla_host_t *vha)
4130 {
4131 	int rc, used_nvram;
4132 	struct sff_8247_a0 *a;
4133 	struct qla_hw_data *ha = vha->hw;
4134 	struct nvram_81xx *nv = ha->nvram;
4135 #define LR_DISTANCE_UNKNOWN	2
4136 	static const char * const types[] = { "Short", "Long" };
4137 	static const char * const lengths[] = { "(10km)", "(5km)", "" };
4138 	u8 ll = 0;
4139 
4140 	/* Seed with NVRAM settings. */
4141 	used_nvram = 0;
4142 	ha->flags.lr_detected = 0;
4143 	if (IS_BPM_RANGE_CAPABLE(ha) &&
4144 	    (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
4145 		used_nvram = 1;
4146 		ha->flags.lr_detected = 1;
4147 		ha->lr_distance =
4148 		    (nv->enhanced_features >> LR_DIST_NV_POS)
4149 		     & LR_DIST_NV_MASK;
4150 	}
4151 
4152 	if (!IS_BPM_ENABLED(vha))
4153 		goto out;
4154 	/* Determine SR/LR capabilities of SFP/Transceiver. */
4155 	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
4156 	if (rc)
4157 		goto out;
4158 
4159 	used_nvram = 0;
4160 	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
4161 	qla2xxx_print_sfp_info(vha);
4162 
4163 	ha->flags.lr_detected = 0;
4164 	ll = a->fc_ll_cc7;
4165 	if (ll & FC_LL_VL || ll & FC_LL_L) {
4166 		/* Long range, track length. */
4167 		ha->flags.lr_detected = 1;
4168 
4169 		if (a->length_km > 5 || a->length_100m > 50)
4170 			ha->lr_distance = LR_DISTANCE_10K;
4171 		else
4172 			ha->lr_distance = LR_DISTANCE_5K;
4173 	}
4174 
4175 out:
4176 	ql_dbg(ql_dbg_async, vha, 0x507b,
4177 	    "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
4178 	    types[ha->flags.lr_detected],
4179 	    ha->flags.lr_detected ? lengths[ha->lr_distance] :
4180 	       lengths[LR_DISTANCE_UNKNOWN],
4181 	    used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
4182 	return ha->flags.lr_detected;
4183 }
4184 
__qla_adjust_iocb_limit(struct qla_qpair * qpair)4185 static void __qla_adjust_iocb_limit(struct qla_qpair *qpair)
4186 {
4187 	u8 num_qps;
4188 	u16 limit;
4189 	struct qla_hw_data *ha = qpair->vha->hw;
4190 
4191 	num_qps = ha->num_qpairs + 1;
4192 	limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
4193 
4194 	qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
4195 	qpair->fwres.iocbs_limit = limit;
4196 	qpair->fwres.iocbs_qp_limit = limit / num_qps;
4197 
4198 	qpair->fwres.exch_total = ha->orig_fw_xcb_count;
4199 	qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
4200 				   QLA_IOCB_PCT_LIMIT) / 100;
4201 }
4202 
/*
 * Establish IOCB/exchange limits and zero the usage counters for the
 * base queue pair, every mapped queue pair, and the adapter-wide
 * accounting in ha->fwres.
 */
void qla_init_iocb_limit(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qp;
	u8 idx;

	__qla_adjust_iocb_limit(ha->base_qpair);
	ha->base_qpair->fwres.iocbs_used = 0;
	ha->base_qpair->fwres.exch_used = 0;

	for (idx = 0; idx < ha->max_qpairs; idx++) {
		qp = ha->queue_pair_map[idx];
		if (!qp)
			continue;
		__qla_adjust_iocb_limit(qp);
		qp->fwres.iocbs_used = 0;
		qp->fwres.exch_used = 0;
	}

	/* Adapter-wide totals mirror the per-qpair computation. */
	ha->fwres.iocb_total = ha->orig_fw_iocb_count;
	ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
	ha->fwres.exch_total = ha->orig_fw_xcb_count;
	ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;

	atomic_set(&ha->fwres.iocb_used, 0);
	atomic_set(&ha->fwres.exch_used, 0);
}
4228 
/*
 * Recompute per-queue-pair limits (e.g. after the queue pair count
 * changes) without touching the usage counters.
 */
void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u8 idx;

	__qla_adjust_iocb_limit(ha->base_qpair);

	for (idx = 0; idx < ha->max_qpairs; idx++) {
		struct qla_qpair *qp = ha->queue_pair_map[idx];

		if (qp)
			__qla_adjust_iocb_limit(qp);
	}
}
4241 
4242 /**
4243  * qla2x00_setup_chip() - Load and start RISC firmware.
4244  * @vha: HA context
4245  *
4246  * Returns 0 on success.
4247  */
4248 static int
qla2x00_setup_chip(scsi_qla_host_t * vha)4249 qla2x00_setup_chip(scsi_qla_host_t *vha)
4250 {
4251 	int rval;
4252 	uint32_t srisc_address = 0;
4253 	struct qla_hw_data *ha = vha->hw;
4254 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4255 	unsigned long flags;
4256 	uint16_t fw_major_version;
4257 	int done_once = 0;
4258 
4259 	if (IS_P3P_TYPE(ha)) {
4260 		rval = ha->isp_ops->load_risc(vha, &srisc_address);
4261 		if (rval == QLA_SUCCESS) {
4262 			qla2x00_stop_firmware(vha);
4263 			goto enable_82xx_npiv;
4264 		} else
4265 			goto failed;
4266 	}
4267 
4268 	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
4269 		/* Disable SRAM, Instruction RAM and GP RAM parity.  */
4270 		spin_lock_irqsave(&ha->hardware_lock, flags);
4271 		wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
4272 		rd_reg_word(&reg->hccr);
4273 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
4274 	}
4275 
4276 	qla81xx_mpi_sync(vha);
4277 
4278 execute_fw_with_lr:
4279 	/* Load firmware sequences */
4280 	rval = ha->isp_ops->load_risc(vha, &srisc_address);
4281 	if (rval == QLA_SUCCESS) {
4282 		ql_dbg(ql_dbg_init, vha, 0x00c9,
4283 		    "Verifying Checksum of loaded RISC code.\n");
4284 
4285 		rval = qla2x00_verify_checksum(vha, srisc_address);
4286 		if (rval == QLA_SUCCESS) {
4287 			/* Start firmware execution. */
4288 			ql_dbg(ql_dbg_init, vha, 0x00ca,
4289 			    "Starting firmware.\n");
4290 
4291 			if (ql2xexlogins)
4292 				ha->flags.exlogins_enabled = 1;
4293 
4294 			if (qla_is_exch_offld_enabled(vha))
4295 				ha->flags.exchoffld_enabled = 1;
4296 
4297 			rval = qla2x00_execute_fw(vha, srisc_address);
4298 			/* Retrieve firmware information. */
4299 			if (rval == QLA_SUCCESS) {
4300 				/* Enable BPM support? */
4301 				if (!done_once++ && qla24xx_detect_sfp(vha)) {
4302 					ql_dbg(ql_dbg_init, vha, 0x00ca,
4303 					    "Re-starting firmware -- BPM.\n");
4304 					/* Best-effort - re-init. */
4305 					ha->isp_ops->reset_chip(vha);
4306 					ha->isp_ops->chip_diag(vha);
4307 					goto execute_fw_with_lr;
4308 				}
4309 
4310 				if (IS_ZIO_THRESHOLD_CAPABLE(ha))
4311 					qla27xx_set_zio_threshold(vha,
4312 					    ha->last_zio_threshold);
4313 
4314 				rval = qla2x00_set_exlogins_buffer(vha);
4315 				if (rval != QLA_SUCCESS)
4316 					goto failed;
4317 
4318 				rval = qla2x00_set_exchoffld_buffer(vha);
4319 				if (rval != QLA_SUCCESS)
4320 					goto failed;
4321 
4322 enable_82xx_npiv:
4323 				fw_major_version = ha->fw_major_version;
4324 				if (IS_P3P_TYPE(ha))
4325 					qla82xx_check_md_needed(vha);
4326 				else
4327 					rval = qla2x00_get_fw_version(vha);
4328 				if (rval != QLA_SUCCESS)
4329 					goto failed;
4330 				ha->flags.npiv_supported = 0;
4331 				if (IS_QLA2XXX_MIDTYPE(ha) &&
4332 					 (ha->fw_attributes & BIT_2)) {
4333 					ha->flags.npiv_supported = 1;
4334 					if ((!ha->max_npiv_vports) ||
4335 					    ((ha->max_npiv_vports + 1) %
4336 					    MIN_MULTI_ID_FABRIC))
4337 						ha->max_npiv_vports =
4338 						    MIN_MULTI_ID_FABRIC - 1;
4339 				}
4340 				qla2x00_get_resource_cnts(vha);
4341 				qla_init_iocb_limit(vha);
4342 
4343 				/*
4344 				 * Allocate the array of outstanding commands
4345 				 * now that we know the firmware resources.
4346 				 */
4347 				rval = qla2x00_alloc_outstanding_cmds(ha,
4348 				    vha->req);
4349 				if (rval != QLA_SUCCESS)
4350 					goto failed;
4351 
4352 				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
4353 					qla2x00_alloc_offload_mem(vha);
4354 
4355 				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
4356 					qla2x00_alloc_fw_dump(vha);
4357 
4358 			} else {
4359 				goto failed;
4360 			}
4361 		} else {
4362 			ql_log(ql_log_fatal, vha, 0x00cd,
4363 			    "ISP Firmware failed checksum.\n");
4364 			goto failed;
4365 		}
4366 
4367 		/* Enable PUREX PASSTHRU */
4368 		if (ql2xrdpenable || ha->flags.scm_supported_f ||
4369 		    ha->flags.edif_enabled)
4370 			qla25xx_set_els_cmds_supported(vha);
4371 	} else
4372 		goto failed;
4373 
4374 	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
4375 		/* Enable proper parity. */
4376 		spin_lock_irqsave(&ha->hardware_lock, flags);
4377 		if (IS_QLA2300(ha))
4378 			/* SRAM parity */
4379 			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
4380 		else
4381 			/* SRAM, Instruction RAM and GP RAM parity */
4382 			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
4383 		rd_reg_word(&reg->hccr);
4384 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
4385 	}
4386 
4387 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
4388 		ha->flags.fac_supported = 1;
4389 	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
4390 		uint32_t size;
4391 
4392 		rval = qla81xx_fac_get_sector_size(vha, &size);
4393 		if (rval == QLA_SUCCESS) {
4394 			ha->flags.fac_supported = 1;
4395 			ha->fdt_block_size = size << 2;
4396 		} else {
4397 			ql_log(ql_log_warn, vha, 0x00ce,
4398 			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
4399 			    ha->fw_major_version, ha->fw_minor_version,
4400 			    ha->fw_subminor_version);
4401 
4402 			if (IS_QLA83XX(ha)) {
4403 				ha->flags.fac_supported = 0;
4404 				rval = QLA_SUCCESS;
4405 			}
4406 		}
4407 	}
4408 failed:
4409 	if (rval) {
4410 		ql_log(ql_log_fatal, vha, 0x00cf,
4411 		    "Setup chip ****FAILED****.\n");
4412 	}
4413 
4414 	return (rval);
4415 }
4416 
4417 /**
4418  * qla2x00_init_response_q_entries() - Initializes response queue entries.
4419  * @rsp: response queue
4420  *
4421  * Beginning of request ring has initialization control block already built
4422  * by nvram config routine.
4423  *
4424  * Returns 0 on success.
4425  */
4426 void
qla2x00_init_response_q_entries(struct rsp_que * rsp)4427 qla2x00_init_response_q_entries(struct rsp_que *rsp)
4428 {
4429 	uint16_t cnt;
4430 	response_t *pkt;
4431 
4432 	rsp->ring_ptr = rsp->ring;
4433 	rsp->ring_index    = 0;
4434 	rsp->status_srb = NULL;
4435 	pkt = rsp->ring_ptr;
4436 	for (cnt = 0; cnt < rsp->length; cnt++) {
4437 		pkt->signature = RESPONSE_PROCESSED;
4438 		pkt++;
4439 	}
4440 }
4441 
4442 /**
4443  * qla2x00_update_fw_options() - Read and process firmware options.
4444  * @vha: HA context
4445  *
4446  * Returns 0 on success.
4447  */
4448 void
qla2x00_update_fw_options(scsi_qla_host_t * vha)4449 qla2x00_update_fw_options(scsi_qla_host_t *vha)
4450 {
4451 	uint16_t swing, emphasis, tx_sens, rx_sens;
4452 	struct qla_hw_data *ha = vha->hw;
4453 
4454 	memset(ha->fw_options, 0, sizeof(ha->fw_options));
4455 	qla2x00_get_fw_options(vha, ha->fw_options);
4456 
4457 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
4458 		return;
4459 
4460 	/* Serial Link options. */
4461 	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
4462 	    "Serial link options.\n");
4463 	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
4464 	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));
4465 
4466 	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
4467 	if (ha->fw_seriallink_options[3] & BIT_2) {
4468 		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
4469 
4470 		/*  1G settings */
4471 		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
4472 		emphasis = (ha->fw_seriallink_options[2] &
4473 		    (BIT_4 | BIT_3)) >> 3;
4474 		tx_sens = ha->fw_seriallink_options[0] &
4475 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4476 		rx_sens = (ha->fw_seriallink_options[0] &
4477 		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
4478 		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
4479 		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4480 			if (rx_sens == 0x0)
4481 				rx_sens = 0x3;
4482 			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
4483 		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4484 			ha->fw_options[10] |= BIT_5 |
4485 			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
4486 			    (tx_sens & (BIT_1 | BIT_0));
4487 
4488 		/*  2G settings */
4489 		swing = (ha->fw_seriallink_options[2] &
4490 		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
4491 		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
4492 		tx_sens = ha->fw_seriallink_options[1] &
4493 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4494 		rx_sens = (ha->fw_seriallink_options[1] &
4495 		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
4496 		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
4497 		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4498 			if (rx_sens == 0x0)
4499 				rx_sens = 0x3;
4500 			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
4501 		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4502 			ha->fw_options[11] |= BIT_5 |
4503 			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
4504 			    (tx_sens & (BIT_1 | BIT_0));
4505 	}
4506 
4507 	/* FCP2 options. */
4508 	/*  Return command IOCBs without waiting for an ABTS to complete. */
4509 	ha->fw_options[3] |= BIT_13;
4510 
4511 	/* LED scheme. */
4512 	if (ha->flags.enable_led_scheme)
4513 		ha->fw_options[2] |= BIT_12;
4514 
4515 	/* Detect ISP6312. */
4516 	if (IS_QLA6312(ha))
4517 		ha->fw_options[2] |= BIT_13;
4518 
4519 	/* Set Retry FLOGI in case of P2P connection */
4520 	if (ha->operating_mode == P2P) {
4521 		ha->fw_options[2] |= BIT_3;
4522 		ql_dbg(ql_dbg_disc, vha, 0x2100,
4523 		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
4524 			__func__, ha->fw_options[2]);
4525 	}
4526 
4527 	/* Update firmware options. */
4528 	qla2x00_set_fw_options(vha, ha->fw_options);
4529 }
4530 
/**
 * qla24xx_update_fw_options() - Adjust firmware options on FWI2 adapters.
 * @vha: HA context
 *
 * Applies module-parameter and feature-flag driven firmware option bits
 * (ABTS hold, FLOGI retry, ATIO queue routing, exchange tracking, PUREX,
 * EDIF/N2N security, transceiver async events), pushes them to firmware
 * when any are set, then updates serial-link (serdes) parameters if
 * enabled in NVRAM.  No-op on P3P (ISP82xx) parts.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/*  Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
		/* Only route to ATIOQ when target/dual mode is active. */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~(BIT_4);

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;

		/*
		 * N2N: set Secure=1 for PLOGI ACC and
		 * fw shal not send PRLI after PLOGI Acc
		 */
		if (ha->flags.edif_enabled &&
		    DBELL_ACTIVE(vha)) {
			ha->fw_options[3] |= BIT_15;
			ha->flags.n2n_fw_acc_sec = 1;
		} else {
			ha->fw_options[3] &= ~BIT_15;
			ha->flags.n2n_fw_acc_sec = 0;
		}
	}

	/* PUREX pass-through is needed by RDP, SCM and EDIF features. */
	if (ql2xrdpenable || ha->flags.scm_supported_f ||
	    ha->flags.edif_enabled)
		ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;

	/* Enable Async 8130/8131 events -- transceiver insertion/removal */
	if (IS_BPM_RANGE_CAPABLE(ha))
		ha->fw_options[3] |= BIT_10;

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Skip the mailbox command when no option bits are set. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	/* BIT_0 of word 0 enables serdes parameter overrides. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}
4623 
/**
 * qla2x00_config_rings() - Program ring parameters for legacy ISPs.
 * @vha: HA context
 *
 * Writes the lengths and DMA addresses of request/response queue 0 into
 * the initialization control block and zeroes the hardware ring in/out
 * pointer registers, reading one back to flush PCI posting.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);

	/* Reset the hardware ring indices. */
	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
	rd_reg_word(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}
4646 
/**
 * qla24xx_config_rings() - Program ring/queue parameters for FWI2 ISPs.
 * @vha: HA context
 *
 * Fills the 24xx initialization control block with request, response and
 * ATIO ring addresses/lengths, configures multi-queue/MSI-X firmware
 * option bits, zeroes the hardware ring registers, lets target mode
 * adjust its rings, and applies any user-configured link speed.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;	/* default routing ID: same bus/devfn */
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &icb->request_q_address);
	put_unaligned_le64(rsp->dma, &icb->response_q_address);

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);

	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/* Multi-queue capable: program QoS, RID and MSI-X vector. */
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
	} else {
		/* Single-queue register layout. */
		wrt_reg_dword(&reg->isp24.req_q_in, 0);
		wrt_reg_dword(&reg->isp24.req_q_out, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
	}

	qlt_24xx_config_rings(vha);

	/* If the user has configured the speed, set it here */
	if (ha->set_data_rate) {
		ql_dbg(ql_dbg_init, vha, 0x00fd,
		    "Speed set by user : %s Gbps \n",
		    qla2x00_get_link_speed_str(ha, ha->set_data_rate));
		icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
	}

	/* PCI posting */
	rd_reg_word(&ioreg->hccr);
}
4730 
4731 /**
4732  * qla2x00_init_rings() - Initializes firmware.
4733  * @vha: HA context
4734  *
4735  * Beginning of request ring has initialization control block already built
4736  * by nvram config routine.
4737  *
4738  * Returns 0 on success.
4739  */
4740 int
qla2x00_init_rings(scsi_qla_host_t * vha)4741 qla2x00_init_rings(scsi_qla_host_t *vha)
4742 {
4743 	int	rval;
4744 	unsigned long flags = 0;
4745 	int cnt, que;
4746 	struct qla_hw_data *ha = vha->hw;
4747 	struct req_que *req;
4748 	struct rsp_que *rsp;
4749 	struct mid_init_cb_24xx *mid_init_cb =
4750 	    (struct mid_init_cb_24xx *) ha->init_cb;
4751 
4752 	spin_lock_irqsave(&ha->hardware_lock, flags);
4753 
4754 	/* Clear outstanding commands array. */
4755 	for (que = 0; que < ha->max_req_queues; que++) {
4756 		req = ha->req_q_map[que];
4757 		if (!req || !test_bit(que, ha->req_qid_map))
4758 			continue;
4759 		req->out_ptr = (uint16_t *)(req->ring + req->length);
4760 		*req->out_ptr = 0;
4761 		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
4762 			req->outstanding_cmds[cnt] = NULL;
4763 
4764 		req->current_outstanding_cmd = 1;
4765 
4766 		/* Initialize firmware. */
4767 		req->ring_ptr  = req->ring;
4768 		req->ring_index    = 0;
4769 		req->cnt      = req->length;
4770 	}
4771 
4772 	for (que = 0; que < ha->max_rsp_queues; que++) {
4773 		rsp = ha->rsp_q_map[que];
4774 		if (!rsp || !test_bit(que, ha->rsp_qid_map))
4775 			continue;
4776 		rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
4777 		*rsp->in_ptr = 0;
4778 		/* Initialize response queue entries */
4779 		if (IS_QLAFX00(ha))
4780 			qlafx00_init_response_q_entries(rsp);
4781 		else
4782 			qla2x00_init_response_q_entries(rsp);
4783 	}
4784 
4785 	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
4786 	ha->tgt.atio_ring_index = 0;
4787 	/* Initialize ATIO queue entries */
4788 	qlt_init_atio_q_entries(vha);
4789 
4790 	ha->isp_ops->config_rings(vha);
4791 
4792 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4793 
4794 	if (IS_QLAFX00(ha)) {
4795 		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
4796 		goto next_check;
4797 	}
4798 
4799 	/* Update any ISP specific firmware options before initialization. */
4800 	ha->isp_ops->update_fw_options(vha);
4801 
4802 	ql_dbg(ql_dbg_init, vha, 0x00d1,
4803 	       "Issue init firmware FW opt 1-3= %08x %08x %08x.\n",
4804 	       le32_to_cpu(mid_init_cb->init_cb.firmware_options_1),
4805 	       le32_to_cpu(mid_init_cb->init_cb.firmware_options_2),
4806 	       le32_to_cpu(mid_init_cb->init_cb.firmware_options_3));
4807 
4808 	if (ha->flags.npiv_supported) {
4809 		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
4810 			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
4811 		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
4812 	}
4813 
4814 	if (IS_FWI2_CAPABLE(ha)) {
4815 		mid_init_cb->options = cpu_to_le16(BIT_1);
4816 		mid_init_cb->init_cb.execution_throttle =
4817 		    cpu_to_le16(ha->cur_fw_xcb_count);
4818 		ha->flags.dport_enabled =
4819 			(le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
4820 			 BIT_7) != 0;
4821 		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
4822 		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
4823 		/* FA-WWPN Status */
4824 		ha->flags.fawwpn_enabled =
4825 			(le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
4826 			 BIT_6) != 0;
4827 		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
4828 		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
4829 		/* Init_cb will be reused for other command(s).  Save a backup copy of port_name */
4830 		memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
4831 	}
4832 
4833 	/* ELS pass through payload is limit by frame size. */
4834 	if (ha->flags.edif_enabled)
4835 		mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
4836 
4837 	QLA_FW_STARTED(ha);
4838 	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
4839 next_check:
4840 	if (rval) {
4841 		QLA_FW_STOPPED(ha);
4842 		ql_log(ql_log_fatal, vha, 0x00d2,
4843 		    "Init Firmware **** FAILED ****.\n");
4844 	} else {
4845 		ql_dbg(ql_dbg_init, vha, 0x00d3,
4846 		    "Init Firmware -- success.\n");
4847 		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
4848 	}
4849 
4850 	return (rval);
4851 }
4852 
4853 /**
4854  * qla2x00_fw_ready() - Waits for firmware ready.
4855  * @vha: HA context
4856  *
4857  * Returns 0 on success.
4858  */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int		rval;
	unsigned long	wtime, mtime, cs84xx_time;
	uint16_t	min_wait;	/* Minimum wait time if loop is down */
	uint16_t	wait_time;	/* Wait time if loop is coming ready */
	uint16_t	state[6];
	struct qla_hw_data *ha = vha->hw;

	/* ISPFX00 adapters have their own ready-wait implementation. */
	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	/*
	 * Poll the firmware state every 500ms until it reports READY or one
	 * of the two deadlines (mtime for loop-down, wtime overall) expires.
	 */
	do {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize.
					 * Credit both deadlines so the 84xx
					 * verify step doesn't eat the budget.
					 */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				/* Refresh timing parameters from firmware. */
				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
				ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}
4982 
4983 /*
4984 *  qla2x00_configure_hba
4985 *      Setup adapter context.
4986 *
4987 * Input:
4988 *      ha = adapter state pointer.
4989 *
4990 * Returns:
4991 *      0 = success
4992 *
4993 * Context:
4994 *      Kernel context.
4995 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int       rval;
	uint16_t      loop_id;
	uint16_t      topo;
	uint16_t      sw_cap;
	uint8_t       al_pa;
	uint8_t       area;
	uint8_t       domain;
	char		connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		/* Transitional loop states are expected; just report them. */
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			/* On the base port, try a link re-init before forcing
			 * a full ISP abort. */
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	/* topo == 4: firmware could not determine topology yet — retry. */
	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;

	/* Record topology-dependent mode and a label for the log message. */
	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->switch_cap = 0;
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->switch_cap = 0;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->switch_cap = 0;
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* In N2N (topo 2) the port id may be assigned elsewhere; skip the
	 * host-map update in those configurations. */
	if (vha->hw->flags.edif_enabled) {
		if (topo != 2)
			qla_update_host_map(vha, id);
	} else if (!(topo == 2 && ha->flags.n2n_bigger))
		qla_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}
5109 
5110 inline void
qla2x00_set_model_info(scsi_qla_host_t * vha,uint8_t * model,size_t len,const char * def)5111 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
5112 		       const char *def)
5113 {
5114 	char *st, *en;
5115 	uint16_t index;
5116 	uint64_t zero[2] = { 0 };
5117 	struct qla_hw_data *ha = vha->hw;
5118 	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
5119 	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
5120 
5121 	if (len > sizeof(zero))
5122 		len = sizeof(zero);
5123 	if (memcmp(model, &zero, len) != 0) {
5124 		memcpy(ha->model_number, model, len);
5125 		st = en = ha->model_number;
5126 		en += len - 1;
5127 		while (en > st) {
5128 			if (*en != 0x20 && *en != 0x00)
5129 				break;
5130 			*en-- = '\0';
5131 		}
5132 
5133 		index = (ha->pdev->subsystem_device & 0xff);
5134 		if (use_tbl &&
5135 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5136 		    index < QLA_MODEL_NAMES)
5137 			strscpy(ha->model_desc,
5138 			    qla2x00_model_name[index * 2 + 1],
5139 			    sizeof(ha->model_desc));
5140 	} else {
5141 		index = (ha->pdev->subsystem_device & 0xff);
5142 		if (use_tbl &&
5143 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5144 		    index < QLA_MODEL_NAMES) {
5145 			strscpy(ha->model_number,
5146 				qla2x00_model_name[index * 2],
5147 				sizeof(ha->model_number));
5148 			strscpy(ha->model_desc,
5149 			    qla2x00_model_name[index * 2 + 1],
5150 			    sizeof(ha->model_desc));
5151 		} else {
5152 			strscpy(ha->model_number, def,
5153 				sizeof(ha->model_number));
5154 		}
5155 	}
5156 	if (IS_FWI2_CAPABLE(ha))
5157 		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
5158 		    sizeof(ha->model_desc));
5159 }
5160 
5161 /* On sparc systems, obtain port and node WWN from firmware
5162  * properties.
5163  */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	/* Override the NVRAM port WWN with the OpenFirmware property,
	 * when present and large enough. */
	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	/* Likewise for the node WWN. */
	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
	/* No-op on non-SPARC builds. */
}
5182 
5183 /*
5184 * NVRAM configuration for ISP 2xxx
5185 *
5186 * Input:
5187 *      ha                = adapter block pointer.
5188 *
5189 * Output:
5190 *      initialization control block in response_ring
5191 *      host adapters parameters in host adapter block
5192 *
5193 * Returns:
5194 *      0 = success.
5195 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int             rval;
	uint8_t         chksum = 0;
	uint16_t        cnt;
	uint8_t         *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t       *icb = ha->init_cb;
	nvram_t         *nv = ha->nvram;
	uint8_t         *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	/* Function 1 of dual-function parts uses the upper NVRAM half. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		/* Chip-family specific firmware option defaults. */
		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Placeholder (invalid) WWPN bytes; see warning above. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		/* Non-zero rval reports the bad-NVRAM condition to callers. */
		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		/* Derive the model string from the frame buffer revision
		 * on 2300-series parts, from NVRAM otherwise. */
		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	/* dptr2 continues from where the first copy left off in NVRAM. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout =	 nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count  = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
5493 
/* Atomically update the fcport state, logging real transitions. */
void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int prev = atomic_read(&fcport->state);

	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (prev && prev != state)
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		       "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
		       fcport->port_name, port_state_str[prev],
		       port_state_str[state], fcport->d_id.b.domain,
		       fcport->d_id.b.area, fcport->d_id.b.al_pa);
}
5510 
5511 /**
5512  * qla2x00_alloc_fcport() - Allocate a generic fcport.
5513  * @vha: HA context
5514  * @flags: allocation flags
5515  *
5516  * Returns a pointer to the allocated fcport, or NULL, if none available.
5517  */
5518 fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t * vha,gfp_t flags)5519 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
5520 {
5521 	fc_port_t *fcport;
5522 
5523 	fcport = kzalloc(sizeof(fc_port_t), flags);
5524 	if (!fcport)
5525 		return NULL;
5526 
5527 	fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
5528 		sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
5529 		flags);
5530 	if (!fcport->ct_desc.ct_sns) {
5531 		ql_log(ql_log_warn, vha, 0xd049,
5532 		    "Failed to allocate ct_sns request.\n");
5533 		kfree(fcport);
5534 		return NULL;
5535 	}
5536 
5537 	/* Setup fcport template structure. */
5538 	fcport->vha = vha;
5539 	fcport->port_type = FCT_UNKNOWN;
5540 	fcport->loop_id = FC_NO_LOOP_ID;
5541 	qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
5542 	fcport->supported_classes = FC_COS_UNSPECIFIED;
5543 	fcport->fp_speed = PORT_SPEED_UNKNOWN;
5544 
5545 	fcport->disc_state = DSC_DELETED;
5546 	fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
5547 	fcport->deleted = QLA_SESS_DELETED;
5548 	fcport->login_retry = vha->hw->login_retry_count;
5549 	fcport->chip_reset = vha->hw->base_qpair->chip_reset;
5550 	fcport->logout_on_delete = 1;
5551 	fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
5552 	fcport->tgt_short_link_down_cnt = 0;
5553 	fcport->dev_loss_tmo = 0;
5554 
5555 	if (!fcport->ct_desc.ct_sns) {
5556 		ql_log(ql_log_warn, vha, 0xd049,
5557 		    "Failed to allocate ct_sns request.\n");
5558 		kfree(fcport);
5559 		return NULL;
5560 	}
5561 
5562 	INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
5563 	INIT_WORK(&fcport->free_work, qlt_free_session_done);
5564 	INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
5565 	INIT_LIST_HEAD(&fcport->gnl_entry);
5566 	INIT_LIST_HEAD(&fcport->list);
5567 	INIT_LIST_HEAD(&fcport->unsol_ctx_head);
5568 
5569 	INIT_LIST_HEAD(&fcport->sess_cmd_list);
5570 	spin_lock_init(&fcport->sess_cmd_lock);
5571 
5572 	spin_lock_init(&fcport->edif.sa_list_lock);
5573 	INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
5574 	INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
5575 
5576 	spin_lock_init(&fcport->edif.indx_list_lock);
5577 	INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
5578 
5579 	return fcport;
5580 }
5581 
5582 void
qla2x00_free_fcport(fc_port_t * fcport)5583 qla2x00_free_fcport(fc_port_t *fcport)
5584 {
5585 	if (fcport->ct_desc.ct_sns) {
5586 		dma_free_coherent(&fcport->vha->hw->pdev->dev,
5587 			sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
5588 			fcport->ct_desc.ct_sns_dma);
5589 
5590 		fcport->ct_desc.ct_sns = NULL;
5591 	}
5592 
5593 	qla_edif_flush_sa_ctl_lists(fcport);
5594 	list_del(&fcport->list);
5595 	qla2x00_clear_loop_id(fcport);
5596 
5597 	qla_edif_list_del(fcport);
5598 
5599 	kfree(fcport);
5600 }
5601 
/*
 * Fetch the firmware's PLOGI payload template into the init_cb DMA buffer
 * and cache a big-endian copy in ha->plogi_els_payld for later ELS use.
 */
static void qla_get_login_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 *src, tmpl_len;
	__be32 *dst;
	int ret;

	memset(ha->init_cb, 0, ha->init_cb_size);
	tmpl_len = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
	ret = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
					   ha->init_cb, tmpl_len);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00d1,
		       "PLOGI ELS param read fail.\n");
		return;
	}

	/* Convert the template to wire (big-endian) order word by word. */
	dst = (__be32 *)&ha->plogi_els_payld.fl_csp;
	src = (uint32_t *)ha->init_cb;
	cpu_to_be32_array(dst, src, tmpl_len / 4);

	ha->flags.plogi_template_valid = 1;
}
5624 
5625 /*
5626  * qla2x00_configure_loop
5627  *      Updates Fibre Channel Device Database with what is actually on loop.
5628  *
5629  * Input:
5630  *      ha                = adapter block pointer.
5631  *
5632  * Returns:
5633  *      0 = success.
5634  *      1 = error.
5635  *      2 = database was full and device was not configured.
5636  */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int  rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/* Snapshot dpc_flags; 'flags' is mutated locally below while
	 * 'save_flags' preserves the entry state for the resync-restore
	 * logic at the end of this function. */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);
	qla_get_login_template(vha);

	/* Determine what we need to do */
	if ((ha->current_topology == ISP_CFG_FL ||
	    ha->current_topology == ISP_CFG_F) &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		/* Fabric topology: treat a local-loop update as a fabric
		 * (RSCN) scan instead. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
		   ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Coming back online: do both scans. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * use link up to wake up app to get ready for
			 * authentication.
			 */
			if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
				qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
						      ha->link_data_rate);

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally. local port wwpn %8phN id %06x)\n",
		    __func__, vha->port_name, vha->d_id.b24);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}
5759 
/*
 * Drive login for N2N (point-to-point, no switch) topology: kick the
 * login state machine for the single N2N peer if one is known, otherwise
 * bump the scan-retry counter and request a loop resync.
 */
static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
{
	fc_port_t *peer;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);

	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

	list_for_each_entry(peer, &vha->vp_fcports, list) {
		if (!peer->n2n_flag)
			continue;
		qla24xx_fcport_handle_login(vha, peer);
		return QLA_SUCCESS;
	}

	/* No peer discovered yet -- retry the scan a bounded number of
	 * times via a loop resync. */
	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_retry++;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	}

	return QLA_FUNCTION_FAILED;
}
5787 
5788 static void
qla_reinitialize_link(scsi_qla_host_t * vha)5789 qla_reinitialize_link(scsi_qla_host_t *vha)
5790 {
5791 	int rval;
5792 
5793 	atomic_set(&vha->loop_state, LOOP_DOWN);
5794 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
5795 	rval = qla2x00_full_login_lip(vha);
5796 	if (rval == QLA_SUCCESS) {
5797 		ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
5798 	} else {
5799 		ql_dbg(ql_dbg_disc, vha, 0xd051,
5800 			"Link reinitialization failed (%d)\n", rval);
5801 	}
5802 }
5803 
5804 /*
5805  * qla2x00_configure_local_loop
5806  *	Updates Fibre Channel Device Database with local loop devices.
5807  *
5808  * Input:
5809  *	ha = adapter block pointer.
5810  *
5811  * Returns:
5812  *	0 = success.
5813  */
5814 static int
qla2x00_configure_local_loop(scsi_qla_host_t * vha)5815 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
5816 {
5817 	int		rval, rval2;
5818 	int		found;
5819 	fc_port_t	*fcport, *new_fcport;
5820 	uint16_t	index;
5821 	uint16_t	entries;
5822 	struct gid_list_info *gid;
5823 	uint16_t	loop_id;
5824 	uint8_t		domain, area, al_pa;
5825 	struct qla_hw_data *ha = vha->hw;
5826 	unsigned long flags;
5827 
5828 	/* Initiate N2N login. */
5829 	if (N2N_TOPO(ha))
5830 		return qla2x00_configure_n2n_loop(vha);
5831 
5832 	new_fcport = NULL;
5833 	entries = MAX_FIBRE_DEVICES_LOOP;
5834 
5835 	/* Get list of logged in devices. */
5836 	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
5837 	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
5838 	    &entries);
5839 	if (rval != QLA_SUCCESS)
5840 		goto err;
5841 
5842 	ql_dbg(ql_dbg_disc, vha, 0x2011,
5843 	    "Entries in ID list (%d).\n", entries);
5844 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
5845 	    ha->gid_list, entries * sizeof(*ha->gid_list));
5846 
5847 	if (entries == 0) {
5848 		spin_lock_irqsave(&vha->work_lock, flags);
5849 		vha->scan.scan_retry++;
5850 		spin_unlock_irqrestore(&vha->work_lock, flags);
5851 
5852 		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5853 			u8 loop_map_entries = 0;
5854 			int rc;
5855 
5856 			rc = qla2x00_get_fcal_position_map(vha, NULL,
5857 						&loop_map_entries);
5858 			if (rc == QLA_SUCCESS && loop_map_entries > 1) {
5859 				/*
5860 				 * There are devices that are still not logged
5861 				 * in. Reinitialize to give them a chance.
5862 				 */
5863 				qla_reinitialize_link(vha);
5864 				return QLA_FUNCTION_FAILED;
5865 			}
5866 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5867 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5868 		}
5869 	} else {
5870 		vha->scan.scan_retry = 0;
5871 	}
5872 
5873 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
5874 		fcport->scan_state = QLA_FCPORT_SCAN;
5875 	}
5876 
5877 	/* Allocate temporary fcport for any new fcports discovered. */
5878 	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5879 	if (new_fcport == NULL) {
5880 		ql_log(ql_log_warn, vha, 0x2012,
5881 		    "Memory allocation failed for fcport.\n");
5882 		rval = QLA_MEMORY_ALLOC_FAILED;
5883 		goto err;
5884 	}
5885 	new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5886 
5887 	/* Add devices to port list. */
5888 	gid = ha->gid_list;
5889 	for (index = 0; index < entries; index++) {
5890 		domain = gid->domain;
5891 		area = gid->area;
5892 		al_pa = gid->al_pa;
5893 		if (IS_QLA2100(ha) || IS_QLA2200(ha))
5894 			loop_id = gid->loop_id_2100;
5895 		else
5896 			loop_id = le16_to_cpu(gid->loop_id);
5897 		gid = (void *)gid + ha->gid_list_info_size;
5898 
5899 		/* Bypass reserved domain fields. */
5900 		if ((domain & 0xf0) == 0xf0)
5901 			continue;
5902 
5903 		/* Bypass if not same domain and area of adapter. */
5904 		if (area && domain && ((area != vha->d_id.b.area) ||
5905 		    (domain != vha->d_id.b.domain)) &&
5906 		    (ha->current_topology == ISP_CFG_NL))
5907 			continue;
5908 
5909 
5910 		/* Bypass invalid local loop ID. */
5911 		if (loop_id > LAST_LOCAL_LOOP_ID)
5912 			continue;
5913 
5914 		memset(new_fcport->port_name, 0, WWN_SIZE);
5915 
5916 		/* Fill in member data. */
5917 		new_fcport->d_id.b.domain = domain;
5918 		new_fcport->d_id.b.area = area;
5919 		new_fcport->d_id.b.al_pa = al_pa;
5920 		new_fcport->loop_id = loop_id;
5921 		new_fcport->scan_state = QLA_FCPORT_FOUND;
5922 
5923 		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
5924 		if (rval2 != QLA_SUCCESS) {
5925 			ql_dbg(ql_dbg_disc, vha, 0x2097,
5926 			    "Failed to retrieve fcport information "
5927 			    "-- get_port_database=%x, loop_id=0x%04x.\n",
5928 			    rval2, new_fcport->loop_id);
5929 			/* Skip retry if N2N */
5930 			if (ha->current_topology != ISP_CFG_N) {
5931 				ql_dbg(ql_dbg_disc, vha, 0x2105,
5932 				    "Scheduling resync.\n");
5933 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5934 				continue;
5935 			}
5936 		}
5937 
5938 		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5939 		/* Check for matching device in port list. */
5940 		found = 0;
5941 		fcport = NULL;
5942 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
5943 			if (memcmp(new_fcport->port_name, fcport->port_name,
5944 			    WWN_SIZE))
5945 				continue;
5946 
5947 			fcport->flags &= ~FCF_FABRIC_DEVICE;
5948 			fcport->loop_id = new_fcport->loop_id;
5949 			fcport->port_type = new_fcport->port_type;
5950 			fcport->d_id.b24 = new_fcport->d_id.b24;
5951 			memcpy(fcport->node_name, new_fcport->node_name,
5952 			    WWN_SIZE);
5953 			fcport->scan_state = QLA_FCPORT_FOUND;
5954 			if (fcport->login_retry == 0) {
5955 				fcport->login_retry = vha->hw->login_retry_count;
5956 				ql_dbg(ql_dbg_disc, vha, 0x2135,
5957 				    "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
5958 				    fcport->port_name, fcport->loop_id,
5959 				    fcport->login_retry);
5960 			}
5961 			found++;
5962 			break;
5963 		}
5964 
5965 		if (!found) {
5966 			/* New device, add to fcports list. */
5967 			list_add_tail(&new_fcport->list, &vha->vp_fcports);
5968 
5969 			/* Allocate a new replacement fcport. */
5970 			fcport = new_fcport;
5971 
5972 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5973 
5974 			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5975 
5976 			if (new_fcport == NULL) {
5977 				ql_log(ql_log_warn, vha, 0xd031,
5978 				    "Failed to allocate memory for fcport.\n");
5979 				rval = QLA_MEMORY_ALLOC_FAILED;
5980 				goto err;
5981 			}
5982 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5983 			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5984 		}
5985 
5986 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5987 
5988 		/* Base iIDMA settings on HBA port speed. */
5989 		fcport->fp_speed = ha->link_data_rate;
5990 	}
5991 
5992 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
5993 		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5994 			break;
5995 
5996 		if (fcport->scan_state == QLA_FCPORT_SCAN) {
5997 			if ((qla_dual_mode_enabled(vha) ||
5998 			    qla_ini_mode_enabled(vha)) &&
5999 			    atomic_read(&fcport->state) == FCS_ONLINE) {
6000 				qla2x00_mark_device_lost(vha, fcport,
6001 					ql2xplogiabsentdevice);
6002 				if (fcport->loop_id != FC_NO_LOOP_ID &&
6003 				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
6004 				    fcport->port_type != FCT_INITIATOR &&
6005 				    fcport->port_type != FCT_BROADCAST) {
6006 					ql_dbg(ql_dbg_disc, vha, 0x20f0,
6007 					    "%s %d %8phC post del sess\n",
6008 					    __func__, __LINE__,
6009 					    fcport->port_name);
6010 
6011 					qlt_schedule_sess_for_deletion(fcport);
6012 					continue;
6013 				}
6014 			}
6015 		}
6016 
6017 		if (fcport->scan_state == QLA_FCPORT_FOUND)
6018 			qla24xx_fcport_handle_login(vha, fcport);
6019 	}
6020 
6021 	qla2x00_free_fcport(new_fcport);
6022 
6023 	return rval;
6024 
6025 err:
6026 	ql_dbg(ql_dbg_disc, vha, 0x2098,
6027 	       "Configure local loop error exit: rval=%x.\n", rval);
6028 	return rval;
6029 }
6030 
6031 static void
qla2x00_iidma_fcport(scsi_qla_host_t * vha,fc_port_t * fcport)6032 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
6033 {
6034 	int rval;
6035 	uint16_t mb[MAILBOX_REGISTER_COUNT];
6036 	struct qla_hw_data *ha = vha->hw;
6037 
6038 	if (!IS_IIDMA_CAPABLE(ha))
6039 		return;
6040 
6041 	if (atomic_read(&fcport->state) != FCS_ONLINE)
6042 		return;
6043 
6044 	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
6045 	    fcport->fp_speed > ha->link_data_rate ||
6046 	    !ha->flags.gpsc_supported)
6047 		return;
6048 
6049 	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
6050 	    mb);
6051 	if (rval != QLA_SUCCESS) {
6052 		ql_dbg(ql_dbg_disc, vha, 0x2004,
6053 		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
6054 		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
6055 	} else {
6056 		ql_dbg(ql_dbg_disc, vha, 0x2005,
6057 		    "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
6058 		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
6059 		    fcport->fp_speed, fcport->port_name);
6060 	}
6061 }
6062 
/*
 * Work-item body: refresh iIDMA speed settings and then the FCP priority
 * for @fcport (in that order).
 */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}
6068 
/*
 * Queue a QLA_EVT_IIDMA work event for @fcport on @vha's work list.
 *
 * Returns QLA_FUNCTION_FAILED when a work element cannot be allocated,
 * otherwise the result of posting the work.
 */
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);

	if (!evt)
		return QLA_FUNCTION_FAILED;

	evt->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, evt);
}
6080 
/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	/* Port already online -- assume it is already registered. */
	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	/* Build the FC transport identifiers from the discovered port. */
	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/* Publish the fcport pointer in the rport's private data under the
	 * SCSI host lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	/* Cache the transport-assigned dev_loss timeout. */
	fcport->dev_loss_tmo = rport->dev_loss_tmo;

	rport->supported_classes = fcport->supported_classes;

	/* Translate the driver's port type into FC transport roles. */
	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	fc_remote_port_rolechg(rport, rport_ids.roles);

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
	    __func__, fcport->port_name, vha->host_no,
	    rport->scsi_target_id, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
}
6132 
/*
 * qla2x00_update_fcport
 *	Bring a discovered port fully online: reset login bookkeeping,
 *	apply iIDMA/FCP-priority settings, and register the port with the
 *	FC/NVME transports and/or the target core based on active_mode.
 *
 * Input:
 *	vha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Context:
 *	Kernel context.
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	unsigned long flags;

	/* Reserved well-known addresses are never registered. */
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

	/* Clear the "deleted" marker under the work lock. */
	spin_lock_irqsave(&vha->work_lock, flags);
	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	/* No explicit logout on delete for private loop (NL) topology. */
	if (vha->hw->current_topology == ISP_CFG_NL)
		fcport->logout_on_delete = 0;
	else
		fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	/* Count a short link-down episode and reset the down-time marker. */
	if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
		fcport->tgt_short_link_down_cnt++;
		fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	}

	/* Point-to-point/NL topologies keep the firmware N_Port handle. */
	switch (vha->hw->current_topology) {
	case ISP_CFG_N:
	case ISP_CFG_NL:
		fcport->keep_nport_handle = 1;
		break;
	default:
		break;
	}

	qla2x00_iidma_fcport(vha, fcport);

	qla2x00_dfs_create_rport(vha, fcport);

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	/* Register with the FC transport and/or the target core. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}

	if (NVME_TARGET(vha->hw, fcport))
		qla_nvme_register_remote(vha, fcport);

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	/* Refresh fabric port name or speed asynchronously as needed. */
	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		if (fcport->id_changed) {
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gfpnid_work(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gpsc_work(vha, fcport);
		}
	}

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
}
6236 
/*
 * Deferred-work handler that registers an fcport via
 * qla2x00_update_fcport().  If RSCNs arrived while registration was in
 * flight (rscn_gen changed), act on the recorded next_disc_state.
 */
void qla_register_fcport_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
	u32 rscn_gen = fcport->rscn_gen;	/* snapshot before update */
	u16 data[2];

	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	qla2x00_update_fcport(fcport->vha, fcport);

	ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
	       "%s rscn gen %d/%d next DS %d\n", __func__,
	       rscn_gen, fcport->rscn_gen, fcport->next_disc_state);

	if (rscn_gen != fcport->rscn_gen) {
		/* RSCN(s) came in while registration */
		switch (fcport->next_disc_state) {
		case DSC_DELETE_PEND:
			qlt_schedule_sess_for_deletion(fcport);
			break;
		case DSC_ADISC:
			data[0] = data[1] = 0;
			qla2x00_post_async_adisc_work(fcport->vha, fcport,
			    data);
			break;
		default:
			break;
		}
	}
}
6268 
/*
 * qla2x00_configure_fabric
 *      Setup SNS devices with loop ID's: detect the fabric (FL port),
 *      log into the SNS/management server, perform name-server
 *      registrations, then scan for fabric devices.
 *
 * Input:
 *      vha = adapter block pointer.
 *
 * Returns:
 *      0 = success.
 *      BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int	rval;
	fc_port_t	*fcport;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];
	uint16_t	loop_id;
	struct qla_hw_data *ha = vha->hw;
	int		discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* No fabric attached: not an error for this routine. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, vha, 0x20ff,
		    "Failed to get Fabric Port Name\n");

	/* Target/dual mode needs RSCNs delivered to the driver. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}

	/* Single-iteration loop: "break" is used as a structured bail-out. */
	do {
		qla2x00_mgmt_svr_login(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Name-server registrations (RFT_ID/RFF_ID/RNN_ID/RSNN_NN);
		 * abandon them early when a loop resync was requested. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}


		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen. */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			    NULL);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else  {
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}
6410 
/*
 * qla2x00_find_all_fabric_devs
 *	Walk the fabric name server's device list (GID_PT et al., falling
 *	back to GA_NXT) and reconcile it with the driver's fcport list:
 *	known ports are updated in place, new ports are added, and ports
 *	no longer present are marked lost/scheduled for deletion.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int		rval;
	uint16_t	loop_id;
	fc_port_t	*fcport, *new_fcport;
	int		found;

	sw_info_t	*swl;
	int		swl_idx;
	int		first_dev, last_dev;
	port_id_t	wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN. */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		/* Any query failure drops us to the GA_NXT path (swl=NULL);
		 * a pending loop resync aborts the scan entirely. */
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		/* Loop went down mid-scan on an FL topology: resync. */
		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			/* Consume the next pre-fetched name-server entry. */
			if (last_dev) {
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				/* rsvd_1 marks the final GID_PT entry. */
				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			    continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
		    new_fcport->fc4_type != 0))
			continue;

		/* fcport list manipulation is done under the session lock. */
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			if (fcport->login_retry == 0)
				fcport->login_retry =
					vha->hw->login_retry_count;
			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
					 "port changed FC ID, %8phC"
					 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
					 fcport->port_name,
					 fcport->d_id.b.domain,
					 fcport->d_id.b.area,
					 fcport->d_id.b.al_pa,
					 fcport->loop_id,
					 new_fcport->d_id.b.domain,
					 new_fcport->d_id.b.area,
					 new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		/* NVME target mid-deletion: undo login accounting. */
		if (found && NVME_TARGET(vha->hw, fcport)) {
			if (fcport->disc_state == DSC_DELETE_PEND) {
				qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
				vha->fcport_count--;
				fcport->login_succ = 0;
			}
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	/* The spare fcport never made it onto the list; release it. */
	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
			continue;

		/* Still in SCAN state => not seen in this pass: lost. */
		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);
					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND &&
		    (fcport->flags & FCF_LOGIN_NEEDED) != 0)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}
6720 
6721 /* FW does not set aside Loop id for MGMT Server/FFFFFAh */
6722 int
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t * vha)6723 qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
6724 {
6725 	int loop_id = FC_NO_LOOP_ID;
6726 	int lid = NPH_MGMT_SERVER - vha->vp_idx;
6727 	unsigned long flags;
6728 	struct qla_hw_data *ha = vha->hw;
6729 
6730 	if (vha->vp_idx == 0) {
6731 		set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
6732 		return NPH_MGMT_SERVER;
6733 	}
6734 
6735 	/* pick id from high and work down to low */
6736 	spin_lock_irqsave(&ha->vport_slock, flags);
6737 	for (; lid > 0; lid--) {
6738 		if (!test_bit(lid, vha->hw->loop_id_map)) {
6739 			set_bit(lid, vha->hw->loop_id_map);
6740 			loop_id = lid;
6741 			break;
6742 		}
6743 	}
6744 	spin_unlock_irqrestore(&ha->vport_slock, flags);
6745 
6746 	return loop_id;
6747 }
6748 
/*
 * qla2x00_fabric_login
 *	Issue fabric login command, retrying with alternate loop IDs as
 *	directed by the firmware's mailbox status.
 *
 * Input:
 *	vha = adapter block pointer.
 *	fcport = pointer to FC device type structure.
 *	next_loopid = out: loop ID for the next login attempt.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Retry loop: each pass issues one fabric login attempt and then
	 * dispatches on the mailbox completion status. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is saved
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* Decode port capabilities from mailbox 1/10. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}
6893 
6894 /*
6895  * qla2x00_local_device_login
6896  *	Issue local device login command.
6897  *
6898  * Input:
6899  *	ha = adapter block pointer.
6900  *	loop_id = loop id of device to login to.
6901  *
 * Returns (no #define exists for these status values):
6903  *      0 - Login successfully
6904  *      1 - Login failed
6905  *      3 - Fatal error
6906  */
6907 int
qla2x00_local_device_login(scsi_qla_host_t * vha,fc_port_t * fcport)6908 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
6909 {
6910 	int		rval;
6911 	uint16_t	mb[MAILBOX_REGISTER_COUNT];
6912 
6913 	memset(mb, 0, sizeof(mb));
6914 	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
6915 	if (rval == QLA_SUCCESS) {
6916 		/* Interrogate mailbox registers for any errors */
6917 		if (mb[0] == MBS_COMMAND_ERROR)
6918 			rval = 1;
6919 		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
6920 			/* device not in PCB table */
6921 			rval = 3;
6922 	}
6923 
6924 	return (rval);
6925 }
6926 
6927 /*
6928  *  qla2x00_loop_resync
6929  *      Resync with fibre channel devices.
6930  *
6931  * Input:
6932  *      ha = adapter block pointer.
6933  *
6934  * Returns:
6935  *      0 = success
6936  */
6937 int
qla2x00_loop_resync(scsi_qla_host_t * vha)6938 qla2x00_loop_resync(scsi_qla_host_t *vha)
6939 {
6940 	int rval = QLA_SUCCESS;
6941 	uint32_t wait_time;
6942 
6943 	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6944 	if (vha->flags.online) {
6945 		if (!(rval = qla2x00_fw_ready(vha))) {
6946 			/* Wait at most MAX_TARGET RSCNs for a stable link. */
6947 			wait_time = 256;
6948 			do {
6949 				if (!IS_QLAFX00(vha->hw)) {
6950 					/*
6951 					 * Issue a marker after FW becomes
6952 					 * ready.
6953 					 */
6954 					qla2x00_marker(vha, vha->hw->base_qpair,
6955 					    0, 0, MK_SYNC_ALL);
6956 					vha->marker_needed = 0;
6957 				}
6958 
6959 				/* Remap devices on Loop. */
6960 				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6961 
6962 				if (IS_QLAFX00(vha->hw))
6963 					qlafx00_configure_devices(vha);
6964 				else
6965 					qla2x00_configure_loop(vha);
6966 
6967 				wait_time--;
6968 			} while (!atomic_read(&vha->loop_down_timer) &&
6969 				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6970 				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
6971 				&vha->dpc_flags)));
6972 		}
6973 	}
6974 
6975 	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6976 		return (QLA_FUNCTION_FAILED);
6977 
6978 	if (rval)
6979 		ql_dbg(ql_dbg_disc, vha, 0x206c,
6980 		    "%s *** FAILED ***.\n", __func__);
6981 
6982 	return (rval);
6983 }
6984 
6985 /*
6986 * qla2x00_perform_loop_resync
6987 * Description: This function will set the appropriate flags and call
6988 *              qla2x00_loop_resync. If successful loop will be resynced
6989 * Arguments : scsi_qla_host_t pointer
* return    : Success or Failure
6991 */
6992 
int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
	int32_t rval = 0;

	/* Only one resync may run at a time; bail if one is active. */
	if (test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags))
		return rval;

	/* Configure the flags so that resync happens properly. */
	atomic_set(&ha->loop_down_timer, 0);
	if (ha->device_flags & DFLG_NO_CABLE) {
		/* No cable present: nothing to resync with. */
		atomic_set(&ha->loop_state, LOOP_DEAD);
	} else {
		atomic_set(&ha->loop_state, LOOP_UP);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

		rval = qla2x00_loop_resync(ha);
	}

	clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);

	return rval;
}
7015 
7016 /* Assumes idc_lock always held on entry */
7017 void
qla83xx_reset_ownership(scsi_qla_host_t * vha)7018 qla83xx_reset_ownership(scsi_qla_host_t *vha)
7019 {
7020 	struct qla_hw_data *ha = vha->hw;
7021 	uint32_t drv_presence, drv_presence_mask;
7022 	uint32_t dev_part_info1, dev_part_info2, class_type;
7023 	uint32_t class_type_mask = 0x3;
7024 	uint16_t fcoe_other_function = 0xffff, i;
7025 
7026 	if (IS_QLA8044(ha)) {
7027 		drv_presence = qla8044_rd_direct(vha,
7028 		    QLA8044_CRB_DRV_ACTIVE_INDEX);
7029 		dev_part_info1 = qla8044_rd_direct(vha,
7030 		    QLA8044_CRB_DEV_PART_INFO_INDEX);
7031 		dev_part_info2 = qla8044_rd_direct(vha,
7032 		    QLA8044_CRB_DEV_PART_INFO2);
7033 	} else {
7034 		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
7035 		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
7036 		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
7037 	}
7038 	for (i = 0; i < 8; i++) {
7039 		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
7040 		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
7041 		    (i != ha->portnum)) {
7042 			fcoe_other_function = i;
7043 			break;
7044 		}
7045 	}
7046 	if (fcoe_other_function == 0xffff) {
7047 		for (i = 0; i < 8; i++) {
7048 			class_type = ((dev_part_info2 >> (i * 4)) &
7049 			    class_type_mask);
7050 			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
7051 			    ((i + 8) != ha->portnum)) {
7052 				fcoe_other_function = i + 8;
7053 				break;
7054 			}
7055 		}
7056 	}
7057 	/*
7058 	 * Prepare drv-presence mask based on fcoe functions present.
7059 	 * However consider only valid physical fcoe function numbers (0-15).
7060 	 */
7061 	drv_presence_mask = ~((1 << (ha->portnum)) |
7062 			((fcoe_other_function == 0xffff) ?
7063 			 0 : (1 << (fcoe_other_function))));
7064 
7065 	/* We are the reset owner iff:
7066 	 *    - No other protocol drivers present.
7067 	 *    - This is the lowest among fcoe functions. */
7068 	if (!(drv_presence & drv_presence_mask) &&
7069 			(ha->portnum < fcoe_other_function)) {
7070 		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
7071 		    "This host is Reset owner.\n");
7072 		ha->flags.nic_core_reset_owner = 1;
7073 	}
7074 }
7075 
7076 static int
__qla83xx_set_drv_ack(scsi_qla_host_t * vha)7077 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
7078 {
7079 	int rval = QLA_SUCCESS;
7080 	struct qla_hw_data *ha = vha->hw;
7081 	uint32_t drv_ack;
7082 
7083 	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
7084 	if (rval == QLA_SUCCESS) {
7085 		drv_ack |= (1 << ha->portnum);
7086 		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
7087 	}
7088 
7089 	return rval;
7090 }
7091 
7092 static int
__qla83xx_clear_drv_ack(scsi_qla_host_t * vha)7093 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
7094 {
7095 	int rval = QLA_SUCCESS;
7096 	struct qla_hw_data *ha = vha->hw;
7097 	uint32_t drv_ack;
7098 
7099 	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
7100 	if (rval == QLA_SUCCESS) {
7101 		drv_ack &= ~(1 << ha->portnum);
7102 		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
7103 	}
7104 
7105 	return rval;
7106 }
7107 
7108 /* Assumes idc-lock always held on entry */
7109 void
qla83xx_idc_audit(scsi_qla_host_t * vha,int audit_type)7110 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
7111 {
7112 	struct qla_hw_data *ha = vha->hw;
7113 	uint32_t idc_audit_reg = 0, duration_secs = 0;
7114 
7115 	switch (audit_type) {
7116 	case IDC_AUDIT_TIMESTAMP:
7117 		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
7118 		idc_audit_reg = (ha->portnum) |
7119 		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
7120 		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
7121 		break;
7122 
7123 	case IDC_AUDIT_COMPLETION:
7124 		duration_secs = ((jiffies_to_msecs(jiffies) -
7125 		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
7126 		idc_audit_reg = (ha->portnum) |
7127 		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
7128 		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
7129 		break;
7130 
7131 	default:
7132 		ql_log(ql_log_warn, vha, 0xb078,
7133 		    "Invalid audit type specified.\n");
7134 		break;
7135 	}
7136 }
7137 
/* Assumes idc_lock always held on entry */
/*
 * Initiate (or wait for) an IDC NIC-core reset: if this function is the
 * reset owner and the device is READY, move the IDC state to NEED_RESET;
 * otherwise wait for the owner to do so. In either case, acknowledge the
 * reset by setting our drv-ack bit.
 *
 * Returns QLA_FUNCTION_FAILED when reset is administratively disabled via
 * idc-control, QLA_SUCCESS otherwise.
 */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t  idc_control, dev_state;

	/* Honor the administrative reset-disable bit in idc-control. */
	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		/* Stamp the reset start time into the IDC audit register. */
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n",
				qdev_state(dev_state));

		/* SV: XXX: Is timeout required here? */
		/* Wait for IDC state change READY -> NEED_RESET */
		/*
		 * NOTE(review): this poll has no timeout; if the reset owner
		 * never advances the state it spins indefinitely, dropping
		 * and re-taking the idc lock each 200 ms iteration.
		 */
		while (dev_state == QLA8XXX_DEV_READY) {
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);
			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		}
	}

	/* Send IDC ack by writing to drv-ack register */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}
7179 
/* Write the IDC control register. Caller must hold the idc lock. */
int
__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
{
	return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
7185 
/* Read the IDC control register. Caller must hold the idc lock. */
int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
	return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
7191 
7192 static int
qla83xx_check_driver_presence(scsi_qla_host_t * vha)7193 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
7194 {
7195 	uint32_t drv_presence = 0;
7196 	struct qla_hw_data *ha = vha->hw;
7197 
7198 	qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
7199 	if (drv_presence & (1 << ha->portnum))
7200 		return QLA_SUCCESS;
7201 	else
7202 		return QLA_TEST_FAILED;
7203 }
7204 
7205 int
qla83xx_nic_core_reset(scsi_qla_host_t * vha)7206 qla83xx_nic_core_reset(scsi_qla_host_t *vha)
7207 {
7208 	int rval = QLA_SUCCESS;
7209 	struct qla_hw_data *ha = vha->hw;
7210 
7211 	ql_dbg(ql_dbg_p3p, vha, 0xb058,
7212 	    "Entered  %s().\n", __func__);
7213 
7214 	if (vha->device_flags & DFLG_DEV_FAILED) {
7215 		ql_log(ql_log_warn, vha, 0xb059,
7216 		    "Device in unrecoverable FAILED state.\n");
7217 		return QLA_FUNCTION_FAILED;
7218 	}
7219 
7220 	qla83xx_idc_lock(vha, 0);
7221 
7222 	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
7223 		ql_log(ql_log_warn, vha, 0xb05a,
7224 		    "Function=0x%x has been removed from IDC participation.\n",
7225 		    ha->portnum);
7226 		rval = QLA_FUNCTION_FAILED;
7227 		goto exit;
7228 	}
7229 
7230 	qla83xx_reset_ownership(vha);
7231 
7232 	rval = qla83xx_initiating_reset(vha);
7233 
7234 	/*
7235 	 * Perform reset if we are the reset-owner,
7236 	 * else wait till IDC state changes to READY/FAILED.
7237 	 */
7238 	if (rval == QLA_SUCCESS) {
7239 		rval = qla83xx_idc_state_handler(vha);
7240 
7241 		if (rval == QLA_SUCCESS)
7242 			ha->flags.nic_core_hung = 0;
7243 		__qla83xx_clear_drv_ack(vha);
7244 	}
7245 
7246 exit:
7247 	qla83xx_idc_unlock(vha, 0);
7248 
7249 	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
7250 
7251 	return rval;
7252 }
7253 
/*
 * Capture an MCTP dump from the adapter into a (lazily allocated, reused)
 * DMA-coherent buffer, then — on function 0 only — restart the NIC
 * firmware if no other reset handler is active.
 *
 * Returns QLA_SUCCESS on a successful capture (possibly overwritten by the
 * firmware-restart result on function 0), QLA_FUNCTION_FAILED otherwise.
 */
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	/* Allocate the dump buffer once; it stays mapped for reuse. */
	if (!ha->mctp_dump) {
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR	0x00000000
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	/* Only function 0 restarts NIC firmware, and only when no other
	 * NIC-core reset handler is already running. */
	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed. */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;

}
7307 
/*
* qla2x00_quiesce_io
* Description: This function will block new I/Os.
*              It does not abort any I/Os because the I/O context
*              is not destroyed during quiescence.
* Arguments: scsi_qla_host_t
* return   : void
*/
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	unsigned long flags;

	ql_dbg(ql_dbg_dpc, vha, 0x401d,
	    "Quiescing I/O - ha=%p.\n", ha);

	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);

		/* Mark devices lost on every vport. vref_count pins each
		 * vport while vport_slock is dropped for the call; the
		 * lock is re-taken before advancing the list. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		/* Loop already down: just (re)arm the loop-down timer. */
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
					LOOP_DOWN_TIME);
	}
	/* Wait for pending cmds to complete */
	WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
		     != QLA_SUCCESS);
}
7351 
7352 void
qla2x00_abort_isp_cleanup(scsi_qla_host_t * vha)7353 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
7354 {
7355 	struct qla_hw_data *ha = vha->hw;
7356 	struct scsi_qla_host *vp, *tvp;
7357 	unsigned long flags;
7358 	fc_port_t *fcport;
7359 	u16 i;
7360 
7361 	/* For ISP82XX, driver waits for completion of the commands.
7362 	 * online flag should be set.
7363 	 */
7364 	if (!(IS_P3P_TYPE(ha)))
7365 		vha->flags.online = 0;
7366 	ha->flags.chip_reset_done = 0;
7367 	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
7368 	vha->qla_stats.total_isp_aborts++;
7369 
7370 	ql_log(ql_log_info, vha, 0x00af,
7371 	    "Performing ISP error recovery - ha=%p.\n", ha);
7372 
7373 	ha->flags.purge_mbox = 1;
7374 	/* For ISP82XX, reset_chip is just disabling interrupts.
7375 	 * Driver waits for the completion of the commands.
7376 	 * the interrupts need to be enabled.
7377 	 */
7378 	if (!(IS_P3P_TYPE(ha)))
7379 		ha->isp_ops->reset_chip(vha);
7380 
7381 	ha->link_data_rate = PORT_SPEED_UNKNOWN;
7382 	SAVE_TOPO(ha);
7383 	ha->flags.rida_fmt2 = 0;
7384 	ha->flags.n2n_ae = 0;
7385 	ha->flags.lip_ae = 0;
7386 	ha->current_topology = 0;
7387 	QLA_FW_STOPPED(ha);
7388 	ha->flags.fw_init_done = 0;
7389 	ha->chip_reset++;
7390 	ha->base_qpair->chip_reset = ha->chip_reset;
7391 	ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
7392 	ha->base_qpair->prev_completion_cnt = 0;
7393 	for (i = 0; i < ha->max_qpairs; i++) {
7394 		if (ha->queue_pair_map[i]) {
7395 			ha->queue_pair_map[i]->chip_reset =
7396 				ha->base_qpair->chip_reset;
7397 			ha->queue_pair_map[i]->cmd_cnt =
7398 			    ha->queue_pair_map[i]->cmd_completion_cnt = 0;
7399 			ha->base_qpair->prev_completion_cnt = 0;
7400 		}
7401 	}
7402 
7403 	/* purge MBox commands */
7404 	spin_lock_irqsave(&ha->hardware_lock, flags);
7405 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
7406 		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
7407 		complete(&ha->mbx_intr_comp);
7408 	}
7409 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
7410 
7411 	i = 0;
7412 	while (atomic_read(&ha->num_pend_mbx_stage2) ||
7413 	    atomic_read(&ha->num_pend_mbx_stage1)) {
7414 		msleep(20);
7415 		i++;
7416 		if (i > 50)
7417 			break;
7418 	}
7419 	ha->flags.purge_mbox = 0;
7420 
7421 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
7422 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7423 		atomic_set(&vha->loop_state, LOOP_DOWN);
7424 		qla2x00_mark_all_devices_lost(vha);
7425 
7426 		spin_lock_irqsave(&ha->vport_slock, flags);
7427 		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7428 			atomic_inc(&vp->vref_count);
7429 			spin_unlock_irqrestore(&ha->vport_slock, flags);
7430 
7431 			qla2x00_mark_all_devices_lost(vp);
7432 
7433 			spin_lock_irqsave(&ha->vport_slock, flags);
7434 			atomic_dec(&vp->vref_count);
7435 		}
7436 		spin_unlock_irqrestore(&ha->vport_slock, flags);
7437 	} else {
7438 		if (!atomic_read(&vha->loop_down_timer))
7439 			atomic_set(&vha->loop_down_timer,
7440 			    LOOP_DOWN_TIME);
7441 	}
7442 
7443 	/* Clear all async request states across all VPs. */
7444 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
7445 		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7446 		fcport->scan_state = 0;
7447 	}
7448 	spin_lock_irqsave(&ha->vport_slock, flags);
7449 	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7450 		atomic_inc(&vp->vref_count);
7451 		spin_unlock_irqrestore(&ha->vport_slock, flags);
7452 
7453 		list_for_each_entry(fcport, &vp->vp_fcports, list)
7454 			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7455 
7456 		spin_lock_irqsave(&ha->vport_slock, flags);
7457 		atomic_dec(&vp->vref_count);
7458 	}
7459 	spin_unlock_irqrestore(&ha->vport_slock, flags);
7460 
7461 	/* Make sure for ISP 82XX IO DMA is complete */
7462 	if (IS_P3P_TYPE(ha)) {
7463 		qla82xx_chip_reset_cleanup(vha);
7464 		ql_log(ql_log_info, vha, 0x00b4,
7465 		       "Done chip reset cleanup.\n");
7466 
7467 		/* Done waiting for pending commands. Reset online flag */
7468 		vha->flags.online = 0;
7469 	}
7470 
7471 	/* Requeue all commands in outstanding command list. */
7472 	qla2x00_abort_all_cmds(vha, DID_RESET << 16);
7473 	/* memory barrier */
7474 	wmb();
7475 }
7476 
/*
*  qla2x00_abort_isp
*      Resets ISP and aborts all outstanding commands.
*
* Input:
*      ha           = adapter block pointer.
*
* Returns:
*      0 = success (or recovery skipped/short-circuited)
*      1 = retry scheduled (ISP_ABORT_RETRY set, or retries remaining)
*/
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t        status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		/* Track d-port diagnostic state across the chip reset. */
		vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
		vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;

		if (vha->hw->flags.port_isolated)
			return status;

		/* NOTE(review): message id 0x803f is reused by the two
		 * register-disconnect checks below; ids are normally
		 * unique per message — confirm against the allocation
		 * list before changing. */
		if (qla2x00_isp_reg_stat(ha)) {
			ql_log(ql_log_info, vha, 0x803f,
			       "ISP Abort - ISP reg disconnect, exiting.\n");
			return status;
		}

		/* Abort-to-ROM requested: leave firmware unloaded. */
		if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
			ha->flags.chip_reset_done = 1;
			vha->flags.online = 1;
			status = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			return status;
		}

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		/* PCI channel permanently dead: nothing more to do. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		/* Skip full recovery when initiator mode is not active. */
		switch (vha->qlini_mode) {
		case QLA2XXX_INI_MODE_DISABLED:
			if (!qla_tgt_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_DUAL:
			if (!qla_dual_mode_enabled(vha) &&
			    !qla_ini_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_ENABLED:
		default:
			break;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		if (qla2x00_isp_reg_stat(ha)) {
			ql_log(ql_log_info, vha, 0x803f,
			       "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n");
			return status;
		}
		ha->isp_ops->nvram_config(vha);

		if (qla2x00_isp_reg_stat(ha)) {
			ql_log(ql_log_info, vha, 0x803f,
			       "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
			return status;
		}
		if (!qla2x00_restart_isp(vha)) {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);
			/* Re-arm FCE tracing if it was configured. */
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			/* Re-arm EFT tracing if it was configured. */
			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (ha->isp_abort_cnt == 0) {
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					qla2x00_abort_isp_cleanup(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: start the retry budget. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (vha->hw->flags.port_isolated) {
		qla2x00_abort_isp_cleanup(vha);
		return status;
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		/* Propagate the abort to every vport; vref_count pins the
		 * vport while vport_slock is dropped for the call. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		       __func__);
	}

	return(status);
}
7680 
7681 /*
7682 *  qla2x00_restart_isp
7683 *      restarts the ISP after a reset
7684 *
7685 * Input:
7686 *      ha = adapter block pointer.
7687 *
7688 * Returns:
7689 *      0 = success
7690 */
7691 static int
qla2x00_restart_isp(scsi_qla_host_t * vha)7692 qla2x00_restart_isp(scsi_qla_host_t *vha)
7693 {
7694 	int status;
7695 	struct qla_hw_data *ha = vha->hw;
7696 
7697 	/* If firmware needs to be loaded */
7698 	if (qla2x00_isp_firmware(vha)) {
7699 		vha->flags.online = 0;
7700 		status = ha->isp_ops->chip_diag(vha);
7701 		if (status)
7702 			return status;
7703 		status = qla2x00_setup_chip(vha);
7704 		if (status)
7705 			return status;
7706 	}
7707 
7708 	status = qla2x00_init_rings(vha);
7709 	if (status)
7710 		return status;
7711 
7712 	clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7713 	ha->flags.chip_reset_done = 1;
7714 
7715 	/* Initialize the queues in use */
7716 	qla25xx_init_queues(ha);
7717 
7718 	status = qla2x00_fw_ready(vha);
7719 	if (status) {
7720 		/* if no cable then assume it's good */
7721 		return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
7722 	}
7723 
7724 	/* Issue a marker after FW becomes ready. */
7725 	qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
7726 	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
7727 
7728 	return 0;
7729 }
7730 
7731 static int
qla25xx_init_queues(struct qla_hw_data * ha)7732 qla25xx_init_queues(struct qla_hw_data *ha)
7733 {
7734 	struct rsp_que *rsp = NULL;
7735 	struct req_que *req = NULL;
7736 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7737 	int ret = -1;
7738 	int i;
7739 
7740 	for (i = 1; i < ha->max_rsp_queues; i++) {
7741 		rsp = ha->rsp_q_map[i];
7742 		if (rsp && test_bit(i, ha->rsp_qid_map)) {
7743 			rsp->options &= ~BIT_0;
7744 			ret = qla25xx_init_rsp_que(base_vha, rsp);
7745 			if (ret != QLA_SUCCESS)
7746 				ql_dbg(ql_dbg_init, base_vha, 0x00ff,
7747 				    "%s Rsp que: %d init failed.\n",
7748 				    __func__, rsp->id);
7749 			else
7750 				ql_dbg(ql_dbg_init, base_vha, 0x0100,
7751 				    "%s Rsp que: %d inited.\n",
7752 				    __func__, rsp->id);
7753 		}
7754 	}
7755 	for (i = 1; i < ha->max_req_queues; i++) {
7756 		req = ha->req_q_map[i];
7757 		if (req && test_bit(i, ha->req_qid_map)) {
7758 			/* Clear outstanding commands array. */
7759 			req->options &= ~BIT_0;
7760 			ret = qla25xx_init_req_que(base_vha, req);
7761 			if (ret != QLA_SUCCESS)
7762 				ql_dbg(ql_dbg_init, base_vha, 0x0101,
7763 				    "%s Req que: %d init failed.\n",
7764 				    __func__, req->id);
7765 			else
7766 				ql_dbg(ql_dbg_init, base_vha, 0x0102,
7767 				    "%s Req que: %d inited.\n",
7768 				    __func__, req->id);
7769 		}
7770 	}
7771 	return ret;
7772 }
7773 
/*
* qla2x00_reset_adapter
*      Reset adapter.
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      QLA_SUCCESS (always).
*/
int
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Take the port offline and silence interrupts before touching
	 * the RISC. */
	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Reset then release the RISC; each write is followed by a read
	 * to flush the PCI posted write. Order matters. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
7800 
/*
 * Reset the ISP24xx-family adapter: offline the port, disable interrupts,
 * then reset and un-pause the RISC. No-op for P3P (ISP82XX) parts.
 *
 * Returns QLA_SUCCESS (always).
 */
int
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Reset then release the RISC; the read after each write flushes
	 * the PCI posted write. Order matters. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);
	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Polled-mode parts need interrupts back on immediately. */
	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return QLA_SUCCESS;
}
7826 
/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
	struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	/* Copy OpenFirmware "port-wwn"/"node-wwn" properties into the
	 * NVRAM image when present and at least WWN_SIZE bytes long. */
	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}
7849 
/**
 * qla24xx_nvram_config() - Read and validate ISP24xx NVRAM, then derive
 * the initialization control block (ICB) and driver parameters from it.
 * @vha: host adapter
 *
 * When the NVRAM checksum/id/version check fails, a hard-coded default
 * configuration (with known-invalid WWPN) is used instead.
 *
 * Returns QLA_SUCCESS when NVRAM was valid, 1 after falling back to the
 * defaults (a warning is logged in that case).
 */
int
qla24xx_nvram_config(scsi_qla_host_t *vha)
{
	int   rval;
	struct init_cb_24xx *icb;
	struct nvram_24xx *nv;
	__le32 *dptr;
	uint8_t  *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_24xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	if (ha->port_no == 0) {
		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
	} else {
		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
	}

	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_nvram(vha, ha->vpd,
	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);

	/* Get NVRAM data into cache and calculate checksum. */
	dptr = (__force __le32 *)nv;
	ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
	/* A valid image sums (mod 2^32) to zero over all dwords. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
	    "Contents of NVRAM\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x006b,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
		ql_log(ql_log_warn, vha, 0x006c,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = cpu_to_le16(2048);
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		nv->hard_address = cpu_to_le16(124);
		/* Placeholder QLogic-OUI WWPN/WWNN; port byte made unique
		 * per PCI function. */
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		/* On SPARC, prefer firmware-property WWNs over the above. */
		qla24xx_nvram_wwn_from_ofw(vha, nv);
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(30);

		/* Non-zero return flags that defaults were applied. */
		rval = 1;
	}

	if (qla_tgt_mode_enabled(vha)) {
		/* Don't enable full login after initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Don't enable LIP full login for initiator */
		nv->host_p &= cpu_to_le32(~BIT_10);
	}

	qlt_24xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;
	icb->link_down_on_nos = nv->link_down_on_nos;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_3 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLA2462");

	qlt_24xx_config_nvram_stage2(vha, icb);

	if (nv->host_p & cpu_to_le32(BIT_15)) {
		/* Use alternate WWN? */
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options24));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout =	le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		/* First-time init: latch ZIO mode/timer from the ICB. */
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		/* Any enabled ZIO mode is normalized to mode 6. */
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x006f,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0070,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
8109 
8110 static void
qla27xx_print_image(struct scsi_qla_host * vha,char * name,struct qla27xx_image_status * image_status)8111 qla27xx_print_image(struct scsi_qla_host *vha, char *name,
8112     struct qla27xx_image_status *image_status)
8113 {
8114 	ql_dbg(ql_dbg_init, vha, 0x018b,
8115 	    "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
8116 	    name, "status",
8117 	    image_status->image_status_mask,
8118 	    le16_to_cpu(image_status->generation),
8119 	    image_status->ver_major,
8120 	    image_status->ver_minor,
8121 	    image_status->bitmap,
8122 	    le32_to_cpu(image_status->checksum),
8123 	    le32_to_cpu(image_status->signature));
8124 }
8125 
8126 static bool
qla28xx_check_aux_image_status_signature(struct qla27xx_image_status * image_status)8127 qla28xx_check_aux_image_status_signature(
8128     struct qla27xx_image_status *image_status)
8129 {
8130 	ulong signature = le32_to_cpu(image_status->signature);
8131 
8132 	return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
8133 }
8134 
8135 static bool
qla27xx_check_image_status_signature(struct qla27xx_image_status * image_status)8136 qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
8137 {
8138 	ulong signature = le32_to_cpu(image_status->signature);
8139 
8140 	return
8141 	    signature != QLA27XX_IMG_STATUS_SIGN &&
8142 	    signature != QLA28XX_IMG_STATUS_SIGN;
8143 }
8144 
8145 static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status * image_status)8146 qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
8147 {
8148 	__le32 *p = (__force __le32 *)image_status;
8149 	uint n = sizeof(*image_status) / sizeof(*p);
8150 	uint32_t sum = 0;
8151 
8152 	for ( ; n--; p++)
8153 		sum += le32_to_cpup(p);
8154 
8155 	return sum;
8156 }
8157 
8158 static inline uint
qla28xx_component_bitmask(struct qla27xx_image_status * aux,uint bitmask)8159 qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
8160 {
8161 	return aux->bitmap & bitmask ?
8162 	    QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
8163 }
8164 
/*
 * Translate the winning aux image's bitmap into per-region
 * primary/secondary selections in active_regions->aux.
 */
static void
qla28xx_component_status(
    struct active_regions *active_regions, struct qla27xx_image_status *aux)
{
	/* Each QLA28XX_AUX_IMG_* bit independently selects primary or
	 * secondary flash for its region. */
	active_regions->aux.board_config =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);

	active_regions->aux.vpd_nvram =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);

	active_regions->aux.npiv_config_0_1 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);

	active_regions->aux.npiv_config_2_3 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);

	active_regions->aux.nvme_params =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS);
}
8184 
/*
 * Compare primary vs. secondary image generation counters.
 *
 * Returns >= 0 when the primary is at least as new as the secondary,
 * negative when the secondary is newer.  The difference is deliberately
 * narrowed to int16_t so that wrap-around of the 16-bit generation
 * counter is handled correctly.
 */
static int
qla27xx_compare_image_generation(
    struct qla27xx_image_status *pri_image_status,
    struct qla27xx_image_status *sec_image_status)
{
	/* calculate generation delta as uint16 (this accounts for wrap) */
	int16_t delta =
	    le16_to_cpu(pri_image_status->generation) -
	    le16_to_cpu(sec_image_status->generation);

	ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);

	return delta;
}
8199 
8200 void
qla28xx_get_aux_images(struct scsi_qla_host * vha,struct active_regions * active_regions)8201 qla28xx_get_aux_images(
8202 	struct scsi_qla_host *vha, struct active_regions *active_regions)
8203 {
8204 	struct qla_hw_data *ha = vha->hw;
8205 	struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
8206 	bool valid_pri_image = false, valid_sec_image = false;
8207 	bool active_pri_image = false, active_sec_image = false;
8208 
8209 	if (!ha->flt_region_aux_img_status_pri) {
8210 		ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
8211 		goto check_sec_image;
8212 	}
8213 
8214 	qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
8215 	    ha->flt_region_aux_img_status_pri,
8216 	    sizeof(pri_aux_image_status) >> 2);
8217 	qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
8218 
8219 	if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
8220 		ql_dbg(ql_dbg_init, vha, 0x018b,
8221 		    "Primary aux image signature (%#x) not valid\n",
8222 		    le32_to_cpu(pri_aux_image_status.signature));
8223 		goto check_sec_image;
8224 	}
8225 
8226 	if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
8227 		ql_dbg(ql_dbg_init, vha, 0x018c,
8228 		    "Primary aux image checksum failed\n");
8229 		goto check_sec_image;
8230 	}
8231 
8232 	valid_pri_image = true;
8233 
8234 	if (pri_aux_image_status.image_status_mask & 1) {
8235 		ql_dbg(ql_dbg_init, vha, 0x018d,
8236 		    "Primary aux image is active\n");
8237 		active_pri_image = true;
8238 	}
8239 
8240 check_sec_image:
8241 	if (!ha->flt_region_aux_img_status_sec) {
8242 		ql_dbg(ql_dbg_init, vha, 0x018a,
8243 		    "Secondary aux image not addressed\n");
8244 		goto check_valid_image;
8245 	}
8246 
8247 	qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
8248 	    ha->flt_region_aux_img_status_sec,
8249 	    sizeof(sec_aux_image_status) >> 2);
8250 	qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
8251 
8252 	if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
8253 		ql_dbg(ql_dbg_init, vha, 0x018b,
8254 		    "Secondary aux image signature (%#x) not valid\n",
8255 		    le32_to_cpu(sec_aux_image_status.signature));
8256 		goto check_valid_image;
8257 	}
8258 
8259 	if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
8260 		ql_dbg(ql_dbg_init, vha, 0x018c,
8261 		    "Secondary aux image checksum failed\n");
8262 		goto check_valid_image;
8263 	}
8264 
8265 	valid_sec_image = true;
8266 
8267 	if (sec_aux_image_status.image_status_mask & 1) {
8268 		ql_dbg(ql_dbg_init, vha, 0x018d,
8269 		    "Secondary aux image is active\n");
8270 		active_sec_image = true;
8271 	}
8272 
8273 check_valid_image:
8274 	if (valid_pri_image && active_pri_image &&
8275 	    valid_sec_image && active_sec_image) {
8276 		if (qla27xx_compare_image_generation(&pri_aux_image_status,
8277 		    &sec_aux_image_status) >= 0) {
8278 			qla28xx_component_status(active_regions,
8279 			    &pri_aux_image_status);
8280 		} else {
8281 			qla28xx_component_status(active_regions,
8282 			    &sec_aux_image_status);
8283 		}
8284 	} else if (valid_pri_image && active_pri_image) {
8285 		qla28xx_component_status(active_regions, &pri_aux_image_status);
8286 	} else if (valid_sec_image && active_sec_image) {
8287 		qla28xx_component_status(active_regions, &sec_aux_image_status);
8288 	}
8289 
8290 	ql_dbg(ql_dbg_init, vha, 0x018f,
8291 	    "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n",
8292 	    active_regions->aux.board_config,
8293 	    active_regions->aux.vpd_nvram,
8294 	    active_regions->aux.npiv_config_0_1,
8295 	    active_regions->aux.npiv_config_2_3,
8296 	    active_regions->aux.nvme_params);
8297 }
8298 
8299 void
qla27xx_get_active_image(struct scsi_qla_host * vha,struct active_regions * active_regions)8300 qla27xx_get_active_image(struct scsi_qla_host *vha,
8301     struct active_regions *active_regions)
8302 {
8303 	struct qla_hw_data *ha = vha->hw;
8304 	struct qla27xx_image_status pri_image_status, sec_image_status;
8305 	bool valid_pri_image = false, valid_sec_image = false;
8306 	bool active_pri_image = false, active_sec_image = false;
8307 
8308 	if (!ha->flt_region_img_status_pri) {
8309 		ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
8310 		goto check_sec_image;
8311 	}
8312 
8313 	if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
8314 	    ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
8315 	    QLA_SUCCESS) {
8316 		WARN_ON_ONCE(true);
8317 		goto check_sec_image;
8318 	}
8319 	qla27xx_print_image(vha, "Primary image", &pri_image_status);
8320 
8321 	if (qla27xx_check_image_status_signature(&pri_image_status)) {
8322 		ql_dbg(ql_dbg_init, vha, 0x018b,
8323 		    "Primary image signature (%#x) not valid\n",
8324 		    le32_to_cpu(pri_image_status.signature));
8325 		goto check_sec_image;
8326 	}
8327 
8328 	if (qla27xx_image_status_checksum(&pri_image_status)) {
8329 		ql_dbg(ql_dbg_init, vha, 0x018c,
8330 		    "Primary image checksum failed\n");
8331 		goto check_sec_image;
8332 	}
8333 
8334 	valid_pri_image = true;
8335 
8336 	if (pri_image_status.image_status_mask & 1) {
8337 		ql_dbg(ql_dbg_init, vha, 0x018d,
8338 		    "Primary image is active\n");
8339 		active_pri_image = true;
8340 	}
8341 
8342 check_sec_image:
8343 	if (!ha->flt_region_img_status_sec) {
8344 		ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
8345 		goto check_valid_image;
8346 	}
8347 
8348 	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
8349 	    ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
8350 	qla27xx_print_image(vha, "Secondary image", &sec_image_status);
8351 
8352 	if (qla27xx_check_image_status_signature(&sec_image_status)) {
8353 		ql_dbg(ql_dbg_init, vha, 0x018b,
8354 		    "Secondary image signature (%#x) not valid\n",
8355 		    le32_to_cpu(sec_image_status.signature));
8356 		goto check_valid_image;
8357 	}
8358 
8359 	if (qla27xx_image_status_checksum(&sec_image_status)) {
8360 		ql_dbg(ql_dbg_init, vha, 0x018c,
8361 		    "Secondary image checksum failed\n");
8362 		goto check_valid_image;
8363 	}
8364 
8365 	valid_sec_image = true;
8366 
8367 	if (sec_image_status.image_status_mask & 1) {
8368 		ql_dbg(ql_dbg_init, vha, 0x018d,
8369 		    "Secondary image is active\n");
8370 		active_sec_image = true;
8371 	}
8372 
8373 check_valid_image:
8374 	if (valid_pri_image && active_pri_image)
8375 		active_regions->global = QLA27XX_PRIMARY_IMAGE;
8376 
8377 	if (valid_sec_image && active_sec_image) {
8378 		if (!active_regions->global ||
8379 		    qla27xx_compare_image_generation(
8380 			&pri_image_status, &sec_image_status) < 0) {
8381 			active_regions->global = QLA27XX_SECONDARY_IMAGE;
8382 		}
8383 	}
8384 
8385 	ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
8386 	    active_regions->global == QLA27XX_DEFAULT_IMAGE ?
8387 		"default (boot/fw)" :
8388 	    active_regions->global == QLA27XX_PRIMARY_IMAGE ?
8389 		"primary" :
8390 	    active_regions->global == QLA27XX_SECONDARY_IMAGE ?
8391 		"secondary" : "invalid",
8392 	    active_regions->global);
8393 }
8394 
/*
 * A RISC firmware header is invalid when dwords 4-7 are either all
 * zeroes or all ones (blank/erased flash).
 */
bool qla24xx_risc_firmware_invalid(uint32_t *dword)
{
	uint32_t or_acc = dword[4] | dword[5] | dword[6] | dword[7];
	uint32_t and_acc = dword[4] & dword[5] & dword[6] & dword[7];

	return or_acc == 0 || and_acc == ~0U;
}
8401 
/**
 * qla24xx_load_risc_flash() - Load RISC firmware segments (and, on
 * ISP27xx/28xx, firmware dump templates) from flash into the chip.
 * @vha: host adapter
 * @srisc_addr: out; set to the first segment's RISC load address
 * @faddr: flash address of the firmware image
 *
 * The request ring is borrowed as a DMA-able staging buffer.  Template
 * load failures are intentionally non-fatal: by that point the firmware
 * itself has already been loaded, so QLA_SUCCESS is still returned.
 */
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
    uint32_t faddr)
{
	int rval;
	uint templates, segments, fragment;
	ulong i;
	uint j;
	ulong dlen;
	uint32_t *dcode;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "FW: Loading firmware from flash (%x).\n", faddr);

	/* Sanity-check the image header before committing to the load. */
	dcode = (uint32_t *)req->ring;
	qla24xx_read_flash_data(vha, dcode, faddr, 8);
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x008c,
		    "Unable to verify the integrity of flash firmware "
		    "image.\n");
		ql_log(ql_log_fatal, vha, 0x008d,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);

		return QLA_FUNCTION_FAILED;
	}

	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x008d,
		    "-> Loading segment %u...\n", j);
		/* Segment header: dword 2 = load address, 3 = size. */
		qla24xx_read_flash_data(vha, dcode, faddr, 10);
		risc_addr = be32_to_cpu((__force __be32)dcode[2]);
		risc_size = be32_to_cpu((__force __be32)dcode[3]);
		if (!*srisc_addr) {
			/* First segment also carries the attributes dword. */
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu((__force __be32)dcode[9]);
		}

		/* Transfer each segment in fw_transfer_size fragments. */
		dlen = ha->fw_transfer_size >> 2;
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x008e,
			    "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
			    fragment, risc_addr, faddr, dlen);
			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
			/* Flash data is byte-swapped relative to load order. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(dcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008f,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			faddr += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	/* BIT_9 in the attributes advertises a second dump template. */
	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		/* Drop any template left over from a previous load. */
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		dcode = (uint32_t *)req->ring;
		qla24xx_read_flash_data(vha, dcode, faddr, 7);
		risc_size = be32_to_cpu((__force __be32)dcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0161,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, faddr, risc_size);
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0162,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		faddr += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0163,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0164,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		qla24xx_read_flash_data(vha, dcode, faddr, risc_size);

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0165,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0166,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0167,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0168,
		    "-> fwdt%u loaded template ok\n", j);

		faddr += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/* Template load is best-effort; firmware is already in -- report
	 * success with no dump template. */
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}
8545 
#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"

/**
 * qla2x00_load_risc() - Load ISP2x00-family RISC firmware from a
 * request-firmware blob (16-bit word format).
 * @vha: host adapter
 * @srisc_addr: out; set to the first segment's RISC load address
 *
 * The request ring is borrowed as a DMA-able staging buffer.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED when the blob is missing
 * or fails integrity/size checks.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int	rval;
	int	i, fragment;
	uint16_t *wcode;
	__be16	 *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (__force __be16 *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	/* All-ones or all-zeroes version words => blank/corrupt image. */
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
		wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	/* Walk the zero-terminated segment address list. */
	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			/* Blob data is big-endian; swap into load order. */
			for (i = 0; i < wlen; i++)
				wcode[i] = swab16((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
8647 
/**
 * qla24xx_load_risc_blob() - Load ISP24xx+ RISC firmware (and, on
 * ISP27xx/28xx, firmware dump templates) from a request-firmware blob.
 * @vha: host adapter
 * @srisc_addr: out; set to the first segment's RISC load address
 *
 * The 32-bit-dword counterpart of qla2x00_load_risc(); the request ring
 * is borrowed as a DMA-able staging buffer.  Template load failures are
 * intentionally non-fatal (QLA_SUCCESS is still returned).
 */
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int	rval;
	uint templates, segments, fragment;
	uint32_t *dcode;
	ulong dlen;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	ulong i;
	uint j;
	struct fw_blob *blob;
	__be32 *fwcode;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x0090,
	    "-> FW: Loading via request-firmware.\n");

	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_warn, vha, 0x0092,
		    "-> Firmware file not found.\n");

		return QLA_FUNCTION_FAILED;
	}

	/* Reject blank/corrupt images before touching the hardware. */
	fwcode = (__force __be32 *)blob->fw->data;
	dcode = (__force uint32_t *)fwcode;
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x0093,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		ql_log(ql_log_fatal, vha, 0x0095,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);
		return QLA_FUNCTION_FAILED;
	}

	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x0096,
		    "-> Loading segment %u...\n", j);
		/* Segment header: dword 2 = load address, 3 = size. */
		risc_addr = be32_to_cpu(fwcode[2]);
		risc_size = be32_to_cpu(fwcode[3]);

		if (!*srisc_addr) {
			/* First segment also carries the attributes dword. */
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu(fwcode[9]);
		}

		/* Transfer each segment in fw_transfer_size fragments. */
		dlen = ha->fw_transfer_size >> 2;
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x0097,
			    "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
			    fragment, risc_addr,
			    (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
			    dlen);

			/* Blob data is big-endian; swap into load order. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x0098,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	/* BIT_9 in the attributes advertises a second dump template. */
	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		/* Drop any template left over from a previous load. */
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		risc_size = be32_to_cpu(fwcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0171,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
		    risc_size);
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0172,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		fwcode += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0173,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0174,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		for (i = 0; i < risc_size; i++)
			dcode[i] = (__force u32)fwcode[i];

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0175,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0176,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0177,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0178,
		    "-> fwdt%u loaded template ok\n", j);

		fwcode += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/* Template load is best-effort; firmware is already in -- report
	 * success with no dump template. */
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}
8801 
8802 int
qla24xx_load_risc(scsi_qla_host_t * vha,uint32_t * srisc_addr)8803 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8804 {
8805 	int rval;
8806 
8807 	if (ql2xfwloadbin == 1)
8808 		return qla81xx_load_risc(vha, srisc_addr);
8809 
8810 	/*
8811 	 * FW Load priority:
8812 	 * 1) Firmware via request-firmware interface (.bin file).
8813 	 * 2) Firmware residing in flash.
8814 	 */
8815 	rval = qla24xx_load_risc_blob(vha, srisc_addr);
8816 	if (rval == QLA_SUCCESS)
8817 		return rval;
8818 
8819 	return qla24xx_load_risc_flash(vha, srisc_addr,
8820 	    vha->hw->flt_region_fw);
8821 }
8822 
/*
 * qla81xx_load_risc() - Load RISC firmware, preferring on-flash images.
 * @vha: adapter scsi host pointer.
 * @srisc_addr: returned firmware start address.
 *
 * Tries, in order: the active flash image (secondary image first on
 * ISP27xx/28xx when it is the active one), the request-firmware .bin
 * blob, and finally the golden (recovery) firmware from flash.
 */
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct active_regions active_regions = { };

	/* Module parameter ql2xfwloadbin == 2 forces the .bin path. */
	if (ql2xfwloadbin == 2)
		goto try_blob_fw;

	/* FW Load priority:
	 * 1) Firmware residing in flash.
	 * 2) Firmware via request-firmware interface (.bin file).
	 * 3) Golden-Firmware residing in flash -- (limited operation).
	 */

	/* Only ISP27xx/28xx carry primary/secondary flash fw regions. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto try_primary_fw;

	qla27xx_get_active_image(vha, &active_regions);

	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
		goto try_primary_fw;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading secondary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
	if (!rval)
		return rval;

try_primary_fw:
	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading primary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
	if (!rval)
		return rval;

try_blob_fw:
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (!rval || !ha->flt_region_gold_fw)
		return rval;

	/* All regular images failed; attempt the golden firmware, which
	 * provides limited operation until flash is re-flashed.
	 */
	ql_log(ql_log_info, vha, 0x0099,
	    "Attempting to fallback to golden firmware.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
	if (rval)
		return rval;

	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
	ha->flags.running_gold_fw = 1;
	return rval;
}
8875 
8876 void
qla2x00_try_to_stop_firmware(scsi_qla_host_t * vha)8877 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
8878 {
8879 	int ret, retries;
8880 	struct qla_hw_data *ha = vha->hw;
8881 
8882 	if (ha->flags.pci_channel_io_perm_failure)
8883 		return;
8884 	if (!IS_FWI2_CAPABLE(ha))
8885 		return;
8886 	if (!ha->fw_major_version)
8887 		return;
8888 	if (!ha->flags.fw_started)
8889 		return;
8890 
8891 	ret = qla2x00_stop_firmware(vha);
8892 	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
8893 	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
8894 		ha->isp_ops->reset_chip(vha);
8895 		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
8896 			continue;
8897 		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
8898 			continue;
8899 		ql_log(ql_log_info, vha, 0x8015,
8900 		    "Attempting retry of stop-firmware command.\n");
8901 		ret = qla2x00_stop_firmware(vha);
8902 	}
8903 
8904 	QLA_FW_STOPPED(ha);
8905 	ha->flags.fw_init_done = 0;
8906 }
8907 
/*
 * qla24xx_configure_vhba() - Bring an NPIV virtual port online.
 * @vha: virtual port scsi host pointer (vp_idx must be non-zero).
 *
 * Waits for firmware readiness on the base (physical) port, performs
 * the fabric login of the vport to the SNS, and triggers a loop
 * resync on the base port.
 *
 * Returns QLA_SUCCESS on success, -EINVAL if called on the base port,
 * or a QLA_* failure code.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	/* This path applies only to virtual ports. */
	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	/* SNS login succeeded: mark the loop up and request a resync. */
	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}
8953 
8954 /* 84XX Support **************************************************************/
8955 
/* Registry of CS84xx chip state objects shared by HBAs on one PCI bus. */
static LIST_HEAD(qla_cs84xx_list);
/* Serializes lookup/insert/removal on qla_cs84xx_list. */
static DEFINE_MUTEX(qla_cs84xx_mutex);
8958 
8959 static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host * vha)8960 qla84xx_get_chip(struct scsi_qla_host *vha)
8961 {
8962 	struct qla_chip_state_84xx *cs84xx;
8963 	struct qla_hw_data *ha = vha->hw;
8964 
8965 	mutex_lock(&qla_cs84xx_mutex);
8966 
8967 	/* Find any shared 84xx chip. */
8968 	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
8969 		if (cs84xx->bus == ha->pdev->bus) {
8970 			kref_get(&cs84xx->kref);
8971 			goto done;
8972 		}
8973 	}
8974 
8975 	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
8976 	if (!cs84xx)
8977 		goto done;
8978 
8979 	kref_init(&cs84xx->kref);
8980 	spin_lock_init(&cs84xx->access_lock);
8981 	mutex_init(&cs84xx->fw_update_mutex);
8982 	cs84xx->bus = ha->pdev->bus;
8983 
8984 	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
8985 done:
8986 	mutex_unlock(&qla_cs84xx_mutex);
8987 	return cs84xx;
8988 }
8989 
8990 static void
__qla84xx_chip_release(struct kref * kref)8991 __qla84xx_chip_release(struct kref *kref)
8992 {
8993 	struct qla_chip_state_84xx *cs84xx =
8994 	    container_of(kref, struct qla_chip_state_84xx, kref);
8995 
8996 	mutex_lock(&qla_cs84xx_mutex);
8997 	list_del(&cs84xx->list);
8998 	mutex_unlock(&qla_cs84xx_mutex);
8999 	kfree(cs84xx);
9000 }
9001 
9002 void
qla84xx_put_chip(struct scsi_qla_host * vha)9003 qla84xx_put_chip(struct scsi_qla_host *vha)
9004 {
9005 	struct qla_hw_data *ha = vha->hw;
9006 
9007 	if (ha->cs84xx)
9008 		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
9009 }
9010 
9011 static int
qla84xx_init_chip(scsi_qla_host_t * vha)9012 qla84xx_init_chip(scsi_qla_host_t *vha)
9013 {
9014 	int rval;
9015 	uint16_t status[2];
9016 	struct qla_hw_data *ha = vha->hw;
9017 
9018 	mutex_lock(&ha->cs84xx->fw_update_mutex);
9019 
9020 	rval = qla84xx_verify_chip(vha, status);
9021 
9022 	mutex_unlock(&ha->cs84xx->fw_update_mutex);
9023 
9024 	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
9025 	    QLA_SUCCESS;
9026 }
9027 
9028 /* 81XX Support **************************************************************/
9029 
/*
 * qla81xx_nvram_config() - Read NVRAM and build the ISP81xx-family
 * initialization control block (ICB).
 * @vha: adapter scsi host pointer.
 *
 * Caches VPD and NVRAM from flash (selecting the secondary image on
 * ISP28xx when active), validates checksum/id/version, substitutes
 * hard-coded defaults on corruption, then derives driver and firmware
 * operating parameters (timeouts, retry counts, ZIO mode, etc.).
 *
 * Returns 0 on success, 1 if defaults had to be used (invalid NVRAM).
 */
int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int   rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	__le32 *dptr;
	uint8_t  *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;
	uint32_t faddr;
	struct active_regions active_regions = { };

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;
	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
		ha->vpd_size = FA_VPD_SIZE_82XX;

	/* ISP27xx/28xx may carry primary/secondary aux (VPD/NVRAM) images. */
	if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
		qla28xx_get_aux_images(vha, &active_regions);

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;

	faddr = ha->flt_region_vpd;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_vpd_sec;
		ql_dbg(ql_dbg_init, vha, 0x0110,
		    "Loading %s nvram image.\n",
		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
		    "primary" : "secondary");
	}
	/* faddr is in 32-bit words; shift converts to a byte offset. */
	ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);

	/* Get NVRAM data into cache and calculate checksum. */
	faddr = ha->flt_region_nvram;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_nvram_sec;
	}
	ql_dbg(ql_dbg_init, vha, 0x0110,
	    "Loading %s nvram image.\n",
	    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
	    "primary" : "secondary");
	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);

	/* Additive 32-bit checksum over the full NVRAM; must sum to 0. */
	dptr = (__force __le32 *)nv;
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
	    "Contents of NVRAM:\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_info, vha, 0x0073,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, le16_to_cpu(nv->nvram_version));
		ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
		ql_log(ql_log_info, vha, 0x0074,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 * Note: the default WWPN/WWNN below are fixed (not unique)
		 * and only vary by port number -- hence "invalid" above.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = cpu_to_le16(2048);
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(180);
		nv->enode_mac[0] = 0x00;
		nv->enode_mac[1] = 0xC0;
		nv->enode_mac[2] = 0xDD;
		nv->enode_mac[3] = 0x04;
		nv->enode_mac[4] = 0x05;
		nv->enode_mac[5] = 0x06 + ha->port_no + 1;

		rval = 1;
	}

	/* T10-PI needs the frame payload rounded down to a multiple of 8. */
	if (IS_T10_PI_CAPABLE(ha))
		nv->frame_payload_size &= cpu_to_le16(~7);

	qlt_81xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_5 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
		icb->enode_mac[0] = 0x00;
		icb->enode_mac[1] = 0xC0;
		icb->enode_mac[2] = 0xDD;
		icb->enode_mac[3] = 0x04;
		icb->enode_mac[4] = 0x05;
		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
	}

	/* Use extended-initialization control block. */
	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLE8XXX");

	qlt_81xx_config_nvram_stage2(vha, icb);

	/* Use alternate WWN? */
	if (nv->host_p & cpu_to_le32(BIT_15)) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
		/* BIT_7 clear means SCM is supported -- per this code path. */
		if ((nv->enhanced_features & BIT_7) == 0)
			ha->flags.scm_supported_a = 1;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout =	le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* if not running MSI-X we need handshaking on interrupts */
	if (!vha->hw->flags.msix_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
		icb->firmware_options_2 |= cpu_to_le32(BIT_22);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	 /* enable RIDA Format2 */
	icb->firmware_options_3 |= cpu_to_le32(BIT_0);

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Determine NVMe/FCP priority for target ports */
	ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
9336 
/*
 * qla82xx_restart_isp() - Restart the ISP82xx after a chip reset.
 * @vha: adapter scsi host pointer.
 *
 * Re-initializes the rings, waits for firmware readiness, re-enables
 * interrupts and the FCE/EFT trace buffers, and finally recovers every
 * virtual port. NOTE(review): the step ordering appears to mirror the
 * generic restart path and looks order-dependent; keep as-is.
 *
 * Returns 0 on success, non-zero on failure.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/* Re-arm the FCE trace buffer if it was allocated. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		/* Re-arm the EFT trace buffer if it was allocated. */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		/* Abort/recover each vport; the vref_count pin keeps the
		 * vport alive while the slock is dropped around the call.
		 */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}
9434 
9435 /*
9436  * qla24xx_get_fcp_prio
9437  *	Gets the fcp cmd priority value for the logged in port.
9438  *	Looks for a match of the port descriptors within
9439  *	each of the fcp prio config entries. If a match is found,
9440  *	the tag (priority) value is returned.
9441  *
9442  * Input:
9443  *	vha = scsi host structure pointer.
9444  *	fcport = port structure pointer.
9445  *
9446  * Return:
9447  *	non-zero (if found)
9448  *	-1 (if not found)
9449  *
9450  * Context:
9451  * 	Kernel context
9452  */
9453 static int
qla24xx_get_fcp_prio(scsi_qla_host_t * vha,fc_port_t * fcport)9454 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
9455 {
9456 	int i, entries;
9457 	uint8_t pid_match, wwn_match;
9458 	int priority;
9459 	uint32_t pid1, pid2;
9460 	uint64_t wwn1, wwn2;
9461 	struct qla_fcp_prio_entry *pri_entry;
9462 	struct qla_hw_data *ha = vha->hw;
9463 
9464 	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
9465 		return -1;
9466 
9467 	priority = -1;
9468 	entries = ha->fcp_prio_cfg->num_entries;
9469 	pri_entry = &ha->fcp_prio_cfg->entry[0];
9470 
9471 	for (i = 0; i < entries; i++) {
9472 		pid_match = wwn_match = 0;
9473 
9474 		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
9475 			pri_entry++;
9476 			continue;
9477 		}
9478 
9479 		/* check source pid for a match */
9480 		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
9481 			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
9482 			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
9483 			if (pid1 == INVALID_PORT_ID)
9484 				pid_match++;
9485 			else if (pid1 == pid2)
9486 				pid_match++;
9487 		}
9488 
9489 		/* check destination pid for a match */
9490 		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
9491 			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
9492 			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
9493 			if (pid1 == INVALID_PORT_ID)
9494 				pid_match++;
9495 			else if (pid1 == pid2)
9496 				pid_match++;
9497 		}
9498 
9499 		/* check source WWN for a match */
9500 		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
9501 			wwn1 = wwn_to_u64(vha->port_name);
9502 			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
9503 			if (wwn2 == (uint64_t)-1)
9504 				wwn_match++;
9505 			else if (wwn1 == wwn2)
9506 				wwn_match++;
9507 		}
9508 
9509 		/* check destination WWN for a match */
9510 		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
9511 			wwn1 = wwn_to_u64(fcport->port_name);
9512 			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
9513 			if (wwn2 == (uint64_t)-1)
9514 				wwn_match++;
9515 			else if (wwn1 == wwn2)
9516 				wwn_match++;
9517 		}
9518 
9519 		if (pid_match == 2 || wwn_match == 2) {
9520 			/* Found a matching entry */
9521 			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
9522 				priority = pri_entry->tag;
9523 			break;
9524 		}
9525 
9526 		pri_entry++;
9527 	}
9528 
9529 	return priority;
9530 }
9531 
9532 /*
9533  * qla24xx_update_fcport_fcp_prio
9534  *	Activates fcp priority for the logged in fc port
9535  *
9536  * Input:
9537  *	vha = scsi host structure pointer.
9538  *	fcp = port structure pointer.
9539  *
9540  * Return:
9541  *	QLA_SUCCESS or QLA_FUNCTION_FAILED
9542  *
9543  * Context:
9544  *	Kernel context.
9545  */
9546 int
qla24xx_update_fcport_fcp_prio(scsi_qla_host_t * vha,fc_port_t * fcport)9547 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
9548 {
9549 	int ret;
9550 	int priority;
9551 	uint16_t mb[5];
9552 
9553 	if (fcport->port_type != FCT_TARGET ||
9554 	    fcport->loop_id == FC_NO_LOOP_ID)
9555 		return QLA_FUNCTION_FAILED;
9556 
9557 	priority = qla24xx_get_fcp_prio(vha, fcport);
9558 	if (priority < 0)
9559 		return QLA_FUNCTION_FAILED;
9560 
9561 	if (IS_P3P_TYPE(vha->hw)) {
9562 		fcport->fcp_prio = priority & 0xf;
9563 		return QLA_SUCCESS;
9564 	}
9565 
9566 	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
9567 	if (ret == QLA_SUCCESS) {
9568 		if (fcport->fcp_prio != priority)
9569 			ql_dbg(ql_dbg_user, vha, 0x709e,
9570 			    "Updated FCP_CMND priority - value=%d loop_id=%d "
9571 			    "port_id=%02x%02x%02x.\n", priority,
9572 			    fcport->loop_id, fcport->d_id.b.domain,
9573 			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
9574 		fcport->fcp_prio = priority & 0xf;
9575 	} else
9576 		ql_dbg(ql_dbg_user, vha, 0x704f,
9577 		    "Unable to update FCP_CMND priority - ret=0x%x for "
9578 		    "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
9579 		    fcport->d_id.b.domain, fcport->d_id.b.area,
9580 		    fcport->d_id.b.al_pa);
9581 	return  ret;
9582 }
9583 
9584 /*
9585  * qla24xx_update_all_fcp_prio
9586  *	Activates fcp priority for all the logged in ports
9587  *
9588  * Input:
9589  *	ha = adapter block pointer.
9590  *
9591  * Return:
9592  *	QLA_SUCCESS or QLA_FUNCTION_FAILED
9593  *
9594  * Context:
9595  *	Kernel context.
9596  */
9597 int
qla24xx_update_all_fcp_prio(scsi_qla_host_t * vha)9598 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
9599 {
9600 	int ret;
9601 	fc_port_t *fcport;
9602 
9603 	ret = QLA_FUNCTION_FAILED;
9604 	/* We need to set priority for all logged in ports */
9605 	list_for_each_entry(fcport, &vha->vp_fcports, list)
9606 		ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
9607 
9608 	return ret;
9609 }
9610 
qla2xxx_create_qpair(struct scsi_qla_host * vha,int qos,int vp_idx,bool startqp)9611 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
9612 	int vp_idx, bool startqp)
9613 {
9614 	int rsp_id = 0;
9615 	int  req_id = 0;
9616 	int i;
9617 	struct qla_hw_data *ha = vha->hw;
9618 	uint16_t qpair_id = 0;
9619 	struct qla_qpair *qpair = NULL;
9620 	struct qla_msix_entry *msix;
9621 
9622 	if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
9623 		ql_log(ql_log_warn, vha, 0x00181,
9624 		    "FW/Driver is not multi-queue capable.\n");
9625 		return NULL;
9626 	}
9627 
9628 	if (ql2xmqsupport || ql2xnvmeenable) {
9629 		qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
9630 		if (qpair == NULL) {
9631 			ql_log(ql_log_warn, vha, 0x0182,
9632 			    "Failed to allocate memory for queue pair.\n");
9633 			return NULL;
9634 		}
9635 
9636 		qpair->hw = vha->hw;
9637 		qpair->vha = vha;
9638 		qpair->qp_lock_ptr = &qpair->qp_lock;
9639 		spin_lock_init(&qpair->qp_lock);
9640 		qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
9641 
9642 		/* Assign available que pair id */
9643 		mutex_lock(&ha->mq_lock);
9644 		qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
9645 		if (ha->num_qpairs >= ha->max_qpairs) {
9646 			mutex_unlock(&ha->mq_lock);
9647 			ql_log(ql_log_warn, vha, 0x0183,
9648 			    "No resources to create additional q pair.\n");
9649 			goto fail_qid_map;
9650 		}
9651 		ha->num_qpairs++;
9652 		set_bit(qpair_id, ha->qpair_qid_map);
9653 		ha->queue_pair_map[qpair_id] = qpair;
9654 		qpair->id = qpair_id;
9655 		qpair->vp_idx = vp_idx;
9656 		qpair->fw_started = ha->flags.fw_started;
9657 		INIT_LIST_HEAD(&qpair->hints_list);
9658 		INIT_LIST_HEAD(&qpair->dsd_list);
9659 		qpair->chip_reset = ha->base_qpair->chip_reset;
9660 		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
9661 		qpair->enable_explicit_conf =
9662 		    ha->base_qpair->enable_explicit_conf;
9663 
9664 		for (i = 0; i < ha->msix_count; i++) {
9665 			msix = &ha->msix_entries[i];
9666 			if (msix->in_use)
9667 				continue;
9668 			qpair->msix = msix;
9669 			ql_dbg(ql_dbg_multiq, vha, 0xc00f,
9670 			    "Vector %x selected for qpair\n", msix->vector);
9671 			break;
9672 		}
9673 		if (!qpair->msix) {
9674 			ql_log(ql_log_warn, vha, 0x0184,
9675 			    "Out of MSI-X vectors!.\n");
9676 			goto fail_msix;
9677 		}
9678 
9679 		qpair->msix->in_use = 1;
9680 		list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
9681 		qpair->pdev = ha->pdev;
9682 		if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
9683 			qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
9684 
9685 		mutex_unlock(&ha->mq_lock);
9686 
9687 		/* Create response queue first */
9688 		rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
9689 		if (!rsp_id) {
9690 			ql_log(ql_log_warn, vha, 0x0185,
9691 			    "Failed to create response queue.\n");
9692 			goto fail_rsp;
9693 		}
9694 
9695 		qpair->rsp = ha->rsp_q_map[rsp_id];
9696 
9697 		/* Create request queue */
9698 		req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
9699 		    startqp);
9700 		if (!req_id) {
9701 			ql_log(ql_log_warn, vha, 0x0186,
9702 			    "Failed to create request queue.\n");
9703 			goto fail_req;
9704 		}
9705 
9706 		qpair->req = ha->req_q_map[req_id];
9707 		qpair->rsp->req = qpair->req;
9708 		qpair->rsp->qpair = qpair;
9709 
9710 		if (!qpair->cpu_mapped)
9711 			qla_cpu_update(qpair, raw_smp_processor_id());
9712 
9713 		if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
9714 			if (ha->fw_attributes & BIT_4)
9715 				qpair->difdix_supported = 1;
9716 		}
9717 
9718 		qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
9719 		if (!qpair->srb_mempool) {
9720 			ql_log(ql_log_warn, vha, 0xd036,
9721 			    "Failed to create srb mempool for qpair %d\n",
9722 			    qpair->id);
9723 			goto fail_mempool;
9724 		}
9725 
9726 		if (qla_create_buf_pool(vha, qpair)) {
9727 			ql_log(ql_log_warn, vha, 0xd036,
9728 			    "Failed to initialize buf pool for qpair %d\n",
9729 			    qpair->id);
9730 			goto fail_bufpool;
9731 		}
9732 
9733 		/* Mark as online */
9734 		qpair->online = 1;
9735 
9736 		if (!vha->flags.qpairs_available)
9737 			vha->flags.qpairs_available = 1;
9738 
9739 		ql_dbg(ql_dbg_multiq, vha, 0xc00d,
9740 		    "Request/Response queue pair created, id %d\n",
9741 		    qpair->id);
9742 		ql_dbg(ql_dbg_init, vha, 0x0187,
9743 		    "Request/Response queue pair created, id %d\n",
9744 		    qpair->id);
9745 	}
9746 	return qpair;
9747 
9748 fail_bufpool:
9749 	mempool_destroy(qpair->srb_mempool);
9750 fail_mempool:
9751 	qla25xx_delete_req_que(vha, qpair->req);
9752 fail_req:
9753 	qla25xx_delete_rsp_que(vha, qpair->rsp);
9754 fail_rsp:
9755 	mutex_lock(&ha->mq_lock);
9756 	qpair->msix->in_use = 0;
9757 	list_del(&qpair->qp_list_elem);
9758 	if (list_empty(&vha->qp_list))
9759 		vha->flags.qpairs_available = 0;
9760 fail_msix:
9761 	ha->queue_pair_map[qpair_id] = NULL;
9762 	clear_bit(qpair_id, ha->qpair_qid_map);
9763 	ha->num_qpairs--;
9764 	mutex_unlock(&ha->mq_lock);
9765 fail_qid_map:
9766 	kfree(qpair);
9767 	return NULL;
9768 }
9769 
/*
 * qla2xxx_delete_qpair - tear down one request/response queue pair.
 * @vha:   host the queue pair belongs to.
 * @qpair: queue pair to delete; freed on success.
 *
 * Deletes the firmware request and response queues, releases any DSD
 * DMA buffers still cached on the qpair, then unpublishes the qpair
 * from ha->queue_pair_map / the vha qp_list under ha->mq_lock and
 * frees it.  If either queue deletion fails, the qpair is left
 * allocated and registered (only delete_in_progress is set) and the
 * mailbox status is returned.
 *
 * Returns QLA_SUCCESS, or the failing queue-delete status.
 */
int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
{
	int ret = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = qpair->hw;

	/* Stop new work from being queued on this qpair during teardown. */
	qpair->delete_in_progress = 1;

	qla_free_buf_pool(qpair);

	ret = qla25xx_delete_req_que(vha, qpair->req);
	if (ret != QLA_SUCCESS)
		goto fail;

	ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
	if (ret != QLA_SUCCESS)
		goto fail;

	/* Free DSD descriptors still parked on this qpair's list. */
	if (!list_empty(&qpair->dsd_list)) {
		struct dsd_dma *dsd_ptr, *tdsd_ptr;

		/* clean up allocated prev pool */
		list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
					 &qpair->dsd_list, list) {
			dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
				      dsd_ptr->dsd_list_dma);
			list_del(&dsd_ptr->list);
			kfree(dsd_ptr);
		}
	}

	/*
	 * Unpublish the qpair and free it while holding mq_lock so no
	 * lookup through queue_pair_map can race with the kfree().
	 */
	mutex_lock(&ha->mq_lock);
	ha->queue_pair_map[qpair->id] = NULL;
	clear_bit(qpair->id, ha->qpair_qid_map);
	ha->num_qpairs--;
	list_del(&qpair->qp_list_elem);
	if (list_empty(&vha->qp_list)) {
		/* Last qpair gone: reset the host's multiqueue flags. */
		vha->flags.qpairs_available = 0;
		vha->flags.qpairs_req_created = 0;
		vha->flags.qpairs_rsp_created = 0;
	}
	mempool_destroy(qpair->srb_mempool);
	kfree(qpair);
	mutex_unlock(&ha->mq_lock);

	return QLA_SUCCESS;
fail:
	return ret;
}
9818 
/*
 * qla2x00_count_set_bits - population count of @num.
 *
 * Returns the number of bits set in the 32-bit input.
 */
uint64_t
qla2x00_count_set_bits(uint32_t num)
{
	uint64_t nbits = 0;

	/* Test the low-order bit, then shift it out, until none remain. */
	for (; num != 0; num >>= 1)
		nbits += num & 1u;

	return nbits;
}
9831 
9832 uint64_t
qla2x00_get_num_tgts(scsi_qla_host_t * vha)9833 qla2x00_get_num_tgts(scsi_qla_host_t *vha)
9834 {
9835 	fc_port_t *f, *tf;
9836 	u64 count = 0;
9837 
9838 	f = NULL;
9839 	tf = NULL;
9840 
9841 	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
9842 		if (f->port_type != FCT_TARGET)
9843 			continue;
9844 		count++;
9845 	}
9846 	return count;
9847 }
9848 
/*
 * qla2xxx_reset_stats - clear the driver statistic counters selected
 * by @flags.
 * @host:  Scsi_Host whose counters are reset.
 * @flags: bitmask of QLA2XX_* selectors; each set bit zeroes the
 *         corresponding counter.
 *
 * Per-target short-link-down counters live on the fcport list and are
 * cleared under tgt.sess_lock.  The host link_down_time is always
 * re-armed to QLA2XX_MAX_LINK_DOWN_TIME, regardless of @flags.
 *
 * Returns 0.
 */
int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = NULL;
	unsigned long int_flags;

	if (flags & QLA2XX_HW_ERROR)
		vha->hw_err_cnt = 0;
	if (flags & QLA2XX_SHT_LNK_DWN)
		vha->short_link_down_cnt = 0;
	if (flags & QLA2XX_INT_ERR)
		vha->interface_err_cnt = 0;
	if (flags & QLA2XX_CMD_TIMEOUT)
		vha->cmd_timeout_cnt = 0;
	if (flags & QLA2XX_RESET_CMD_ERR)
		vha->reset_cmd_err_cnt = 0;
	if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
		/* Per-target counters: walk the session list locked. */
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			fcport->tgt_short_link_down_cnt = 0;
			fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
		}
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
	}
	vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	return 0;
}
9876 
/*
 * qla2xxx_start_stats - begin statistics collection for @flags.
 *
 * Starting collection is implemented by resetting the selected
 * counters; delegates to qla2xxx_reset_stats().  Returns 0.
 */
int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
{
	return qla2xxx_reset_stats(host, flags);
}
9881 
/*
 * qla2xxx_stop_stats - stop statistics collection for @flags.
 *
 * Stopping collection is implemented identically to starting it: the
 * selected counters are reset via qla2xxx_reset_stats().  Returns 0.
 */
int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
{
	return qla2xxx_reset_stats(host, flags);
}
9886 
/*
 * qla2xxx_get_ini_stats - report initiator-side error counters.
 * @host:  Scsi_Host to report on.
 * @flags: bitmask of QLA2XX_* stat selectors; BIT_17
 *         (QLA2XX_TGT_SHT_LNK_DOWN) additionally produces one entry
 *         per FCT_TARGET port that has an rport.
 * @data:  caller buffer, laid out as struct ql_vnd_host_stats_resp.
 * @size:  buffer size.  NOTE(review): @size is not checked here —
 *         entry_count is derived from @flags/target count alone, so the
 *         caller must size @data accordingly; confirm at call sites.
 *
 * Returns 0.
 */
int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
			  void *data, u64 size)
{
	scsi_qla_host_t *vha = shost_priv(host);
	struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
	struct ql_vnd_stats *rsp_data = &resp->stats;
	u64 ini_entry_count = 0;
	u64 i = 0;
	u64 entry_count = 0;
	u64 num_tgt = 0;
	u32 tmp_stat_type = 0;
	fc_port_t *fcport = NULL;
	unsigned long int_flags;

	/* Copy stat type to work on it */
	tmp_stat_type = flags;

	if (tmp_stat_type & BIT_17) {
		num_tgt = qla2x00_get_num_tgts(vha);
		/* Unset BIT_17 (was open-coded as ~(1 << 17)). */
		tmp_stat_type &= ~BIT_17;
	}
	/* One entry per remaining initiator stat bit, plus targets. */
	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

	entry_count = ini_entry_count + num_tgt;

	rsp_data->entry_count = entry_count;

	i = 0;
	if (flags & QLA2XX_HW_ERROR) {
		rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->hw_err_cnt;
		i++;
	}

	if (flags & QLA2XX_SHT_LNK_DWN) {
		rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->short_link_down_cnt;
		i++;
	}

	if (flags & QLA2XX_INT_ERR) {
		rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->interface_err_cnt;
		i++;
	}

	if (flags & QLA2XX_CMD_TIMEOUT) {
		rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
		i++;
	}

	if (flags & QLA2XX_RESET_CMD_ERR) {
		rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
		i++;
	}

	/* i will continue from previous loop, as target
	 * entries are after initiator
	 */
	if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;
			if (!fcport->rport)
				continue;
			rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
			rsp_data->entry[i].tgt_num = fcport->rport->number;
			rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
			i++;
		}
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
	}
	resp->status = EXT_STATUS_OK;

	return 0;
}
9972 
/*
 * qla2xxx_get_tgt_stats - report the short-link-down count of one rport.
 * @host:  Scsi_Host (not used by this function).
 * @flags: stat type, echoed back in the single entry.
 * @rport: FC remote port; rport->dd_data holds the fc_port_t pointer.
 * @data:  out buffer, laid out as struct ql_vnd_tgt_stats_resp.
 * @size:  buffer size.  NOTE(review): not checked here — caller must
 *         provide room for one entry; confirm at call sites.
 *
 * Returns 0.
 */
int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
			  struct fc_rport *rport, void *data, u64 size)
{
	struct ql_vnd_tgt_stats_resp *tgt_data = data;
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	/* Single-entry response describing this one target. */
	tgt_data->status = 0;
	tgt_data->stats.entry_count = 1;
	tgt_data->stats.entry[0].stat_type = flags;
	tgt_data->stats.entry[0].tgt_num = rport->number;
	tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;

	return 0;
}
9987 
/*
 * qla2xxx_disable_port - isolate the port and bring it down.
 * @host: Scsi_Host to disable.
 *
 * port_isolated is set before any hardware checks so the isolated
 * state is recorded even when the checks below bail out early.  If the
 * PCI registers are unreachable the adapter is flagged EEH-busy and
 * FAILED is returned; if the chip is already down there is nothing
 * more to do.  Otherwise the ISP is cleaned up and the function waits
 * for session deletion to complete.
 *
 * Returns 0 on success, FAILED on PCI/register disconnect.
 */
int qla2xxx_disable_port(struct Scsi_Host *host)
{
	scsi_qla_host_t *vha = shost_priv(host);

	vha->hw->flags.port_isolated = 1;

	if (qla2x00_isp_reg_stat(vha->hw)) {
		ql_log(ql_log_info, vha, 0x9006,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}
	if (qla2x00_chip_is_down(vha))
		return 0;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);
		qla2x00_wait_for_sess_deletion(vha);
	}

	return 0;
}
10010 
/*
 * qla2xxx_enable_port - end port isolation and bring the port back up.
 * @host: Scsi_Host to enable.
 *
 * Bails out with FAILED (after marking the adapter EEH-busy) if the
 * PCI registers are unreachable.  Otherwise clears port_isolated,
 * forces the online flag, and schedules an ISP abort on the DPC thread
 * to reinitialize the adapter.
 *
 * Returns 0 on success, FAILED on PCI/register disconnect.
 */
int qla2xxx_enable_port(struct Scsi_Host *host)
{
	scsi_qla_host_t *vha = shost_priv(host);

	if (qla2x00_isp_reg_stat(vha->hw)) {
		ql_log(ql_log_info, vha, 0x9001,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	vha->hw->flags.port_isolated = 0;
	/* Set the flag to 1, so that isp_abort can proceed */
	vha->flags.online = 1;
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);

	return 0;
}
10030