1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Marvell Fibre Channel HBA Driver
4  * Copyright (c)  2021     Marvell
5  */
6 #include "qla_def.h"
7 #include "qla_edif.h"
8 
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <scsi/scsi_tcq.h>
13 
14 static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
15 		struct list_head *sa_list);
16 static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
17 		struct qla_sa_update_frame *sa_frame);
18 static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
19 		uint16_t sa_index);
20 static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *);
21 
22 struct edb_node {
23 	struct  list_head	list;
24 	uint32_t		ntype;
25 	union {
26 		port_id_t	plogi_did;
27 		uint32_t	async;
28 		port_id_t	els_sid;
29 		struct edif_sa_update_aen	sa_aen;
30 	} u;
31 };
32 
33 static struct els_sub_cmd {
34 	uint16_t cmd;
35 	const char *str;
36 } sc_str[] = {
37 	{SEND_ELS, "send ELS"},
38 	{SEND_ELS_REPLY, "send ELS Reply"},
39 	{PULL_ELS, "retrieve ELS"},
40 };
41 
sc_to_str(uint16_t cmd)42 const char *sc_to_str(uint16_t cmd)
43 {
44 	int i;
45 	struct els_sub_cmd *e;
46 
47 	for (i = 0; i < ARRAY_SIZE(sc_str); i++) {
48 		e = sc_str + i;
49 		if (cmd == e->cmd)
50 			return e->str;
51 	}
52 	return "unknown";
53 }
54 
qla_edb_getnext(scsi_qla_host_t * vha)55 static struct edb_node *qla_edb_getnext(scsi_qla_host_t *vha)
56 {
57 	unsigned long   flags;
58 	struct edb_node *edbnode = NULL;
59 
60 	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
61 
62 	/* db nodes are fifo - no qualifications done */
63 	if (!list_empty(&vha->e_dbell.head)) {
64 		edbnode = list_first_entry(&vha->e_dbell.head,
65 					   struct edb_node, list);
66 		list_del_init(&edbnode->list);
67 	}
68 
69 	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
70 
71 	return edbnode;
72 }
73 
qla_edb_node_free(scsi_qla_host_t * vha,struct edb_node * node)74 static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
75 {
76 	list_del_init(&node->list);
77 	kfree(node);
78 }
79 
qla_edif_list_find_sa_index(fc_port_t * fcport,uint16_t handle)80 static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
81 		uint16_t handle)
82 {
83 	struct edif_list_entry *entry;
84 	struct edif_list_entry *tentry;
85 	struct list_head *indx_list = &fcport->edif.edif_indx_list;
86 
87 	list_for_each_entry_safe(entry, tentry, indx_list, next) {
88 		if (entry->handle == handle)
89 			return entry;
90 	}
91 	return NULL;
92 }
93 
94 /* timeout called when no traffic and delayed rx sa_index delete */
qla2x00_sa_replace_iocb_timeout(struct timer_list * t)95 static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
96 {
97 	struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
98 	fc_port_t *fcport = edif_entry->fcport;
99 	struct scsi_qla_host *vha = fcport->vha;
100 	struct  edif_sa_ctl *sa_ctl;
101 	uint16_t nport_handle;
102 	unsigned long flags = 0;
103 
104 	ql_dbg(ql_dbg_edif, vha, 0x3069,
105 	    "%s:  nport_handle 0x%x,  SA REPL Delay Timeout, %8phC portid=%06x\n",
106 	    __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24);
107 
108 	/*
109 	 * if delete_sa_index is valid then no one has serviced this
110 	 * delayed delete
111 	 */
112 	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
113 
114 	/*
115 	 * delete_sa_index is invalidated when we find the new sa_index in
116 	 * the incoming data stream.  If it is not invalidated then we are
117 	 * still looking for the new sa_index because there is no I/O and we
118 	 * need to just force the rx delete and move on.  Otherwise
119 	 * we could get another rekey which will result in an error 66.
120 	 */
121 	if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
122 		uint16_t delete_sa_index = edif_entry->delete_sa_index;
123 
124 		edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
125 		nport_handle = edif_entry->handle;
126 		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
127 
128 		sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
129 		    delete_sa_index, 0);
130 
131 		if (sa_ctl) {
132 			ql_dbg(ql_dbg_edif, vha, 0x3063,
133 			    "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
134 			    __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index,
135 			    nport_handle);
136 
137 			sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
138 			set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
139 			qla_post_sa_replace_work(fcport->vha, fcport,
140 			    nport_handle, sa_ctl);
141 
142 		} else {
143 			ql_dbg(ql_dbg_edif, vha, 0x3063,
144 			    "%s: sa_ctl not found for delete_sa_index: %d\n",
145 			    __func__, edif_entry->delete_sa_index);
146 		}
147 	} else {
148 		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
149 	}
150 }
151 
152 /*
153  * create a new list entry for this nport handle and
154  * add an sa_update index to the list - called for sa_update
155  */
qla_edif_list_add_sa_update_index(fc_port_t * fcport,uint16_t sa_index,uint16_t handle)156 static int qla_edif_list_add_sa_update_index(fc_port_t *fcport,
157 		uint16_t sa_index, uint16_t handle)
158 {
159 	struct edif_list_entry *entry;
160 	unsigned long flags = 0;
161 
162 	/* if the entry exists, then just update the sa_index */
163 	entry = qla_edif_list_find_sa_index(fcport, handle);
164 	if (entry) {
165 		entry->update_sa_index = sa_index;
166 		entry->count = 0;
167 		return 0;
168 	}
169 
170 	/*
171 	 * This is the normal path - there should be no existing entry
172 	 * when update is called.  The exception is at startup
173 	 * when update is called for the first two sa_indexes
174 	 * followed by a delete of the first sa_index
175 	 */
176 	entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC);
177 	if (!entry)
178 		return -ENOMEM;
179 
180 	INIT_LIST_HEAD(&entry->next);
181 	entry->handle = handle;
182 	entry->update_sa_index = sa_index;
183 	entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
184 	entry->count = 0;
185 	entry->flags = 0;
186 	timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
187 	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
188 	list_add_tail(&entry->next, &fcport->edif.edif_indx_list);
189 	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
190 	return 0;
191 }
192 
193 /* remove an entry from the list */
qla_edif_list_delete_sa_index(fc_port_t * fcport,struct edif_list_entry * entry)194 static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
195 {
196 	unsigned long flags = 0;
197 
198 	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
199 	list_del(&entry->next);
200 	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
201 }
202 
qla_post_sa_replace_work(struct scsi_qla_host * vha,fc_port_t * fcport,uint16_t nport_handle,struct edif_sa_ctl * sa_ctl)203 int qla_post_sa_replace_work(struct scsi_qla_host *vha,
204 	 fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl)
205 {
206 	struct qla_work_evt *e;
207 
208 	e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE);
209 	if (!e)
210 		return QLA_FUNCTION_FAILED;
211 
212 	e->u.sa_update.fcport = fcport;
213 	e->u.sa_update.sa_ctl = sa_ctl;
214 	e->u.sa_update.nport_handle = nport_handle;
215 	fcport->flags |= FCF_ASYNC_ACTIVE;
216 	return qla2x00_post_work(vha, e);
217 }
218 
219 static void
qla_edif_sa_ctl_init(scsi_qla_host_t * vha,struct fc_port * fcport)220 qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port  *fcport)
221 {
222 	ql_dbg(ql_dbg_edif, vha, 0x2058,
223 	    "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
224 	    fcport->node_name, fcport->port_name, fcport->d_id.b24);
225 
226 	fcport->edif.tx_rekey_cnt = 0;
227 	fcport->edif.rx_rekey_cnt = 0;
228 
229 	fcport->edif.tx_bytes = 0;
230 	fcport->edif.rx_bytes = 0;
231 }
232 
qla_bsg_check(scsi_qla_host_t * vha,struct bsg_job * bsg_job,fc_port_t * fcport)233 static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
234 fc_port_t *fcport)
235 {
236 	struct extra_auth_els *p;
237 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
238 	struct qla_bsg_auth_els_request *req =
239 	    (struct qla_bsg_auth_els_request *)bsg_job->request;
240 
241 	if (!vha->hw->flags.edif_enabled) {
242 		ql_dbg(ql_dbg_edif, vha, 0x9105,
243 		    "%s edif not enabled\n", __func__);
244 		goto done;
245 	}
246 	if (DBELL_INACTIVE(vha)) {
247 		ql_dbg(ql_dbg_edif, vha, 0x09102,
248 		    "%s doorbell not enabled\n", __func__);
249 		goto done;
250 	}
251 
252 	p = &req->e;
253 
254 	/* Get response */
255 	if (p->sub_cmd == PULL_ELS) {
256 		struct qla_bsg_auth_els_reply *rpl =
257 			(struct qla_bsg_auth_els_reply *)bsg_job->reply;
258 
259 		qla_pur_get_pending(vha, fcport, bsg_job);
260 
261 		ql_dbg(ql_dbg_edif, vha, 0x911d,
262 			"%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
263 			__func__, sc_to_str(p->sub_cmd), fcport->port_name,
264 			fcport->d_id.b24, rpl->rx_xchg_address,
265 			rpl->r.reply_payload_rcv_len, bsg_job);
266 
267 		goto done;
268 	}
269 	return 0;
270 
271 done:
272 
273 	bsg_job_done(bsg_job, bsg_reply->result,
274 			bsg_reply->reply_payload_rcv_len);
275 	return -EIO;
276 }
277 
278 fc_port_t *
qla2x00_find_fcport_by_pid(scsi_qla_host_t * vha,port_id_t * id)279 qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
280 {
281 	fc_port_t *f, *tf;
282 
283 	f = NULL;
284 	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
285 		if (f->d_id.b24 == id->b24)
286 			return f;
287 	}
288 	return NULL;
289 }
290 
291 /**
292  * qla_edif_app_check(): check for valid application id.
293  * @vha: host adapter pointer
294  * @appid: application id
295  * Return: false = fail, true = pass
296  */
297 static bool
qla_edif_app_check(scsi_qla_host_t * vha,struct app_id appid)298 qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
299 {
300 	/* check that the app is allow/known to the driver */
301 
302 	if (appid.app_vid != EDIF_APP_ID) {
303 		ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
304 		    __func__, appid.app_vid);
305 		return false;
306 	}
307 
308 	if (appid.version != EDIF_VERSION1) {
309 		ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app version is not ok (%x)",
310 		    __func__, appid.version);
311 		return false;
312 	}
313 
314 	return true;
315 }
316 
317 static void
qla_edif_free_sa_ctl(fc_port_t * fcport,struct edif_sa_ctl * sa_ctl,int index)318 qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
319 	int index)
320 {
321 	unsigned long flags = 0;
322 
323 	spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
324 	list_del(&sa_ctl->next);
325 	spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
326 	if (index >= 512)
327 		fcport->edif.tx_rekey_cnt--;
328 	else
329 		fcport->edif.rx_rekey_cnt--;
330 	kfree(sa_ctl);
331 }
332 
333 /* return an index to the freepool */
qla_edif_add_sa_index_to_freepool(fc_port_t * fcport,int dir,uint16_t sa_index)334 static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir,
335 		uint16_t sa_index)
336 {
337 	void *sa_id_map;
338 	struct scsi_qla_host *vha = fcport->vha;
339 	struct qla_hw_data *ha = vha->hw;
340 	unsigned long flags = 0;
341 	u16 lsa_index = sa_index;
342 
343 	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
344 	    "%s: entry\n", __func__);
345 
346 	if (dir) {
347 		sa_id_map = ha->edif_tx_sa_id_map;
348 		lsa_index -= EDIF_TX_SA_INDEX_BASE;
349 	} else {
350 		sa_id_map = ha->edif_rx_sa_id_map;
351 	}
352 
353 	spin_lock_irqsave(&ha->sadb_fp_lock, flags);
354 	clear_bit(lsa_index, sa_id_map);
355 	spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
356 	ql_dbg(ql_dbg_edif, vha, 0x3063,
357 	    "%s: index %d added to free pool\n", __func__, sa_index);
358 }
359 
__qla2x00_release_all_sadb(struct scsi_qla_host * vha,struct fc_port * fcport,struct edif_sa_index_entry * entry,int pdir)360 static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha,
361 	struct fc_port *fcport, struct edif_sa_index_entry *entry,
362 	int pdir)
363 {
364 	struct edif_list_entry *edif_entry;
365 	struct  edif_sa_ctl *sa_ctl;
366 	int i, dir;
367 	int key_cnt = 0;
368 
369 	for (i = 0; i < 2; i++) {
370 		if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX)
371 			continue;
372 
373 		if (fcport->loop_id != entry->handle) {
374 			ql_dbg(ql_dbg_edif, vha, 0x3063,
375 			    "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
376 			    __func__, i, entry->handle, fcport->loop_id,
377 			    entry->sa_pair[i].sa_index);
378 		}
379 
380 		/* release the sa_ctl */
381 		sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
382 				entry->sa_pair[i].sa_index, pdir);
383 		if (sa_ctl &&
384 		    qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
385 			ql_dbg(ql_dbg_edif, vha, 0x3063,
386 			    "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
387 			qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
388 		} else {
389 			ql_dbg(ql_dbg_edif, vha, 0x3063,
390 			    "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
391 		}
392 
393 		/* Release the index */
394 		ql_dbg(ql_dbg_edif, vha, 0x3063,
395 			"%s: freeing sa_index %d, nph: 0x%x\n",
396 			__func__, entry->sa_pair[i].sa_index, entry->handle);
397 
398 		dir = (entry->sa_pair[i].sa_index <
399 			EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
400 		qla_edif_add_sa_index_to_freepool(fcport, dir,
401 			entry->sa_pair[i].sa_index);
402 
403 		/* Delete timer on RX */
404 		if (pdir != SAU_FLG_TX) {
405 			edif_entry =
406 				qla_edif_list_find_sa_index(fcport, entry->handle);
407 			if (edif_entry) {
408 				ql_dbg(ql_dbg_edif, vha, 0x5033,
409 				    "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
410 				    __func__, edif_entry, edif_entry->update_sa_index,
411 				    edif_entry->delete_sa_index);
412 				qla_edif_list_delete_sa_index(fcport, edif_entry);
413 				/*
414 				 * valid delete_sa_index indicates there is a rx
415 				 * delayed delete queued
416 				 */
417 				if (edif_entry->delete_sa_index !=
418 						INVALID_EDIF_SA_INDEX) {
419 					timer_shutdown(&edif_entry->timer);
420 
421 					/* build and send the aen */
422 					fcport->edif.rx_sa_set = 1;
423 					fcport->edif.rx_sa_pending = 0;
424 					qla_edb_eventcreate(vha,
425 							VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
426 							QL_VND_SA_STAT_SUCCESS,
427 							QL_VND_RX_SA_KEY, fcport);
428 				}
429 				ql_dbg(ql_dbg_edif, vha, 0x5033,
430 				    "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
431 				    __func__, edif_entry, edif_entry->update_sa_index,
432 				    edif_entry->delete_sa_index);
433 
434 				kfree(edif_entry);
435 			}
436 		}
437 		key_cnt++;
438 	}
439 	ql_dbg(ql_dbg_edif, vha, 0x3063,
440 	    "%s: %d %s keys released\n",
441 	    __func__, key_cnt, pdir ? "tx" : "rx");
442 }
443 
444 /* find an release all outstanding sadb sa_indicies */
qla2x00_release_all_sadb(struct scsi_qla_host * vha,struct fc_port * fcport)445 void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
446 {
447 	struct edif_sa_index_entry *entry, *tmp;
448 	struct qla_hw_data *ha = vha->hw;
449 	unsigned long flags;
450 
451 	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
452 	    "%s: Starting...\n", __func__);
453 
454 	spin_lock_irqsave(&ha->sadb_lock, flags);
455 
456 	list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
457 		if (entry->fcport == fcport) {
458 			list_del(&entry->next);
459 			spin_unlock_irqrestore(&ha->sadb_lock, flags);
460 			__qla2x00_release_all_sadb(vha, fcport, entry, 0);
461 			kfree(entry);
462 			spin_lock_irqsave(&ha->sadb_lock, flags);
463 			break;
464 		}
465 	}
466 
467 	list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
468 		if (entry->fcport == fcport) {
469 			list_del(&entry->next);
470 			spin_unlock_irqrestore(&ha->sadb_lock, flags);
471 
472 			__qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);
473 
474 			kfree(entry);
475 			spin_lock_irqsave(&ha->sadb_lock, flags);
476 			break;
477 		}
478 	}
479 	spin_unlock_irqrestore(&ha->sadb_lock, flags);
480 }
481 
482 /**
483  * qla_delete_n2n_sess_and_wait: search for N2N session, tear it down and
484  *    wait for tear down to complete.  In N2N topology, there is only one
485  *    session being active in tracking the remote device.
486  * @vha: host adapter pointer
487  * return code:  0 - found the session and completed the tear down.
488  *	1 - timeout occurred.  Caller to use link bounce to reset.
489  */
qla_delete_n2n_sess_and_wait(scsi_qla_host_t * vha)490 static int qla_delete_n2n_sess_and_wait(scsi_qla_host_t *vha)
491 {
492 	struct fc_port *fcport;
493 	int rc = -EIO;
494 	ulong expire = jiffies + 23 * HZ;
495 
496 	if (!N2N_TOPO(vha->hw))
497 		return 0;
498 
499 	fcport = NULL;
500 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
501 		if (!fcport->n2n_flag)
502 			continue;
503 
504 		ql_dbg(ql_dbg_disc, fcport->vha, 0x2016,
505 		       "%s reset sess at app start \n", __func__);
506 
507 		qla_edif_sa_ctl_init(vha, fcport);
508 		qlt_schedule_sess_for_deletion(fcport);
509 
510 		while (time_before_eq(jiffies, expire)) {
511 			if (fcport->disc_state != DSC_DELETE_PEND) {
512 				rc = 0;
513 				break;
514 			}
515 			msleep(1);
516 		}
517 
518 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
519 		break;
520 	}
521 
522 	return rc;
523 }
524 
525 /**
526  * qla_edif_app_start:  application has announce its present
527  * @vha: host adapter pointer
528  * @bsg_job: user request
529  *
530  * Set/activate doorbell.  Reset current sessions and re-login with
531  * secure flag.
532  */
533 static int
qla_edif_app_start(scsi_qla_host_t * vha,struct bsg_job * bsg_job)534 qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
535 {
536 	int32_t			rval = 0;
537 	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
538 	struct app_start	appstart;
539 	struct app_start_reply	appreply;
540 	struct fc_port  *fcport, *tf;
541 
542 	ql_log(ql_log_info, vha, 0x1313,
543 	       "EDIF application registration with driver, FC device connections will be re-established.\n");
544 
545 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
546 	    bsg_job->request_payload.sg_cnt, &appstart,
547 	    sizeof(struct app_start));
548 
549 	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
550 	     __func__, appstart.app_info.app_vid, appstart.app_start_flags);
551 
552 	if (DBELL_INACTIVE(vha)) {
553 		/* mark doorbell as active since an app is now present */
554 		vha->e_dbell.db_flags |= EDB_ACTIVE;
555 	} else {
556 		goto out;
557 	}
558 
559 	if (N2N_TOPO(vha->hw)) {
560 		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
561 			fcport->n2n_link_reset_cnt = 0;
562 
563 		if (vha->hw->flags.n2n_fw_acc_sec) {
564 			bool link_bounce = false;
565 			/*
566 			 * While authentication app was not running, remote device
567 			 * could still try to login with this local port.  Let's
568 			 * reset the session, reconnect and re-authenticate.
569 			 */
570 			if (qla_delete_n2n_sess_and_wait(vha))
571 				link_bounce = true;
572 
573 			/* bounce the link to start login */
574 			if (!vha->hw->flags.n2n_bigger || link_bounce) {
575 				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
576 				qla2xxx_wake_dpc(vha);
577 			}
578 		} else {
579 			qla2x00_wait_for_hba_online(vha);
580 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
581 			qla2xxx_wake_dpc(vha);
582 			qla2x00_wait_for_hba_online(vha);
583 		}
584 	} else {
585 		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
586 			ql_dbg(ql_dbg_edif, vha, 0x2058,
587 			       "FCSP - nn %8phN pn %8phN portid=%06x.\n",
588 			       fcport->node_name, fcport->port_name,
589 			       fcport->d_id.b24);
590 			ql_dbg(ql_dbg_edif, vha, 0xf084,
591 			       "%s: se_sess %p / sess %p from port %8phC "
592 			       "loop_id %#04x s_id %06x logout %d "
593 			       "keep %d els_logo %d disc state %d auth state %d"
594 			       "stop state %d\n",
595 			       __func__, fcport->se_sess, fcport,
596 			       fcport->port_name, fcport->loop_id,
597 			       fcport->d_id.b24, fcport->logout_on_delete,
598 			       fcport->keep_nport_handle, fcport->send_els_logo,
599 			       fcport->disc_state, fcport->edif.auth_state,
600 			       fcport->edif.app_stop);
601 
602 			if (atomic_read(&vha->loop_state) == LOOP_DOWN)
603 				break;
604 
605 			fcport->login_retry = vha->hw->login_retry_count;
606 
607 			fcport->edif.app_stop = 0;
608 			fcport->edif.app_sess_online = 0;
609 
610 			if (fcport->scan_state != QLA_FCPORT_FOUND)
611 				continue;
612 
613 			if (fcport->port_type == FCT_UNKNOWN &&
614 			    !fcport->fc4_features)
615 				rval = qla24xx_async_gffid(vha, fcport, true);
616 
617 			if (!rval && !(fcport->fc4_features & FC4_FF_TARGET ||
618 			    fcport->port_type & (FCT_TARGET|FCT_NVME_TARGET)))
619 				continue;
620 
621 			rval = 0;
622 
623 			ql_dbg(ql_dbg_edif, vha, 0x911e,
624 			       "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
625 			       __func__, fcport->port_name);
626 			qlt_schedule_sess_for_deletion(fcport);
627 			qla_edif_sa_ctl_init(vha, fcport);
628 		}
629 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
630 	}
631 
632 	if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
633 		/* mark as active since an app is now present */
634 		vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
635 	} else {
636 		ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
637 		     __func__);
638 	}
639 
640 out:
641 	appreply.host_support_edif = vha->hw->flags.edif_enabled;
642 	appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
643 	appreply.edif_edb_active = vha->e_dbell.db_flags;
644 	appreply.version = EDIF_VERSION1;
645 
646 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
647 
648 	SET_DID_STATUS(bsg_reply->result, DID_OK);
649 
650 	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
651 							       bsg_job->reply_payload.sg_cnt,
652 							       &appreply,
653 							       sizeof(struct app_start_reply));
654 
655 	ql_dbg(ql_dbg_edif, vha, 0x911d,
656 	    "%s app start completed with 0x%x\n",
657 	    __func__, rval);
658 
659 	return rval;
660 }
661 
662 /**
663  * qla_edif_app_stop - app has announced it's exiting.
664  * @vha: host adapter pointer
665  * @bsg_job: user space command pointer
666  *
667  * Free any in flight messages, clear all doorbell events
668  * to application. Reject any message relate to security.
669  */
670 static int
qla_edif_app_stop(scsi_qla_host_t * vha,struct bsg_job * bsg_job)671 qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
672 {
673 	struct app_stop         appstop;
674 	struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
675 	struct fc_port  *fcport, *tf;
676 
677 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
678 	    bsg_job->request_payload.sg_cnt, &appstop,
679 	    sizeof(struct app_stop));
680 
681 	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
682 	    __func__, appstop.app_info.app_vid);
683 
684 	/* Call db stop and enode stop functions */
685 
686 	/* if we leave this running short waits are operational < 16 secs */
687 	qla_enode_stop(vha);        /* stop enode */
688 	qla_edb_stop(vha);          /* stop db */
689 
690 	list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
691 		if (!(fcport->flags & FCF_FCSP_DEVICE))
692 			continue;
693 
694 		if (fcport->flags & FCF_FCSP_DEVICE) {
695 			ql_dbg(ql_dbg_edif, vha, 0xf084,
696 			    "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
697 			    __func__, fcport,
698 			    fcport->port_name, fcport->loop_id, fcport->d_id.b24,
699 			    fcport->logout_on_delete, fcport->keep_nport_handle,
700 			    fcport->send_els_logo);
701 
702 			if (atomic_read(&vha->loop_state) == LOOP_DOWN)
703 				break;
704 
705 			fcport->edif.app_stop = 1;
706 			ql_dbg(ql_dbg_edif, vha, 0x911e,
707 				"%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
708 				__func__, fcport->port_name);
709 
710 			fcport->send_els_logo = 1;
711 			qlt_schedule_sess_for_deletion(fcport);
712 		}
713 	}
714 
715 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
716 	SET_DID_STATUS(bsg_reply->result, DID_OK);
717 
718 	/* no return interface to app - it assumes we cleaned up ok */
719 
720 	return 0;
721 }
722 
723 static int
qla_edif_app_chk_sa_update(scsi_qla_host_t * vha,fc_port_t * fcport,struct app_plogi_reply * appplogireply)724 qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
725 		struct app_plogi_reply *appplogireply)
726 {
727 	int	ret = 0;
728 
729 	if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
730 		ql_dbg(ql_dbg_edif, vha, 0x911e,
731 		    "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
732 		    __func__, fcport->port_name, fcport->edif.tx_sa_set,
733 		    fcport->edif.rx_sa_set);
734 		appplogireply->prli_status = 0;
735 		ret = 1;
736 	} else  {
737 		ql_dbg(ql_dbg_edif, vha, 0x911e,
738 		    "%s wwpn %8phC Both SA(s) updated.\n", __func__,
739 		    fcport->port_name);
740 		fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
741 		fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
742 		appplogireply->prli_status = 1;
743 	}
744 	return ret;
745 }
746 
747 /**
748  * qla_edif_app_authok - authentication by app succeeded.  Driver can proceed
749  *   with prli
750  * @vha: host adapter pointer
751  * @bsg_job: user request
752  */
753 static int
qla_edif_app_authok(scsi_qla_host_t * vha,struct bsg_job * bsg_job)754 qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
755 {
756 	struct auth_complete_cmd appplogiok;
757 	struct app_plogi_reply	appplogireply = {0};
758 	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
759 	fc_port_t		*fcport = NULL;
760 	port_id_t		portid = {0};
761 
762 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
763 	    bsg_job->request_payload.sg_cnt, &appplogiok,
764 	    sizeof(struct auth_complete_cmd));
765 
766 	/* silent unaligned access warning */
767 	portid.b.domain = appplogiok.u.d_id.b.domain;
768 	portid.b.area   = appplogiok.u.d_id.b.area;
769 	portid.b.al_pa  = appplogiok.u.d_id.b.al_pa;
770 
771 	appplogireply.version = EDIF_VERSION1;
772 	switch (appplogiok.type) {
773 	case PL_TYPE_WWPN:
774 		fcport = qla2x00_find_fcport_by_wwpn(vha,
775 		    appplogiok.u.wwpn, 0);
776 		if (!fcport)
777 			ql_dbg(ql_dbg_edif, vha, 0x911d,
778 			    "%s wwpn lookup failed: %8phC\n",
779 			    __func__, appplogiok.u.wwpn);
780 		break;
781 	case PL_TYPE_DID:
782 		fcport = qla2x00_find_fcport_by_pid(vha, &portid);
783 		if (!fcport)
784 			ql_dbg(ql_dbg_edif, vha, 0x911d,
785 			    "%s d_id lookup failed: %x\n", __func__,
786 			    portid.b24);
787 		break;
788 	default:
789 		ql_dbg(ql_dbg_edif, vha, 0x911d,
790 		    "%s undefined type: %x\n", __func__,
791 		    appplogiok.type);
792 		break;
793 	}
794 
795 	if (!fcport) {
796 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
797 		goto errstate_exit;
798 	}
799 
800 	/*
801 	 * if port is online then this is a REKEY operation
802 	 * Only do sa update checking
803 	 */
804 	if (atomic_read(&fcport->state) == FCS_ONLINE) {
805 		ql_dbg(ql_dbg_edif, vha, 0x911d,
806 		    "%s Skipping PRLI complete based on rekey\n", __func__);
807 		appplogireply.prli_status = 1;
808 		SET_DID_STATUS(bsg_reply->result, DID_OK);
809 		qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
810 		goto errstate_exit;
811 	}
812 
813 	/* make sure in AUTH_PENDING or else reject */
814 	if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
815 		ql_dbg(ql_dbg_edif, vha, 0x911e,
816 		    "%s wwpn %8phC is not in auth pending state (%x)\n",
817 		    __func__, fcport->port_name, fcport->disc_state);
818 		SET_DID_STATUS(bsg_reply->result, DID_OK);
819 		appplogireply.prli_status = 0;
820 		goto errstate_exit;
821 	}
822 
823 	SET_DID_STATUS(bsg_reply->result, DID_OK);
824 	appplogireply.prli_status = 1;
825 	fcport->edif.authok = 1;
826 	if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
827 		ql_dbg(ql_dbg_edif, vha, 0x911e,
828 		    "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
829 		    __func__, fcport->port_name, fcport->edif.tx_sa_set,
830 		    fcport->edif.rx_sa_set);
831 		SET_DID_STATUS(bsg_reply->result, DID_OK);
832 		appplogireply.prli_status = 0;
833 		goto errstate_exit;
834 
835 	} else {
836 		ql_dbg(ql_dbg_edif, vha, 0x911e,
837 		    "%s wwpn %8phC Both SA(s) updated.\n", __func__,
838 		    fcport->port_name);
839 		fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
840 		fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
841 	}
842 
843 	if (qla_ini_mode_enabled(vha)) {
844 		ql_dbg(ql_dbg_edif, vha, 0x911e,
845 		    "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
846 		    __func__, fcport->port_name);
847 		qla24xx_post_prli_work(vha, fcport);
848 	}
849 
850 errstate_exit:
851 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
852 	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
853 							       bsg_job->reply_payload.sg_cnt,
854 							       &appplogireply,
855 							       sizeof(struct app_plogi_reply));
856 
857 	return 0;
858 }
859 
860 /**
861  * qla_edif_app_authfail - authentication by app has failed.  Driver is given
862  *   notice to tear down current session.
863  * @vha: host adapter pointer
864  * @bsg_job: user request
865  */
866 static int
qla_edif_app_authfail(scsi_qla_host_t * vha,struct bsg_job * bsg_job)867 qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
868 {
869 	int32_t			rval = 0;
870 	struct auth_complete_cmd appplogifail;
871 	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
872 	fc_port_t		*fcport = NULL;
873 	port_id_t		portid = {0};
874 
875 	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);
876 
877 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
878 	    bsg_job->request_payload.sg_cnt, &appplogifail,
879 	    sizeof(struct auth_complete_cmd));
880 
881 	/* silent unaligned access warning */
882 	portid.b.domain = appplogifail.u.d_id.b.domain;
883 	portid.b.area   = appplogifail.u.d_id.b.area;
884 	portid.b.al_pa  = appplogifail.u.d_id.b.al_pa;
885 
886 	/*
887 	 * TODO: edif: app has failed this plogi. Inform driver to
888 	 * take any action (if any).
889 	 */
890 	switch (appplogifail.type) {
891 	case PL_TYPE_WWPN:
892 		fcport = qla2x00_find_fcport_by_wwpn(vha,
893 		    appplogifail.u.wwpn, 0);
894 		SET_DID_STATUS(bsg_reply->result, DID_OK);
895 		break;
896 	case PL_TYPE_DID:
897 		fcport = qla2x00_find_fcport_by_pid(vha, &portid);
898 		if (!fcport)
899 			ql_dbg(ql_dbg_edif, vha, 0x911d,
900 			    "%s d_id lookup failed: %x\n", __func__,
901 			    portid.b24);
902 		SET_DID_STATUS(bsg_reply->result, DID_OK);
903 		break;
904 	default:
905 		ql_dbg(ql_dbg_edif, vha, 0x911e,
906 		    "%s undefined type: %x\n", __func__,
907 		    appplogifail.type);
908 		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
909 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
910 		rval = -1;
911 		break;
912 	}
913 
914 	ql_dbg(ql_dbg_edif, vha, 0x911d,
915 	    "%s fcport is 0x%p\n", __func__, fcport);
916 
917 	if (fcport) {
918 		/* set/reset edif values and flags */
919 		ql_dbg(ql_dbg_edif, vha, 0x911e,
920 		    "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
921 		    __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);
922 
923 		if (qla_ini_mode_enabled(fcport->vha)) {
924 			fcport->send_els_logo = 1;
925 			qlt_schedule_sess_for_deletion(fcport);
926 		}
927 	}
928 
929 	return rval;
930 }
931 
932 /**
933  * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
934  *   [initiator|target] mode.  It can specific session with specific nport id or
935  *   all sessions.
936  * @vha: host adapter pointer
937  * @bsg_job: user request pointer
938  */
939 static int
qla_edif_app_getfcinfo(scsi_qla_host_t * vha,struct bsg_job * bsg_job)940 qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
941 {
942 	int32_t			rval = 0;
943 	int32_t			pcnt = 0;
944 	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
945 	struct app_pinfo_req	app_req;
946 	struct app_pinfo_reply	*app_reply;
947 	port_id_t		tdid;
948 
949 	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__);
950 
951 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
952 	    bsg_job->request_payload.sg_cnt, &app_req,
953 	    sizeof(struct app_pinfo_req));
954 
955 	app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
956 	    sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL);
957 
958 	if (!app_reply) {
959 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
960 		rval = -1;
961 	} else {
962 		struct fc_port	*fcport = NULL, *tf;
963 
964 		app_reply->version = EDIF_VERSION1;
965 
966 		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
967 			if (!(fcport->flags & FCF_FCSP_DEVICE))
968 				continue;
969 
970 			tdid.b.domain = app_req.remote_pid.domain;
971 			tdid.b.area = app_req.remote_pid.area;
972 			tdid.b.al_pa = app_req.remote_pid.al_pa;
973 
974 			ql_dbg(ql_dbg_edif, vha, 0x2058,
975 			    "APP request entry - portid=%06x.\n", tdid.b24);
976 
977 			/* Ran out of space */
978 			if (pcnt >= app_req.num_ports)
979 				break;
980 
981 			if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
982 				continue;
983 
984 			if (!N2N_TOPO(vha->hw)) {
985 				if (fcport->scan_state != QLA_FCPORT_FOUND)
986 					continue;
987 
988 				if (fcport->port_type == FCT_UNKNOWN &&
989 				    !fcport->fc4_features)
990 					rval = qla24xx_async_gffid(vha, fcport,
991 								   true);
992 
993 				if (!rval &&
994 				    !(fcport->fc4_features & FC4_FF_TARGET ||
995 				      fcport->port_type &
996 				      (FCT_TARGET | FCT_NVME_TARGET)))
997 					continue;
998 			}
999 
1000 			rval = 0;
1001 
1002 			app_reply->ports[pcnt].version = EDIF_VERSION1;
1003 			app_reply->ports[pcnt].remote_type =
1004 				VND_CMD_RTYPE_UNKNOWN;
1005 			if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
1006 				app_reply->ports[pcnt].remote_type |=
1007 					VND_CMD_RTYPE_TARGET;
1008 			if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR))
1009 				app_reply->ports[pcnt].remote_type |=
1010 					VND_CMD_RTYPE_INITIATOR;
1011 
1012 			app_reply->ports[pcnt].remote_pid = fcport->d_id;
1013 
1014 			ql_dbg(ql_dbg_edif, vha, 0x2058,
1015 			    "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n",
1016 			    fcport->node_name, fcport->port_name, pcnt,
1017 			    fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE);
1018 
1019 			switch (fcport->edif.auth_state) {
1020 			case VND_CMD_AUTH_STATE_ELS_RCVD:
1021 				if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
1022 					fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
1023 					app_reply->ports[pcnt].auth_state =
1024 						VND_CMD_AUTH_STATE_NEEDED;
1025 				} else {
1026 					app_reply->ports[pcnt].auth_state =
1027 						VND_CMD_AUTH_STATE_ELS_RCVD;
1028 				}
1029 				break;
1030 			default:
1031 				app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
1032 				break;
1033 			}
1034 
1035 			memcpy(app_reply->ports[pcnt].remote_wwpn,
1036 			    fcport->port_name, 8);
1037 
1038 			app_reply->ports[pcnt].remote_state =
1039 				(atomic_read(&fcport->state) ==
1040 				    FCS_ONLINE ? 1 : 0);
1041 
1042 			pcnt++;
1043 
1044 			if (tdid.b24 != 0)
1045 				break;
1046 		}
1047 		app_reply->port_count = pcnt;
1048 		SET_DID_STATUS(bsg_reply->result, DID_OK);
1049 	}
1050 
1051 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1052 	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1053 							       bsg_job->reply_payload.sg_cnt,
1054 							       app_reply,
1055 							       sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt);
1056 
1057 	kfree(app_reply);
1058 
1059 	return rval;
1060 }
1061 
1062 /**
1063  * qla_edif_app_getstats - app would like to read various statistics info
1064  * @vha: host adapter pointer
1065  * @bsg_job: user request
1066  */
1067 static int32_t
qla_edif_app_getstats(scsi_qla_host_t * vha,struct bsg_job * bsg_job)1068 qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
1069 {
1070 	int32_t			rval = 0;
1071 	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
1072 	uint32_t size;
1073 
1074 	struct app_sinfo_req	app_req;
1075 	struct app_stats_reply	*app_reply;
1076 	uint32_t pcnt = 0;
1077 
1078 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1079 	    bsg_job->request_payload.sg_cnt, &app_req,
1080 	    sizeof(struct app_sinfo_req));
1081 	if (app_req.num_ports == 0) {
1082 		ql_dbg(ql_dbg_async, vha, 0x911d,
1083 		   "%s app did not indicate number of ports to return\n",
1084 		    __func__);
1085 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1086 		rval = -1;
1087 	}
1088 
1089 	size = sizeof(struct app_stats_reply) +
1090 	    (sizeof(struct app_sinfo) * app_req.num_ports);
1091 
1092 	app_reply = kzalloc(size, GFP_KERNEL);
1093 	if (!app_reply) {
1094 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1095 		rval = -1;
1096 	} else {
1097 		struct fc_port	*fcport = NULL, *tf;
1098 
1099 		app_reply->version = EDIF_VERSION1;
1100 
1101 		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
1102 			if (fcport->edif.enable) {
1103 				if (pcnt > app_req.num_ports)
1104 					break;
1105 
1106 				app_reply->elem[pcnt].rekey_count =
1107 				    fcport->edif.rekey_cnt;
1108 				app_reply->elem[pcnt].tx_bytes =
1109 				    fcport->edif.tx_bytes;
1110 				app_reply->elem[pcnt].rx_bytes =
1111 				    fcport->edif.rx_bytes;
1112 
1113 				memcpy(app_reply->elem[pcnt].remote_wwpn,
1114 				    fcport->port_name, 8);
1115 
1116 				pcnt++;
1117 			}
1118 		}
1119 		app_reply->elem_count = pcnt;
1120 		SET_DID_STATUS(bsg_reply->result, DID_OK);
1121 	}
1122 
1123 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1124 	bsg_reply->reply_payload_rcv_len =
1125 	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1126 	       bsg_job->reply_payload.sg_cnt, app_reply,
1127 	       sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt));
1128 
1129 	kfree(app_reply);
1130 
1131 	return rval;
1132 }
1133 
1134 static int32_t
qla_edif_ack(scsi_qla_host_t * vha,struct bsg_job * bsg_job)1135 qla_edif_ack(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
1136 {
1137 	struct fc_port *fcport;
1138 	struct aen_complete_cmd ack;
1139 	struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
1140 
1141 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1142 			  bsg_job->request_payload.sg_cnt, &ack, sizeof(ack));
1143 
1144 	ql_dbg(ql_dbg_edif, vha, 0x70cf,
1145 	       "%s: %06x event_code %x\n",
1146 	       __func__, ack.port_id.b24, ack.event_code);
1147 
1148 	fcport = qla2x00_find_fcport_by_pid(vha, &ack.port_id);
1149 	SET_DID_STATUS(bsg_reply->result, DID_OK);
1150 
1151 	if (!fcport) {
1152 		ql_dbg(ql_dbg_edif, vha, 0x70cf,
1153 		       "%s: unable to find fcport %06x \n",
1154 		       __func__, ack.port_id.b24);
1155 		return 0;
1156 	}
1157 
1158 	switch (ack.event_code) {
1159 	case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
1160 		fcport->edif.sess_down_acked = 1;
1161 		break;
1162 	default:
1163 		break;
1164 	}
1165 	return 0;
1166 }
1167 
qla_edif_consume_dbell(scsi_qla_host_t * vha,struct bsg_job * bsg_job)1168 static int qla_edif_consume_dbell(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
1169 {
1170 	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
1171 	u32 sg_skip, reply_payload_len;
1172 	bool keep;
1173 	struct edb_node *dbnode = NULL;
1174 	struct edif_app_dbell ap;
1175 	int dat_size = 0;
1176 
1177 	sg_skip = 0;
1178 	reply_payload_len = bsg_job->reply_payload.payload_len;
1179 
1180 	while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) {
1181 		dbnode = qla_edb_getnext(vha);
1182 		if (dbnode) {
1183 			keep = true;
1184 			dat_size = 0;
1185 			ap.event_code = dbnode->ntype;
1186 			switch (dbnode->ntype) {
1187 			case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
1188 			case VND_CMD_AUTH_STATE_NEEDED:
1189 				ap.port_id = dbnode->u.plogi_did;
1190 				dat_size += sizeof(ap.port_id);
1191 				break;
1192 			case VND_CMD_AUTH_STATE_ELS_RCVD:
1193 				ap.port_id = dbnode->u.els_sid;
1194 				dat_size += sizeof(ap.port_id);
1195 				break;
1196 			case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
1197 				ap.port_id = dbnode->u.sa_aen.port_id;
1198 				memcpy(&ap.event_data, &dbnode->u,
1199 				    sizeof(struct edif_sa_update_aen));
1200 				dat_size += sizeof(struct edif_sa_update_aen);
1201 				break;
1202 			default:
1203 				keep = false;
1204 				ql_log(ql_log_warn, vha, 0x09102,
1205 					"%s unknown DB type=%d %p\n",
1206 					__func__, dbnode->ntype, dbnode);
1207 				break;
1208 			}
1209 			ap.event_data_size = dat_size;
1210 			/* 8 = sizeof(ap.event_code + ap.event_data_size) */
1211 			dat_size += 8;
1212 			if (keep)
1213 				sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list,
1214 						bsg_job->reply_payload.sg_cnt,
1215 						&ap, dat_size, sg_skip, false);
1216 
1217 			ql_dbg(ql_dbg_edif, vha, 0x09102,
1218 				"%s Doorbell consumed : type=%d %p\n",
1219 				__func__, dbnode->ntype, dbnode);
1220 
1221 			kfree(dbnode);
1222 		} else {
1223 			break;
1224 		}
1225 	}
1226 
1227 	SET_DID_STATUS(bsg_reply->result, DID_OK);
1228 	bsg_reply->reply_payload_rcv_len = sg_skip;
1229 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1230 
1231 	return 0;
1232 }
1233 
__qla_edif_dbell_bsg_done(scsi_qla_host_t * vha,struct bsg_job * bsg_job,u32 delay)1234 static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
1235 	u32 delay)
1236 {
1237 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1238 
1239 	/* small sleep for doorbell events to accumulate */
1240 	if (delay)
1241 		msleep(delay);
1242 
1243 	qla_edif_consume_dbell(vha, bsg_job);
1244 
1245 	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
1246 }
1247 
qla_edif_dbell_bsg_done(scsi_qla_host_t * vha)1248 static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha)
1249 {
1250 	unsigned long flags;
1251 	struct bsg_job *prev_bsg_job = NULL;
1252 
1253 	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1254 	if (vha->e_dbell.dbell_bsg_job) {
1255 		prev_bsg_job = vha->e_dbell.dbell_bsg_job;
1256 		vha->e_dbell.dbell_bsg_job = NULL;
1257 	}
1258 	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1259 
1260 	if (prev_bsg_job)
1261 		__qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0);
1262 }
1263 
1264 static int
qla_edif_dbell_bsg(scsi_qla_host_t * vha,struct bsg_job * bsg_job)1265 qla_edif_dbell_bsg(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
1266 {
1267 	unsigned long flags;
1268 	bool return_bsg = false;
1269 
1270 	/* flush previous dbell bsg */
1271 	qla_edif_dbell_bsg_done(vha);
1272 
1273 	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1274 	if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) {
1275 		/*
1276 		 * when the next db event happens, bsg_job will return.
1277 		 * Otherwise, timer will return it.
1278 		 */
1279 		vha->e_dbell.dbell_bsg_job = bsg_job;
1280 		vha->e_dbell.bsg_expire = jiffies + 10 * HZ;
1281 	} else {
1282 		return_bsg = true;
1283 	}
1284 	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1285 
1286 	if (return_bsg)
1287 		__qla_edif_dbell_bsg_done(vha, bsg_job, 1);
1288 
1289 	return 0;
1290 }
1291 
1292 int32_t
qla_edif_app_mgmt(struct bsg_job * bsg_job)1293 qla_edif_app_mgmt(struct bsg_job *bsg_job)
1294 {
1295 	struct fc_bsg_request	*bsg_request = bsg_job->request;
1296 	struct fc_bsg_reply	*bsg_reply = bsg_job->reply;
1297 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1298 	scsi_qla_host_t		*vha = shost_priv(host);
1299 	struct app_id		appcheck;
1300 	bool done = true;
1301 	int32_t         rval = 0;
1302 	uint32_t	vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1303 	u32 level = ql_dbg_edif;
1304 
1305 	/* doorbell is high traffic */
1306 	if (vnd_sc == QL_VND_SC_READ_DBELL)
1307 		level = 0;
1308 
1309 	ql_dbg(level, vha, 0x911d, "%s vnd subcmd=%x\n",
1310 	    __func__, vnd_sc);
1311 
1312 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1313 	    bsg_job->request_payload.sg_cnt, &appcheck,
1314 	    sizeof(struct app_id));
1315 
1316 	if (!vha->hw->flags.edif_enabled ||
1317 		test_bit(VPORT_DELETE, &vha->dpc_flags)) {
1318 		ql_dbg(level, vha, 0x911d,
1319 		    "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
1320 		    __func__, bsg_job, vha->dpc_flags);
1321 
1322 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1323 		goto done;
1324 	}
1325 
1326 	if (!qla_edif_app_check(vha, appcheck)) {
1327 		ql_dbg(level, vha, 0x911d,
1328 		    "%s app checked failed.\n",
1329 		    __func__);
1330 
1331 		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1332 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1333 		goto done;
1334 	}
1335 
1336 	switch (vnd_sc) {
1337 	case QL_VND_SC_SA_UPDATE:
1338 		done = false;
1339 		rval = qla24xx_sadb_update(bsg_job);
1340 		break;
1341 	case QL_VND_SC_APP_START:
1342 		rval = qla_edif_app_start(vha, bsg_job);
1343 		break;
1344 	case QL_VND_SC_APP_STOP:
1345 		rval = qla_edif_app_stop(vha, bsg_job);
1346 		break;
1347 	case QL_VND_SC_AUTH_OK:
1348 		rval = qla_edif_app_authok(vha, bsg_job);
1349 		break;
1350 	case QL_VND_SC_AUTH_FAIL:
1351 		rval = qla_edif_app_authfail(vha, bsg_job);
1352 		break;
1353 	case QL_VND_SC_GET_FCINFO:
1354 		rval = qla_edif_app_getfcinfo(vha, bsg_job);
1355 		break;
1356 	case QL_VND_SC_GET_STATS:
1357 		rval = qla_edif_app_getstats(vha, bsg_job);
1358 		break;
1359 	case QL_VND_SC_AEN_COMPLETE:
1360 		rval = qla_edif_ack(vha, bsg_job);
1361 		break;
1362 	case QL_VND_SC_READ_DBELL:
1363 		rval = qla_edif_dbell_bsg(vha, bsg_job);
1364 		done = false;
1365 		break;
1366 	default:
1367 		ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
1368 		    __func__,
1369 		    bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
1370 		rval = EXT_STATUS_INVALID_PARAM;
1371 		done = false;
1372 		break;
1373 	}
1374 
1375 done:
1376 	if (done) {
1377 		ql_dbg(level, vha, 0x7009,
1378 		    "%s: %d  bsg ptr done %p\n", __func__, __LINE__, bsg_job);
1379 		bsg_job_done(bsg_job, bsg_reply->result,
1380 		    bsg_reply->reply_payload_rcv_len);
1381 	}
1382 
1383 	return rval;
1384 }
1385 
1386 static struct edif_sa_ctl *
qla_edif_add_sa_ctl(fc_port_t * fcport,struct qla_sa_update_frame * sa_frame,int dir)1387 qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame,
1388 	int dir)
1389 {
1390 	struct	edif_sa_ctl *sa_ctl;
1391 	struct qla_sa_update_frame *sap;
1392 	int	index = sa_frame->fast_sa_index;
1393 	unsigned long flags = 0;
1394 
1395 	sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL);
1396 	if (!sa_ctl) {
1397 		/* couldn't get space */
1398 		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1399 		    "unable to allocate SA CTL\n");
1400 		return NULL;
1401 	}
1402 
1403 	/*
1404 	 * need to allocate sa_index here and save it
1405 	 * in both sa_ctl->index and sa_frame->fast_sa_index;
1406 	 * If alloc fails then delete sa_ctl and return NULL
1407 	 */
1408 	INIT_LIST_HEAD(&sa_ctl->next);
1409 	sap = &sa_ctl->sa_frame;
1410 	*sap = *sa_frame;
1411 	sa_ctl->index = index;
1412 	sa_ctl->fcport = fcport;
1413 	sa_ctl->flags = 0;
1414 	sa_ctl->state = 0L;
1415 	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1416 	    "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
1417 	    __func__, sa_ctl, sa_ctl->index, sa_ctl->state);
1418 	spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
1419 	if (dir == SAU_FLG_TX)
1420 		list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list);
1421 	else
1422 		list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
1423 	spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
1424 
1425 	return sa_ctl;
1426 }
1427 
1428 void
qla_edif_flush_sa_ctl_lists(fc_port_t * fcport)1429 qla_edif_flush_sa_ctl_lists(fc_port_t *fcport)
1430 {
1431 	struct edif_sa_ctl *sa_ctl, *tsa_ctl;
1432 	unsigned long flags = 0;
1433 
1434 	spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
1435 
1436 	list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list,
1437 	    next) {
1438 		list_del(&sa_ctl->next);
1439 		kfree(sa_ctl);
1440 	}
1441 
1442 	list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list,
1443 	    next) {
1444 		list_del(&sa_ctl->next);
1445 		kfree(sa_ctl);
1446 	}
1447 
1448 	spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
1449 }
1450 
1451 struct edif_sa_ctl *
qla_edif_find_sa_ctl_by_index(fc_port_t * fcport,int index,int dir)1452 qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir)
1453 {
1454 	struct edif_sa_ctl *sa_ctl, *tsa_ctl;
1455 	struct list_head *sa_list;
1456 
1457 	if (dir == SAU_FLG_TX)
1458 		sa_list = &fcport->edif.tx_sa_list;
1459 	else
1460 		sa_list = &fcport->edif.rx_sa_list;
1461 
1462 	list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) {
1463 		if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) &&
1464 		    sa_ctl->index == index)
1465 			return sa_ctl;
1466 	}
1467 	return NULL;
1468 }
1469 
1470 /* add the sa to the correct list */
1471 static int
qla24xx_check_sadb_avail_slot(struct bsg_job * bsg_job,fc_port_t * fcport,struct qla_sa_update_frame * sa_frame)1472 qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
1473 	struct qla_sa_update_frame *sa_frame)
1474 {
1475 	struct edif_sa_ctl *sa_ctl = NULL;
1476 	int dir;
1477 	uint16_t sa_index;
1478 
1479 	dir = (sa_frame->flags & SAU_FLG_TX);
1480 
1481 	/* map the spi to an sa_index */
1482 	sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
1483 	if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
1484 		/* process rx delete */
1485 		ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
1486 		    "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
1487 		    __func__, fcport->loop_id, sa_frame->spi);
1488 
1489 		/* build and send the aen */
1490 		fcport->edif.rx_sa_set = 1;
1491 		fcport->edif.rx_sa_pending = 0;
1492 		qla_edb_eventcreate(fcport->vha,
1493 		    VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
1494 		    QL_VND_SA_STAT_SUCCESS,
1495 		    QL_VND_RX_SA_KEY, fcport);
1496 
1497 		/* force a return of good bsg status; */
1498 		return RX_DELETE_NO_EDIF_SA_INDEX;
1499 	} else if (sa_index == INVALID_EDIF_SA_INDEX) {
1500 		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1501 		    "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
1502 		    __func__, sa_frame->spi, dir);
1503 		return INVALID_EDIF_SA_INDEX;
1504 	}
1505 
1506 	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1507 	    "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
1508 	    __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);
1509 
1510 	/* This is a local copy of sa_frame. */
1511 	sa_frame->fast_sa_index = sa_index;
1512 	/* create the sa_ctl */
1513 	sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
1514 	if (!sa_ctl) {
1515 		ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1516 		    "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
1517 		    __func__, sa_frame->spi, dir, sa_index);
1518 		return -1;
1519 	}
1520 
1521 	set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);
1522 
1523 	if (dir == SAU_FLG_TX)
1524 		fcport->edif.tx_rekey_cnt++;
1525 	else
1526 		fcport->edif.rx_rekey_cnt++;
1527 
1528 	ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1529 	    "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
1530 	    __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
1531 	    fcport->edif.tx_rekey_cnt,
1532 	    fcport->edif.rx_rekey_cnt, fcport->loop_id);
1533 
1534 	return 0;
1535 }
1536 
1537 #define QLA_SA_UPDATE_FLAGS_RX_KEY      0x0
1538 #define QLA_SA_UPDATE_FLAGS_TX_KEY      0x2
1539 #define EDIF_MSLEEP_INTERVAL 100
1540 #define EDIF_RETRY_COUNT  50
1541 
1542 int
qla24xx_sadb_update(struct bsg_job * bsg_job)1543 qla24xx_sadb_update(struct bsg_job *bsg_job)
1544 {
1545 	struct	fc_bsg_reply	*bsg_reply = bsg_job->reply;
1546 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1547 	scsi_qla_host_t *vha = shost_priv(host);
1548 	fc_port_t		*fcport = NULL;
1549 	srb_t			*sp = NULL;
1550 	struct edif_list_entry *edif_entry = NULL;
1551 	int			found = 0;
1552 	int			rval = 0;
1553 	int result = 0, cnt;
1554 	struct qla_sa_update_frame sa_frame;
1555 	struct srb_iocb *iocb_cmd;
1556 	port_id_t portid;
1557 
1558 	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
1559 	    "%s entered, vha: 0x%p\n", __func__, vha);
1560 
1561 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1562 	    bsg_job->request_payload.sg_cnt, &sa_frame,
1563 	    sizeof(struct qla_sa_update_frame));
1564 
1565 	/* Check if host is online */
1566 	if (!vha->flags.online) {
1567 		ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
1568 		rval = -EIO;
1569 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1570 		goto done;
1571 	}
1572 
1573 	if (DBELL_INACTIVE(vha)) {
1574 		ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
1575 		rval = -EIO;
1576 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1577 		goto done;
1578 	}
1579 
1580 	/* silent unaligned access warning */
1581 	portid.b.domain = sa_frame.port_id.b.domain;
1582 	portid.b.area   = sa_frame.port_id.b.area;
1583 	portid.b.al_pa  = sa_frame.port_id.b.al_pa;
1584 
1585 	fcport = qla2x00_find_fcport_by_pid(vha, &portid);
1586 	if (fcport) {
1587 		found = 1;
1588 		if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
1589 			fcport->edif.tx_bytes = 0;
1590 		if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY)
1591 			fcport->edif.rx_bytes = 0;
1592 	}
1593 
1594 	if (!found) {
1595 		ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
1596 		    sa_frame.port_id.b24);
1597 		rval = -EINVAL;
1598 		SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
1599 		goto done;
1600 	}
1601 
1602 	/* make sure the nport_handle is valid */
1603 	if (fcport->loop_id == FC_NO_LOOP_ID) {
1604 		ql_dbg(ql_dbg_edif, vha, 0x70e1,
1605 		    "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
1606 		    __func__, fcport->port_name, sa_frame.spi,
1607 		    fcport->disc_state);
1608 		rval = -EINVAL;
1609 		SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
1610 		goto done;
1611 	}
1612 
1613 	/* allocate and queue an sa_ctl */
1614 	result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame);
1615 
1616 	/* failure of bsg */
1617 	if (result == INVALID_EDIF_SA_INDEX) {
1618 		ql_dbg(ql_dbg_edif, vha, 0x70e1,
1619 		    "%s: %8phN, skipping update.\n",
1620 		    __func__, fcport->port_name);
1621 		rval = -EINVAL;
1622 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1623 		goto done;
1624 
1625 	/* rx delete failure */
1626 	} else if (result == RX_DELETE_NO_EDIF_SA_INDEX) {
1627 		ql_dbg(ql_dbg_edif, vha, 0x70e1,
1628 		    "%s: %8phN, skipping rx delete.\n",
1629 		    __func__, fcport->port_name);
1630 		SET_DID_STATUS(bsg_reply->result, DID_OK);
1631 		goto done;
1632 	}
1633 
1634 	ql_dbg(ql_dbg_edif, vha, 0x70e1,
1635 	    "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
1636 	    __func__, fcport->port_name, sa_frame.fast_sa_index,
1637 	    sa_frame.flags);
1638 
1639 	/* looking for rx index and delete */
1640 	if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
1641 	    (sa_frame.flags & SAU_FLG_INV)) {
1642 		uint16_t nport_handle = fcport->loop_id;
1643 		uint16_t sa_index = sa_frame.fast_sa_index;
1644 
1645 		/*
1646 		 * make sure we have an existing rx key, otherwise just process
1647 		 * this as a straight delete just like TX
1648 		 * This is NOT a normal case, it indicates an error recovery or key cleanup
1649 		 * by the ipsec code above us.
1650 		 */
1651 		edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id);
1652 		if (!edif_entry) {
1653 			ql_dbg(ql_dbg_edif, vha, 0x911d,
1654 			    "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
1655 			    __func__, fcport->loop_id, sa_index);
1656 			goto force_rx_delete;
1657 		}
1658 
1659 		/*
1660 		 * if we have a forced delete for rx, remove the sa_index from the edif list
1661 		 * and proceed with normal delete.  The rx delay timer should not be running
1662 		 */
1663 		if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
1664 			qla_edif_list_delete_sa_index(fcport, edif_entry);
1665 			ql_dbg(ql_dbg_edif, vha, 0x911d,
1666 			    "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
1667 			    __func__, fcport->loop_id, sa_index);
1668 			kfree(edif_entry);
1669 			goto force_rx_delete;
1670 		}
1671 
1672 		/*
1673 		 * delayed rx delete
1674 		 *
1675 		 * if delete_sa_index is not invalid then there is already
1676 		 * a delayed index in progress, return bsg bad status
1677 		 */
1678 		if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
1679 			struct edif_sa_ctl *sa_ctl;
1680 
1681 			ql_dbg(ql_dbg_edif, vha, 0x911d,
1682 			    "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
1683 			    __func__, edif_entry->handle, edif_entry->delete_sa_index);
1684 
1685 			/* free up the sa_ctl that was allocated with the sa_index */
1686 			sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
1687 			    (sa_frame.flags & SAU_FLG_TX));
1688 			if (sa_ctl) {
1689 				ql_dbg(ql_dbg_edif, vha, 0x3063,
1690 				    "%s: freeing sa_ctl for index %d\n",
1691 				    __func__, sa_ctl->index);
1692 				qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
1693 			}
1694 
1695 			/* release the sa_index */
1696 			ql_dbg(ql_dbg_edif, vha, 0x3063,
1697 			    "%s: freeing sa_index %d, nph: 0x%x\n",
1698 			    __func__, sa_index, nport_handle);
1699 			qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index);
1700 
1701 			rval = -EINVAL;
1702 			SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1703 			goto done;
1704 		}
1705 
1706 		fcport->edif.rekey_cnt++;
1707 
1708 		/* configure and start the rx delay timer */
1709 		edif_entry->fcport = fcport;
1710 		edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;
1711 
1712 		ql_dbg(ql_dbg_edif, vha, 0x911d,
1713 		    "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
1714 		    __func__, edif_entry, sa_index, nport_handle);
1715 
1716 		/*
1717 		 * Start the timer when we queue the delayed rx delete.
1718 		 * This is an activity timer that goes off if we have not
1719 		 * received packets with the new sa_index
1720 		 */
1721 		add_timer(&edif_entry->timer);
1722 
1723 		/*
1724 		 * sa_delete for rx key with an active rx key including this one
1725 		 * add the delete rx sa index to the hash so we can look for it
1726 		 * in the rsp queue.  Do this after making any changes to the
1727 		 * edif_entry as part of the rx delete.
1728 		 */
1729 
1730 		ql_dbg(ql_dbg_edif, vha, 0x911d,
1731 		    "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
1732 		    __func__, sa_index, nport_handle, bsg_job);
1733 
1734 		edif_entry->delete_sa_index = sa_index;
1735 
1736 		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1737 		bsg_reply->result = DID_OK << 16;
1738 
1739 		goto done;
1740 
1741 	/*
1742 	 * rx index and update
1743 	 * add the index to the list and continue with normal update
1744 	 */
1745 	} else if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
1746 	    ((sa_frame.flags & SAU_FLG_INV) == 0)) {
1747 		/* sa_update for rx key */
1748 		uint32_t nport_handle = fcport->loop_id;
1749 		uint16_t sa_index = sa_frame.fast_sa_index;
1750 		int result;
1751 
1752 		/*
1753 		 * add the update rx sa index to the hash so we can look for it
1754 		 * in the rsp queue and continue normally
1755 		 */
1756 
1757 		ql_dbg(ql_dbg_edif, vha, 0x911d,
1758 		    "%s:  adding update sa_index %d, lid 0x%x to edif_list\n",
1759 		    __func__, sa_index, nport_handle);
1760 
1761 		result = qla_edif_list_add_sa_update_index(fcport, sa_index,
1762 		    nport_handle);
1763 		if (result) {
1764 			ql_dbg(ql_dbg_edif, vha, 0x911d,
1765 			    "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
1766 			    __func__, sa_index, nport_handle);
1767 		}
1768 	}
1769 	if (sa_frame.flags & SAU_FLG_GMAC_MODE)
1770 		fcport->edif.aes_gmac = 1;
1771 	else
1772 		fcport->edif.aes_gmac = 0;
1773 
1774 force_rx_delete:
1775 	/*
1776 	 * sa_update for both rx and tx keys, sa_delete for tx key
1777 	 * immediately process the request
1778 	 */
1779 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1780 	if (!sp) {
1781 		rval = -ENOMEM;
1782 		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
1783 		goto done;
1784 	}
1785 
1786 	sp->type = SRB_SA_UPDATE;
1787 	sp->name = "bsg_sa_update";
1788 	sp->u.bsg_job = bsg_job;
1789 	/* sp->free = qla2x00_bsg_sp_free; */
1790 	sp->free = qla2x00_rel_sp;
1791 	sp->done = qla2x00_bsg_job_done;
1792 	iocb_cmd = &sp->u.iocb_cmd;
1793 	iocb_cmd->u.sa_update.sa_frame  = sa_frame;
1794 	cnt = 0;
1795 retry:
1796 	rval = qla2x00_start_sp(sp);
1797 	switch (rval) {
1798 	case QLA_SUCCESS:
1799 		break;
1800 	case EAGAIN:
1801 		msleep(EDIF_MSLEEP_INTERVAL);
1802 		cnt++;
1803 		if (cnt < EDIF_RETRY_COUNT)
1804 			goto retry;
1805 
1806 		fallthrough;
1807 	default:
1808 		ql_log(ql_dbg_edif, vha, 0x70e3,
1809 		       "%s qla2x00_start_sp failed=%d.\n",
1810 		       __func__, rval);
1811 
1812 		qla2x00_rel_sp(sp);
1813 		rval = -EIO;
1814 		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
1815 		goto done;
1816 	}
1817 
1818 	ql_dbg(ql_dbg_edif, vha, 0x911d,
1819 	    "%s:  %s sent, hdl=%x, portid=%06x.\n",
1820 	    __func__, sp->name, sp->handle, fcport->d_id.b24);
1821 
1822 	fcport->edif.rekey_cnt++;
1823 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1824 	SET_DID_STATUS(bsg_reply->result, DID_OK);
1825 
1826 	return 0;
1827 
1828 /*
1829  * send back error status
1830  */
1831 done:
1832 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1833 	ql_dbg(ql_dbg_edif, vha, 0x911d,
1834 	    "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
1835 	    __func__, bsg_reply->result, bsg_job);
1836 	bsg_job_done(bsg_job, bsg_reply->result,
1837 	    bsg_reply->reply_payload_rcv_len);
1838 
1839 	return 0;
1840 }
1841 
1842 static void
qla_enode_free(scsi_qla_host_t * vha,struct enode * node)1843 qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
1844 {
1845 	node->ntype = N_UNDEF;
1846 	kfree(node);
1847 }
1848 
1849 /**
1850  * qla_enode_init - initialize enode structs & lock
1851  * @vha: host adapter pointer
1852  *
1853  * should only be called when driver attaching
1854  */
1855 void
qla_enode_init(scsi_qla_host_t * vha)1856 qla_enode_init(scsi_qla_host_t *vha)
1857 {
1858 	struct	qla_hw_data *ha = vha->hw;
1859 	char	name[32];
1860 
1861 	if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) {
1862 		/* list still active - error */
1863 		ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
1864 		    __func__);
1865 		return;
1866 	}
1867 
1868 	/* initialize lock which protects pur_core & init list */
1869 	spin_lock_init(&vha->pur_cinfo.pur_lock);
1870 	INIT_LIST_HEAD(&vha->pur_cinfo.head);
1871 
1872 	snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME,
1873 	    ha->pdev->device);
1874 }
1875 
1876 /**
1877  * qla_enode_stop - stop and clear and enode data
1878  * @vha: host adapter pointer
1879  *
1880  * called when app notified it is exiting
1881  */
1882 void
qla_enode_stop(scsi_qla_host_t * vha)1883 qla_enode_stop(scsi_qla_host_t *vha)
1884 {
1885 	unsigned long flags;
1886 	struct enode *node, *q;
1887 
1888 	if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
1889 		/* doorbell list not enabled */
1890 		ql_dbg(ql_dbg_edif, vha, 0x09102,
1891 		    "%s enode not active\n", __func__);
1892 		return;
1893 	}
1894 
1895 	/* grab lock so list doesn't move */
1896 	spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1897 
1898 	vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */
1899 
1900 	/* hopefully this is a null list at this point */
1901 	list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
1902 		ql_dbg(ql_dbg_edif, vha, 0x910f,
1903 		    "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
1904 		    node->dinfo.nodecnt);
1905 		list_del_init(&node->list);
1906 		qla_enode_free(vha, node);
1907 	}
1908 	spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1909 }
1910 
qla_enode_clear(scsi_qla_host_t * vha,port_id_t portid)1911 static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid)
1912 {
1913 	unsigned    long flags;
1914 	struct enode    *e, *tmp;
1915 	struct purexevent   *purex;
1916 	LIST_HEAD(enode_list);
1917 
1918 	if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
1919 		ql_dbg(ql_dbg_edif, vha, 0x09102,
1920 		       "%s enode not active\n", __func__);
1921 		return;
1922 	}
1923 	spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1924 	list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) {
1925 		purex = &e->u.purexinfo;
1926 		if (purex->pur_info.pur_sid.b24 == portid.b24) {
1927 			ql_dbg(ql_dbg_edif, vha, 0x911d,
1928 			    "%s free ELS sid=%06x. xchg %x, nb=%xh\n",
1929 			    __func__, portid.b24,
1930 			    purex->pur_info.pur_rx_xchg_address,
1931 			    purex->pur_info.pur_bytes_rcvd);
1932 
1933 			list_del_init(&e->list);
1934 			list_add_tail(&e->list, &enode_list);
1935 		}
1936 	}
1937 	spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1938 
1939 	list_for_each_entry_safe(e, tmp, &enode_list, list) {
1940 		list_del_init(&e->list);
1941 		qla_enode_free(vha, e);
1942 	}
1943 }
1944 
1945 /*
1946  *  allocate enode struct and populate buffer
1947  *  returns: enode pointer with buffers
1948  *           NULL on error
1949  */
1950 static struct enode *
qla_enode_alloc(scsi_qla_host_t * vha,uint32_t ntype)1951 qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype)
1952 {
1953 	struct enode		*node;
1954 	struct purexevent	*purex;
1955 
1956 	node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
1957 	if (!node)
1958 		return NULL;
1959 
1960 	purex = &node->u.purexinfo;
1961 	purex->msgp = (u8 *)(node + 1);
1962 	purex->msgp_len = ELS_MAX_PAYLOAD;
1963 
1964 	node->ntype = ntype;
1965 	INIT_LIST_HEAD(&node->list);
1966 	return node;
1967 }
1968 
1969 static void
qla_enode_add(scsi_qla_host_t * vha,struct enode * ptr)1970 qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr)
1971 {
1972 	unsigned long flags;
1973 
1974 	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109,
1975 	    "%s add enode for type=%x, cnt=%x\n",
1976 	    __func__, ptr->ntype, ptr->dinfo.nodecnt);
1977 
1978 	spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1979 	list_add_tail(&ptr->list, &vha->pur_cinfo.head);
1980 	spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1981 
1982 	return;
1983 }
1984 
1985 static struct enode *
qla_enode_find(scsi_qla_host_t * vha,uint32_t ntype,uint32_t p1,uint32_t p2)1986 qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
1987 {
1988 	struct enode		*node_rtn = NULL;
1989 	struct enode		*list_node, *q;
1990 	unsigned long		flags;
1991 	uint32_t		sid;
1992 	struct purexevent	*purex;
1993 
1994 	/* secure the list from moving under us */
1995 	spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1996 
1997 	list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) {
1998 
1999 		/* node type determines what p1 and p2 are */
2000 		purex = &list_node->u.purexinfo;
2001 		sid = p1;
2002 
2003 		if (purex->pur_info.pur_sid.b24 == sid) {
2004 			/* found it and its complete */
2005 			node_rtn = list_node;
2006 			list_del(&list_node->list);
2007 			break;
2008 		}
2009 	}
2010 
2011 	spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
2012 
2013 	return node_rtn;
2014 }
2015 
2016 /**
2017  * qla_pur_get_pending - read/return authentication message sent
2018  *  from remote port
2019  * @vha: host adapter pointer
2020  * @fcport: session pointer
2021  * @bsg_job: user request where the message is copy to.
2022  */
2023 static int
qla_pur_get_pending(scsi_qla_host_t * vha,fc_port_t * fcport,struct bsg_job * bsg_job)2024 qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
2025 	struct bsg_job *bsg_job)
2026 {
2027 	struct enode		*ptr;
2028 	struct purexevent	*purex;
2029 	struct qla_bsg_auth_els_reply *rpl =
2030 	    (struct qla_bsg_auth_els_reply *)bsg_job->reply;
2031 
2032 	bsg_job->reply_len = sizeof(*rpl);
2033 
2034 	ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET);
2035 	if (!ptr) {
2036 		ql_dbg(ql_dbg_edif, vha, 0x9111,
2037 		    "%s no enode data found for %8phN sid=%06x\n",
2038 		    __func__, fcport->port_name, fcport->d_id.b24);
2039 		SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY);
2040 		return -EIO;
2041 	}
2042 
2043 	/*
2044 	 * enode is now off the linked list and is ours to deal with
2045 	 */
2046 	purex = &ptr->u.purexinfo;
2047 
2048 	/* Copy info back to caller */
2049 	rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address;
2050 
2051 	SET_DID_STATUS(rpl->r.result, DID_OK);
2052 	rpl->r.reply_payload_rcv_len =
2053 	    sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list,
2054 		bsg_job->reply_payload.sg_cnt, purex->msgp,
2055 		purex->pur_info.pur_bytes_rcvd, 0);
2056 
2057 	/* data copy / passback completed - destroy enode */
2058 	qla_enode_free(vha, ptr);
2059 
2060 	return 0;
2061 }
2062 
2063 /* it is assume qpair lock is held */
2064 static int
qla_els_reject_iocb(scsi_qla_host_t * vha,struct qla_qpair * qp,struct qla_els_pt_arg * a)2065 qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
2066 	struct qla_els_pt_arg *a)
2067 {
2068 	struct els_entry_24xx *els_iocb;
2069 
2070 	els_iocb = __qla2x00_alloc_iocbs(qp, NULL);
2071 	if (!els_iocb) {
2072 		ql_log(ql_log_warn, vha, 0x700c,
2073 		    "qla2x00_alloc_iocbs failed.\n");
2074 		return QLA_FUNCTION_FAILED;
2075 	}
2076 
2077 	qla_els_pt_iocb(vha, els_iocb, a);
2078 
2079 	ql_dbg(ql_dbg_edif, vha, 0x0183,
2080 	    "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n",
2081 	    a->ox_id, a->sid.b24, a->did.b24);
2082 	ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
2083 	    vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
2084 	/* flush iocb to mem before notifying hw doorbell */
2085 	wmb();
2086 	qla2x00_start_iocbs(vha, qp->req);
2087 	return 0;
2088 }
2089 
2090 void
qla_edb_init(scsi_qla_host_t * vha)2091 qla_edb_init(scsi_qla_host_t *vha)
2092 {
2093 	if (DBELL_ACTIVE(vha)) {
2094 		/* list already init'd - error */
2095 		ql_dbg(ql_dbg_edif, vha, 0x09102,
2096 		    "edif db already initialized, cannot reinit\n");
2097 		return;
2098 	}
2099 
2100 	/* initialize lock which protects doorbell & init list */
2101 	spin_lock_init(&vha->e_dbell.db_lock);
2102 	INIT_LIST_HEAD(&vha->e_dbell.head);
2103 }
2104 
qla_edb_clear(scsi_qla_host_t * vha,port_id_t portid)2105 static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
2106 {
2107 	unsigned long flags;
2108 	struct edb_node *e, *tmp;
2109 	port_id_t sid;
2110 	LIST_HEAD(edb_list);
2111 
2112 	if (DBELL_INACTIVE(vha)) {
2113 		/* doorbell list not enabled */
2114 		ql_dbg(ql_dbg_edif, vha, 0x09102,
2115 		       "%s doorbell not enabled\n", __func__);
2116 		return;
2117 	}
2118 
2119 	/* grab lock so list doesn't move */
2120 	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
2121 	list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) {
2122 		switch (e->ntype) {
2123 		case VND_CMD_AUTH_STATE_NEEDED:
2124 		case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
2125 			sid = e->u.plogi_did;
2126 			break;
2127 		case VND_CMD_AUTH_STATE_ELS_RCVD:
2128 			sid = e->u.els_sid;
2129 			break;
2130 		case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
2131 			/* app wants to see this  */
2132 			continue;
2133 		default:
2134 			ql_log(ql_log_warn, vha, 0x09102,
2135 			       "%s unknown node type: %x\n", __func__, e->ntype);
2136 			sid.b24 = 0;
2137 			break;
2138 		}
2139 		if (sid.b24 == portid.b24) {
2140 			ql_dbg(ql_dbg_edif, vha, 0x910f,
2141 			       "%s free doorbell event : node type = %x %p\n",
2142 			       __func__, e->ntype, e);
2143 			list_del_init(&e->list);
2144 			list_add_tail(&e->list, &edb_list);
2145 		}
2146 	}
2147 	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
2148 
2149 	list_for_each_entry_safe(e, tmp, &edb_list, list)
2150 		qla_edb_node_free(vha, e);
2151 }
2152 
2153 /* function called when app is stopping */
2154 
2155 void
qla_edb_stop(scsi_qla_host_t * vha)2156 qla_edb_stop(scsi_qla_host_t *vha)
2157 {
2158 	unsigned long flags;
2159 	struct edb_node *node, *q;
2160 
2161 	if (DBELL_INACTIVE(vha)) {
2162 		/* doorbell list not enabled */
2163 		ql_dbg(ql_dbg_edif, vha, 0x09102,
2164 		    "%s doorbell not enabled\n", __func__);
2165 		return;
2166 	}
2167 
2168 	/* grab lock so list doesn't move */
2169 	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
2170 
2171 	vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */
2172 	/* hopefully this is a null list at this point */
2173 	list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
2174 		ql_dbg(ql_dbg_edif, vha, 0x910f,
2175 		    "%s freeing edb_node type=%x\n",
2176 		    __func__, node->ntype);
2177 		qla_edb_node_free(vha, node);
2178 	}
2179 	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
2180 
2181 	qla_edif_dbell_bsg_done(vha);
2182 }
2183 
2184 static struct edb_node *
qla_edb_node_alloc(scsi_qla_host_t * vha,uint32_t ntype)2185 qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype)
2186 {
2187 	struct edb_node	*node;
2188 
2189 	node = kzalloc(sizeof(*node), GFP_ATOMIC);
2190 	if (!node) {
2191 		/* couldn't get space */
2192 		ql_dbg(ql_dbg_edif, vha, 0x9100,
2193 		    "edb node unable to be allocated\n");
2194 		return NULL;
2195 	}
2196 
2197 	node->ntype = ntype;
2198 	INIT_LIST_HEAD(&node->list);
2199 	return node;
2200 }
2201 
2202 /* adds a already allocated enode to the linked list */
2203 static bool
qla_edb_node_add(scsi_qla_host_t * vha,struct edb_node * ptr)2204 qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
2205 {
2206 	unsigned long		flags;
2207 
2208 	if (DBELL_INACTIVE(vha)) {
2209 		/* doorbell list not enabled */
2210 		ql_dbg(ql_dbg_edif, vha, 0x09102,
2211 		    "%s doorbell not enabled\n", __func__);
2212 		return false;
2213 	}
2214 
2215 	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
2216 	list_add_tail(&ptr->list, &vha->e_dbell.head);
2217 	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
2218 
2219 	return true;
2220 }
2221 
2222 /* adds event to doorbell list */
2223 void
qla_edb_eventcreate(scsi_qla_host_t * vha,uint32_t dbtype,uint32_t data,uint32_t data2,fc_port_t * sfcport)2224 qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
2225 	uint32_t data, uint32_t data2, fc_port_t	*sfcport)
2226 {
2227 	struct edb_node	*edbnode;
2228 	fc_port_t *fcport = sfcport;
2229 	port_id_t id;
2230 
2231 	if (!vha->hw->flags.edif_enabled) {
2232 		/* edif not enabled */
2233 		return;
2234 	}
2235 
2236 	if (DBELL_INACTIVE(vha)) {
2237 		if (fcport)
2238 			fcport->edif.auth_state = dbtype;
2239 		/* doorbell list not enabled */
2240 		ql_dbg(ql_dbg_edif, vha, 0x09102,
2241 		    "%s doorbell not enabled (type=%d\n", __func__, dbtype);
2242 		return;
2243 	}
2244 
2245 	edbnode = qla_edb_node_alloc(vha, dbtype);
2246 	if (!edbnode) {
2247 		ql_dbg(ql_dbg_edif, vha, 0x09102,
2248 		    "%s unable to alloc db node\n", __func__);
2249 		return;
2250 	}
2251 
2252 	if (!fcport) {
2253 		id.b.domain = (data >> 16) & 0xff;
2254 		id.b.area = (data >> 8) & 0xff;
2255 		id.b.al_pa = data & 0xff;
2256 		ql_dbg(ql_dbg_edif, vha, 0x09222,
2257 		    "%s: Arrived s_id: %06x\n", __func__,
2258 		    id.b24);
2259 		fcport = qla2x00_find_fcport_by_pid(vha, &id);
2260 		if (!fcport) {
2261 			ql_dbg(ql_dbg_edif, vha, 0x09102,
2262 			    "%s can't find fcport for sid= 0x%x - ignoring\n",
2263 			__func__, id.b24);
2264 			kfree(edbnode);
2265 			return;
2266 		}
2267 	}
2268 
2269 	/* populate the edb node */
2270 	switch (dbtype) {
2271 	case VND_CMD_AUTH_STATE_NEEDED:
2272 	case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
2273 		edbnode->u.plogi_did.b24 = fcport->d_id.b24;
2274 		break;
2275 	case VND_CMD_AUTH_STATE_ELS_RCVD:
2276 		edbnode->u.els_sid.b24 = fcport->d_id.b24;
2277 		break;
2278 	case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
2279 		edbnode->u.sa_aen.port_id = fcport->d_id;
2280 		edbnode->u.sa_aen.status =  data;
2281 		edbnode->u.sa_aen.key_type =  data2;
2282 		edbnode->u.sa_aen.version = EDIF_VERSION1;
2283 		break;
2284 	default:
2285 		ql_dbg(ql_dbg_edif, vha, 0x09102,
2286 			"%s unknown type: %x\n", __func__, dbtype);
2287 		kfree(edbnode);
2288 		edbnode = NULL;
2289 		break;
2290 	}
2291 
2292 	if (edbnode) {
2293 		if (!qla_edb_node_add(vha, edbnode)) {
2294 			ql_dbg(ql_dbg_edif, vha, 0x09102,
2295 			    "%s unable to add dbnode\n", __func__);
2296 			kfree(edbnode);
2297 			return;
2298 		}
2299 		ql_dbg(ql_dbg_edif, vha, 0x09102,
2300 		    "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
2301 		qla_edif_dbell_bsg_done(vha);
2302 		if (fcport)
2303 			fcport->edif.auth_state = dbtype;
2304 	}
2305 }
2306 
2307 void
qla_edif_timer(scsi_qla_host_t * vha)2308 qla_edif_timer(scsi_qla_host_t *vha)
2309 {
2310 	struct qla_hw_data *ha = vha->hw;
2311 
2312 	if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
2313 		if (DBELL_INACTIVE(vha) &&
2314 		    ha->edif_post_stop_cnt_down) {
2315 			ha->edif_post_stop_cnt_down--;
2316 
2317 			/*
2318 			 * turn off auto 'Plogi Acc + secure=1' feature
2319 			 * Set Add FW option[3]
2320 			 * BIT_15, if.
2321 			 */
2322 			if (ha->edif_post_stop_cnt_down == 0) {
2323 				ql_dbg(ql_dbg_async, vha, 0x911d,
2324 				       "%s chip reset to turn off PLOGI ACC + secure\n",
2325 				       __func__);
2326 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2327 			}
2328 		} else {
2329 			ha->edif_post_stop_cnt_down = 60;
2330 		}
2331 	}
2332 
2333 	if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire))
2334 		qla_edif_dbell_bsg_done(vha);
2335 }
2336 
qla_noop_sp_done(srb_t * sp,int res)2337 static void qla_noop_sp_done(srb_t *sp, int res)
2338 {
2339 	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2340 	/* ref: INIT */
2341 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
2342 }
2343 
2344 /*
2345  * Called from work queue
2346  * build and send the sa_update iocb to delete an rx sa_index
2347  */
2348 int
qla24xx_issue_sa_replace_iocb(scsi_qla_host_t * vha,struct qla_work_evt * e)2349 qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
2350 {
2351 	srb_t *sp;
2352 	fc_port_t	*fcport = NULL;
2353 	struct srb_iocb *iocb_cmd = NULL;
2354 	int rval = QLA_SUCCESS;
2355 	struct	edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl;
2356 	uint16_t nport_handle = e->u.sa_update.nport_handle;
2357 
2358 	ql_dbg(ql_dbg_edif, vha, 0x70e6,
2359 	    "%s: starting,  sa_ctl: %p\n", __func__, sa_ctl);
2360 
2361 	if (!sa_ctl) {
2362 		ql_dbg(ql_dbg_edif, vha, 0x70e6,
2363 		    "sa_ctl allocation failed\n");
2364 		rval = -ENOMEM;
2365 		return rval;
2366 	}
2367 
2368 	fcport = sa_ctl->fcport;
2369 
2370 	/* Alloc SRB structure */
2371 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2372 	if (!sp) {
2373 		ql_dbg(ql_dbg_edif, vha, 0x70e6,
2374 		 "SRB allocation failed\n");
2375 		rval = -ENOMEM;
2376 		goto done;
2377 	}
2378 
2379 	fcport->flags |= FCF_ASYNC_SENT;
2380 	iocb_cmd = &sp->u.iocb_cmd;
2381 	iocb_cmd->u.sa_update.sa_ctl = sa_ctl;
2382 
2383 	ql_dbg(ql_dbg_edif, vha, 0x3073,
2384 	    "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
2385 	    fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle);
2386 	/*
2387 	 * if this is a sadb cleanup delete, mark it so the isr can
2388 	 * take the correct action
2389 	 */
2390 	if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) {
2391 		/* mark this srb as a cleanup delete */
2392 		sp->flags |= SRB_EDIF_CLEANUP_DELETE;
2393 		ql_dbg(ql_dbg_edif, vha, 0x70e6,
2394 		    "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp);
2395 	}
2396 
2397 	sp->type = SRB_SA_REPLACE;
2398 	sp->name = "SA_REPLACE";
2399 	sp->fcport = fcport;
2400 	sp->free = qla2x00_rel_sp;
2401 	sp->done = qla_noop_sp_done;
2402 
2403 	rval = qla2x00_start_sp(sp);
2404 
2405 	if (rval != QLA_SUCCESS) {
2406 		goto done_free_sp;
2407 	}
2408 
2409 	return rval;
2410 done_free_sp:
2411 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
2412 	fcport->flags &= ~FCF_ASYNC_SENT;
2413 done:
2414 	fcport->flags &= ~FCF_ASYNC_ACTIVE;
2415 	return rval;
2416 }
2417 
qla24xx_sa_update_iocb(srb_t * sp,struct sa_update_28xx * sa_update_iocb)2418 void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
2419 {
2420 	int	itr = 0;
2421 	struct	scsi_qla_host		*vha = sp->vha;
2422 	struct	qla_sa_update_frame	*sa_frame =
2423 		&sp->u.iocb_cmd.u.sa_update.sa_frame;
2424 	u8 flags = 0;
2425 
2426 	switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) {
2427 	case 0:
2428 		ql_dbg(ql_dbg_edif, vha, 0x911d,
2429 		    "%s: EDIF SA UPDATE RX IOCB  vha: 0x%p  index: %d\n",
2430 		    __func__, vha, sa_frame->fast_sa_index);
2431 		break;
2432 	case 1:
2433 		ql_dbg(ql_dbg_edif, vha, 0x911d,
2434 		    "%s: EDIF SA DELETE RX IOCB  vha: 0x%p  index: %d\n",
2435 		    __func__, vha, sa_frame->fast_sa_index);
2436 		flags |= SA_FLAG_INVALIDATE;
2437 		break;
2438 	case 2:
2439 		ql_dbg(ql_dbg_edif, vha, 0x911d,
2440 		    "%s: EDIF SA UPDATE TX IOCB  vha: 0x%p  index: %d\n",
2441 		    __func__, vha, sa_frame->fast_sa_index);
2442 		flags |= SA_FLAG_TX;
2443 		break;
2444 	case 3:
2445 		ql_dbg(ql_dbg_edif, vha, 0x911d,
2446 		    "%s: EDIF SA DELETE TX IOCB  vha: 0x%p  index: %d\n",
2447 		    __func__, vha, sa_frame->fast_sa_index);
2448 		flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE;
2449 		break;
2450 	}
2451 
2452 	sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
2453 	sa_update_iocb->entry_count = 1;
2454 	sa_update_iocb->sys_define = 0;
2455 	sa_update_iocb->entry_status = 0;
2456 	sa_update_iocb->handle = sp->handle;
2457 	sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id);
2458 	sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
2459 	sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2460 	sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
2461 	sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2462 
2463 	sa_update_iocb->flags = flags;
2464 	sa_update_iocb->salt = cpu_to_le32(sa_frame->salt);
2465 	sa_update_iocb->spi = cpu_to_le32(sa_frame->spi);
2466 	sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index);
2467 
2468 	sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP;
2469 	if (sp->fcport->edif.aes_gmac)
2470 		sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC;
2471 
2472 	if (sa_frame->flags & SAU_FLG_KEY256) {
2473 		sa_update_iocb->sa_control |= SA_CNTL_KEY256;
2474 		for (itr = 0; itr < 32; itr++)
2475 			sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
2476 	} else {
2477 		sa_update_iocb->sa_control |= SA_CNTL_KEY128;
2478 		for (itr = 0; itr < 16; itr++)
2479 			sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
2480 	}
2481 
2482 	ql_dbg(ql_dbg_edif, vha, 0x921d,
2483 	    "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
2484 	    __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
2485 	    sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index,
2486 	    sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle,
2487 	    sp->fcport->edif.aes_gmac);
2488 
2489 	if (sa_frame->flags & SAU_FLG_TX)
2490 		sp->fcport->edif.tx_sa_pending = 1;
2491 	else
2492 		sp->fcport->edif.rx_sa_pending = 1;
2493 
2494 	sp->fcport->vha->qla_stats.control_requests++;
2495 }
2496 
2497 void
qla24xx_sa_replace_iocb(srb_t * sp,struct sa_update_28xx * sa_update_iocb)2498 qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
2499 {
2500 	struct	scsi_qla_host		*vha = sp->vha;
2501 	struct srb_iocb *srb_iocb = &sp->u.iocb_cmd;
2502 	struct	edif_sa_ctl		*sa_ctl = srb_iocb->u.sa_update.sa_ctl;
2503 	uint16_t nport_handle = sp->fcport->loop_id;
2504 
2505 	sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
2506 	sa_update_iocb->entry_count = 1;
2507 	sa_update_iocb->sys_define = 0;
2508 	sa_update_iocb->entry_status = 0;
2509 	sa_update_iocb->handle = sp->handle;
2510 
2511 	sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle);
2512 
2513 	sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
2514 	sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2515 	sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
2516 	sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2517 
2518 	/* Invalidate the index. salt, spi, control & key are ignore */
2519 	sa_update_iocb->flags = SA_FLAG_INVALIDATE;
2520 	sa_update_iocb->salt = 0;
2521 	sa_update_iocb->spi = 0;
2522 	sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index);
2523 	sa_update_iocb->sa_control = 0;
2524 
2525 	ql_dbg(ql_dbg_edif, vha, 0x921d,
2526 	    "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
2527 	    __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
2528 	    sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags,
2529 	    sa_update_iocb->sa_index, sp->handle);
2530 
2531 	sp->fcport->vha->qla_stats.control_requests++;
2532 }
2533 
qla24xx_auth_els(scsi_qla_host_t * vha,void ** pkt,struct rsp_que ** rsp)2534 void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
2535 {
2536 	struct purex_entry_24xx *p = *pkt;
2537 	struct enode		*ptr;
2538 	int		sid;
2539 	u16 totlen;
2540 	struct purexevent	*purex;
2541 	struct scsi_qla_host *host = NULL;
2542 	int rc;
2543 	struct fc_port *fcport;
2544 	struct qla_els_pt_arg a;
2545 	be_id_t beid;
2546 
2547 	memset(&a, 0, sizeof(a));
2548 
2549 	a.els_opcode = ELS_AUTH_ELS;
2550 	a.nport_handle = p->nport_handle;
2551 	a.rx_xchg_address = p->rx_xchg_addr;
2552 	a.did.b.domain = p->s_id[2];
2553 	a.did.b.area   = p->s_id[1];
2554 	a.did.b.al_pa  = p->s_id[0];
2555 	a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt);
2556 	a.tx_addr = vha->hw->elsrej.cdma;
2557 	a.vp_idx = vha->vp_idx;
2558 	a.control_flags = EPD_ELS_RJT;
2559 	a.ox_id = le16_to_cpu(p->ox_id);
2560 
2561 	sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);
2562 
2563 	totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE;
2564 	if (le16_to_cpu(p->status_flags) & 0x8000) {
2565 		totlen = le16_to_cpu(p->trunc_frame_size);
2566 		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2567 		__qla_consume_iocb(vha, pkt, rsp);
2568 		return;
2569 	}
2570 
2571 	if (totlen > ELS_MAX_PAYLOAD) {
2572 		ql_dbg(ql_dbg_edif, vha, 0x0910d,
2573 		    "%s WARNING: verbose ELS frame received (totlen=%x)\n",
2574 		    __func__, totlen);
2575 		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2576 		__qla_consume_iocb(vha, pkt, rsp);
2577 		return;
2578 	}
2579 
2580 	if (!vha->hw->flags.edif_enabled) {
2581 		/* edif support not enabled */
2582 		ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n",
2583 		    __func__);
2584 		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2585 		__qla_consume_iocb(vha, pkt, rsp);
2586 		return;
2587 	}
2588 
2589 	ptr = qla_enode_alloc(vha, N_PUREX);
2590 	if (!ptr) {
2591 		ql_dbg(ql_dbg_edif, vha, 0x09109,
2592 		    "WARNING: enode alloc failed for sid=%x\n",
2593 		    sid);
2594 		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2595 		__qla_consume_iocb(vha, pkt, rsp);
2596 		return;
2597 	}
2598 
2599 	purex = &ptr->u.purexinfo;
2600 	purex->pur_info.pur_sid = a.did;
2601 	purex->pur_info.pur_bytes_rcvd = totlen;
2602 	purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
2603 	purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
2604 	purex->pur_info.pur_did.b.domain =  p->d_id[2];
2605 	purex->pur_info.pur_did.b.area =  p->d_id[1];
2606 	purex->pur_info.pur_did.b.al_pa =  p->d_id[0];
2607 	purex->pur_info.vp_idx = p->vp_idx;
2608 
2609 	a.sid = purex->pur_info.pur_did;
2610 
2611 	rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
2612 		purex->msgp_len);
2613 	if (rc) {
2614 		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2615 		qla_enode_free(vha, ptr);
2616 		return;
2617 	}
2618 	beid.al_pa = purex->pur_info.pur_did.b.al_pa;
2619 	beid.area   = purex->pur_info.pur_did.b.area;
2620 	beid.domain = purex->pur_info.pur_did.b.domain;
2621 	host = qla_find_host_by_d_id(vha, beid);
2622 	if (!host) {
2623 		ql_log(ql_log_fatal, vha, 0x508b,
2624 		    "%s Drop ELS due to unable to find host %06x\n",
2625 		    __func__, purex->pur_info.pur_did.b24);
2626 
2627 		qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2628 		qla_enode_free(vha, ptr);
2629 		return;
2630 	}
2631 
2632 	fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
2633 
2634 	if (DBELL_INACTIVE(vha)) {
2635 		ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
2636 		    __func__, host->e_dbell.db_flags,
2637 		    fcport ? fcport->d_id.b24 : 0);
2638 
2639 		qla_els_reject_iocb(host, (*rsp)->qpair, &a);
2640 		qla_enode_free(host, ptr);
2641 		return;
2642 	}
2643 
2644 	if (fcport && EDIF_SESSION_DOWN(fcport)) {
2645 		ql_dbg(ql_dbg_edif, host, 0x13b6,
2646 		    "%s terminate exchange. Send logo to 0x%x\n",
2647 		    __func__, a.did.b24);
2648 
2649 		a.tx_byte_count = a.tx_len = 0;
2650 		a.tx_addr = 0;
2651 		a.control_flags = EPD_RX_XCHG;  /* EPD_RX_XCHG = terminate cmd */
2652 		qla_els_reject_iocb(host, (*rsp)->qpair, &a);
2653 		qla_enode_free(host, ptr);
2654 		/* send logo to let remote port knows to tear down session */
2655 		fcport->send_els_logo = 1;
2656 		qlt_schedule_sess_for_deletion(fcport);
2657 		return;
2658 	}
2659 
2660 	/* add the local enode to the list */
2661 	qla_enode_add(host, ptr);
2662 
2663 	ql_dbg(ql_dbg_edif, host, 0x0910c,
2664 	    "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
2665 	    __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
2666 	    purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address);
2667 
2668 	qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
2669 }
2670 
qla_edif_get_sa_index_from_freepool(fc_port_t * fcport,int dir)2671 static uint16_t  qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir)
2672 {
2673 	struct scsi_qla_host *vha = fcport->vha;
2674 	struct qla_hw_data *ha = vha->hw;
2675 	void *sa_id_map;
2676 	unsigned long flags = 0;
2677 	u16 sa_index;
2678 
2679 	ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2680 	    "%s: entry\n", __func__);
2681 
2682 	if (dir)
2683 		sa_id_map = ha->edif_tx_sa_id_map;
2684 	else
2685 		sa_id_map = ha->edif_rx_sa_id_map;
2686 
2687 	spin_lock_irqsave(&ha->sadb_fp_lock, flags);
2688 	sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX);
2689 	if (sa_index >=  EDIF_NUM_SA_INDEX) {
2690 		spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
2691 		return INVALID_EDIF_SA_INDEX;
2692 	}
2693 	set_bit(sa_index, sa_id_map);
2694 	spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
2695 
2696 	if (dir)
2697 		sa_index += EDIF_TX_SA_INDEX_BASE;
2698 
2699 	ql_dbg(ql_dbg_edif, vha, 0x3063,
2700 	    "%s: index retrieved from free pool %d\n", __func__, sa_index);
2701 
2702 	return sa_index;
2703 }
2704 
2705 /* find an sadb entry for an nport_handle */
2706 static struct edif_sa_index_entry *
qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,struct list_head * sa_list)2707 qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
2708 		struct list_head *sa_list)
2709 {
2710 	struct edif_sa_index_entry *entry;
2711 	struct edif_sa_index_entry *tentry;
2712 	struct list_head *indx_list = sa_list;
2713 
2714 	list_for_each_entry_safe(entry, tentry, indx_list, next) {
2715 		if (entry->handle == nport_handle)
2716 			return entry;
2717 	}
2718 	return NULL;
2719 }
2720 
2721 /* remove an sa_index from the nport_handle and return it to the free pool */
qla_edif_sadb_delete_sa_index(fc_port_t * fcport,uint16_t nport_handle,uint16_t sa_index)2722 static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
2723 		uint16_t sa_index)
2724 {
2725 	struct edif_sa_index_entry *entry;
2726 	struct list_head *sa_list;
2727 	int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
2728 	int slot = 0;
2729 	int free_slot_count = 0;
2730 	scsi_qla_host_t *vha = fcport->vha;
2731 	struct qla_hw_data *ha = vha->hw;
2732 	unsigned long flags = 0;
2733 
2734 	ql_dbg(ql_dbg_edif, vha, 0x3063,
2735 	    "%s: entry\n", __func__);
2736 
2737 	if (dir)
2738 		sa_list = &ha->sadb_tx_index_list;
2739 	else
2740 		sa_list = &ha->sadb_rx_index_list;
2741 
2742 	entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
2743 	if (!entry) {
2744 		ql_dbg(ql_dbg_edif, vha, 0x3063,
2745 		    "%s: no entry found for nport_handle 0x%x\n",
2746 		    __func__, nport_handle);
2747 		return -1;
2748 	}
2749 
2750 	spin_lock_irqsave(&ha->sadb_lock, flags);
2751 	/*
2752 	 * each tx/rx direction has up to 2 sa indexes/slots. 1 slot for in flight traffic
2753 	 * the other is use at re-key time.
2754 	 */
2755 	for (slot = 0; slot < 2; slot++) {
2756 		if (entry->sa_pair[slot].sa_index == sa_index) {
2757 			entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX;
2758 			entry->sa_pair[slot].spi = 0;
2759 			free_slot_count++;
2760 			qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index);
2761 		} else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
2762 			free_slot_count++;
2763 		}
2764 	}
2765 
2766 	if (free_slot_count == 2) {
2767 		list_del(&entry->next);
2768 		kfree(entry);
2769 	}
2770 	spin_unlock_irqrestore(&ha->sadb_lock, flags);
2771 
2772 	ql_dbg(ql_dbg_edif, vha, 0x3063,
2773 	    "%s: sa_index %d removed, free_slot_count: %d\n",
2774 	    __func__, sa_index, free_slot_count);
2775 
2776 	return 0;
2777 }
2778 
2779 void
qla28xx_sa_update_iocb_entry(scsi_qla_host_t * v,struct req_que * req,struct sa_update_28xx * pkt)2780 qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req,
2781 	struct sa_update_28xx *pkt)
2782 {
2783 	const char *func = "SA_UPDATE_RESPONSE_IOCB";
2784 	srb_t *sp;
2785 	struct edif_sa_ctl *sa_ctl;
2786 	int old_sa_deleted = 1;
2787 	uint16_t nport_handle;
2788 	struct scsi_qla_host *vha;
2789 
2790 	sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
2791 
2792 	if (!sp) {
2793 		ql_dbg(ql_dbg_edif, v, 0x3063,
2794 			"%s: no sp found for pkt\n", __func__);
2795 		return;
2796 	}
2797 	/* use sp->vha due to npiv */
2798 	vha = sp->vha;
2799 
2800 	switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) {
2801 	case 0:
2802 		ql_dbg(ql_dbg_edif, vha, 0x3063,
2803 		    "%s: EDIF SA UPDATE RX IOCB  vha: 0x%p  index: %d\n",
2804 		    __func__, vha, pkt->sa_index);
2805 		break;
2806 	case 1:
2807 		ql_dbg(ql_dbg_edif, vha, 0x3063,
2808 		    "%s: EDIF SA DELETE RX IOCB  vha: 0x%p  index: %d\n",
2809 		    __func__, vha, pkt->sa_index);
2810 		break;
2811 	case 2:
2812 		ql_dbg(ql_dbg_edif, vha, 0x3063,
2813 		    "%s: EDIF SA UPDATE TX IOCB  vha: 0x%p  index: %d\n",
2814 		    __func__, vha, pkt->sa_index);
2815 		break;
2816 	case 3:
2817 		ql_dbg(ql_dbg_edif, vha, 0x3063,
2818 		    "%s: EDIF SA DELETE TX IOCB  vha: 0x%p  index: %d\n",
2819 		    __func__, vha, pkt->sa_index);
2820 		break;
2821 	}
2822 
2823 	/*
2824 	 * dig the nport handle out of the iocb, fcport->loop_id can not be trusted
2825 	 * to be correct during cleanup sa_update iocbs.
2826 	 */
2827 	nport_handle = sp->fcport->loop_id;
2828 
2829 	ql_dbg(ql_dbg_edif, vha, 0x3063,
2830 	    "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
2831 	    __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info,
2832 	    nport_handle, pkt->sa_index, pkt->flags, sp->handle);
2833 
2834 	/* if rx delete, remove the timer */
2835 	if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) ==  SA_FLAG_INVALIDATE) {
2836 		struct edif_list_entry *edif_entry;
2837 
2838 		sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2839 
2840 		edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle);
2841 		if (edif_entry) {
2842 			ql_dbg(ql_dbg_edif, vha, 0x5033,
2843 			    "%s: removing edif_entry %p, new sa_index: 0x%x\n",
2844 			    __func__, edif_entry, pkt->sa_index);
2845 			qla_edif_list_delete_sa_index(sp->fcport, edif_entry);
2846 			timer_shutdown(&edif_entry->timer);
2847 
2848 			ql_dbg(ql_dbg_edif, vha, 0x5033,
2849 			    "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
2850 			    __func__, edif_entry, pkt->sa_index);
2851 
2852 			kfree(edif_entry);
2853 		}
2854 	}
2855 
2856 	/*
2857 	 * if this is a delete for either tx or rx, make sure it succeeded.
2858 	 * The new_sa_info field should be 0xffff on success
2859 	 */
2860 	if (pkt->flags & SA_FLAG_INVALIDATE)
2861 		old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0;
2862 
2863 	/* Process update and delete the same way */
2864 
2865 	/* If this is an sadb cleanup delete, bypass sending events to IPSEC */
2866 	if (sp->flags & SRB_EDIF_CLEANUP_DELETE) {
2867 		sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2868 		ql_dbg(ql_dbg_edif, vha, 0x3063,
2869 		    "%s: nph 0x%x, sa_index %d removed from fw\n",
2870 		    __func__, sp->fcport->loop_id, pkt->sa_index);
2871 
2872 	} else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) &&
2873 	    old_sa_deleted) {
2874 		/*
2875 		 * Note: Wa are only keeping track of latest SA,
2876 		 * so we know when we can start enableing encryption per I/O.
2877 		 * If all SA's get deleted, let FW reject the IOCB.
2878 
2879 		 * TODO: edif: don't set enabled here I think
2880 		 * TODO: edif: prli complete is where it should be set
2881 		 */
2882 		ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2883 			"SA(%x)updated for s_id %02x%02x%02x\n",
2884 			pkt->new_sa_info,
2885 			pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
2886 		sp->fcport->edif.enable = 1;
2887 		if (pkt->flags & SA_FLAG_TX) {
2888 			sp->fcport->edif.tx_sa_set = 1;
2889 			sp->fcport->edif.tx_sa_pending = 0;
2890 			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2891 				QL_VND_SA_STAT_SUCCESS,
2892 				QL_VND_TX_SA_KEY, sp->fcport);
2893 		} else {
2894 			sp->fcport->edif.rx_sa_set = 1;
2895 			sp->fcport->edif.rx_sa_pending = 0;
2896 			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2897 				QL_VND_SA_STAT_SUCCESS,
2898 				QL_VND_RX_SA_KEY, sp->fcport);
2899 		}
2900 	} else {
2901 		ql_dbg(ql_dbg_edif, vha, 0x3063,
2902 		    "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
2903 		    __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info,
2904 		    pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
2905 
2906 		if (pkt->flags & SA_FLAG_TX)
2907 			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2908 				(le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
2909 				QL_VND_TX_SA_KEY, sp->fcport);
2910 		else
2911 			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2912 				(le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
2913 				QL_VND_RX_SA_KEY, sp->fcport);
2914 	}
2915 
2916 	/* for delete, release sa_ctl, sa_index */
2917 	if (pkt->flags & SA_FLAG_INVALIDATE) {
2918 		/* release the sa_ctl */
2919 		sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport,
2920 		    le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX));
2921 		if (sa_ctl &&
2922 		    qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index,
2923 			(pkt->flags & SA_FLAG_TX)) != NULL) {
2924 			ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2925 			    "%s: freeing sa_ctl for index %d\n",
2926 			    __func__, sa_ctl->index);
2927 			qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index);
2928 		} else {
2929 			ql_dbg(ql_dbg_edif, vha, 0x3063,
2930 			    "%s: sa_ctl NOT freed, sa_ctl: %p\n",
2931 			    __func__, sa_ctl);
2932 		}
2933 		ql_dbg(ql_dbg_edif, vha, 0x3063,
2934 		    "%s: freeing sa_index %d, nph: 0x%x\n",
2935 		    __func__, le16_to_cpu(pkt->sa_index), nport_handle);
2936 		qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
2937 		    le16_to_cpu(pkt->sa_index));
2938 	/*
2939 	 * check for a failed sa_update and remove
2940 	 * the sadb entry.
2941 	 */
2942 	} else if (pkt->u.comp_sts) {
2943 		ql_dbg(ql_dbg_edif, vha, 0x3063,
2944 		    "%s: freeing sa_index %d, nph: 0x%x\n",
2945 		    __func__, pkt->sa_index, nport_handle);
2946 		qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
2947 		    le16_to_cpu(pkt->sa_index));
2948 		switch (le16_to_cpu(pkt->u.comp_sts)) {
2949 		case CS_PORT_EDIF_UNAVAIL:
2950 		case CS_PORT_EDIF_LOGOUT:
2951 			qlt_schedule_sess_for_deletion(sp->fcport);
2952 			break;
2953 		default:
2954 			break;
2955 		}
2956 	}
2957 
2958 	sp->done(sp, 0);
2959 }
2960 
2961 /**
2962  * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP
2963  * @sp: command to send to the ISP
2964  *
2965  * Return: non-zero if a failure occurred, else zero.
2966  */
2967 int
qla28xx_start_scsi_edif(srb_t * sp)2968 qla28xx_start_scsi_edif(srb_t *sp)
2969 {
2970 	int             nseg;
2971 	unsigned long   flags;
2972 	struct scsi_cmnd *cmd;
2973 	uint32_t        *clr_ptr;
2974 	uint32_t        index, i;
2975 	uint32_t        handle;
2976 	uint16_t        cnt;
2977 	int16_t        req_cnt;
2978 	uint16_t        tot_dsds;
2979 	__be32 *fcp_dl;
2980 	uint8_t additional_cdb_len;
2981 	struct ct6_dsd *ctx;
2982 	struct scsi_qla_host *vha = sp->vha;
2983 	struct qla_hw_data *ha = vha->hw;
2984 	struct cmd_type_6 *cmd_pkt;
2985 	struct dsd64	*cur_dsd;
2986 	uint8_t		avail_dsds = 0;
2987 	struct scatterlist *sg;
2988 	struct req_que *req = sp->qpair->req;
2989 	spinlock_t *lock = sp->qpair->qp_lock_ptr;
2990 
2991 	/* Setup device pointers. */
2992 	cmd = GET_CMD_SP(sp);
2993 
2994 	/* So we know we haven't pci_map'ed anything yet */
2995 	tot_dsds = 0;
2996 
2997 	/* Send marker if required */
2998 	if (vha->marker_needed != 0) {
2999 		if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
3000 			QLA_SUCCESS) {
3001 			ql_log(ql_log_warn, vha, 0x300c,
3002 			    "qla2x00_marker failed for cmd=%p.\n", cmd);
3003 			return QLA_FUNCTION_FAILED;
3004 		}
3005 		vha->marker_needed = 0;
3006 	}
3007 
3008 	/* Acquire ring specific lock */
3009 	spin_lock_irqsave(lock, flags);
3010 
3011 	/* Check for room in outstanding command list. */
3012 	handle = req->current_outstanding_cmd;
3013 	for (index = 1; index < req->num_outstanding_cmds; index++) {
3014 		handle++;
3015 		if (handle == req->num_outstanding_cmds)
3016 			handle = 1;
3017 		if (!req->outstanding_cmds[handle])
3018 			break;
3019 	}
3020 	if (index == req->num_outstanding_cmds)
3021 		goto queuing_error;
3022 
3023 	/* Map the sg table so we have an accurate count of sg entries needed */
3024 	if (scsi_sg_count(cmd)) {
3025 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3026 		    scsi_sg_count(cmd), cmd->sc_data_direction);
3027 		if (unlikely(!nseg))
3028 			goto queuing_error;
3029 	} else {
3030 		nseg = 0;
3031 	}
3032 
3033 	tot_dsds = nseg;
3034 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3035 
3036 	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
3037 	sp->iores.exch_cnt = 1;
3038 	sp->iores.iocb_cnt = req_cnt;
3039 	if (qla_get_fw_resources(sp->qpair, &sp->iores))
3040 		goto queuing_error;
3041 
3042 	if (req->cnt < (req_cnt + 2)) {
3043 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3044 		    rd_reg_dword(req->req_q_out);
3045 		if (req->ring_index < cnt)
3046 			req->cnt = cnt - req->ring_index;
3047 		else
3048 			req->cnt = req->length -
3049 			    (req->ring_index - cnt);
3050 		if (req->cnt < (req_cnt + 2))
3051 			goto queuing_error;
3052 	}
3053 
3054 	if (qla_get_buf(vha, sp->qpair, &sp->u.scmd.buf_dsc)) {
3055 		ql_log(ql_log_fatal, vha, 0x3011,
3056 		    "Failed to allocate buf for fcp_cmnd for cmd=%p.\n", cmd);
3057 		goto queuing_error;
3058 	}
3059 
3060 	sp->flags |= SRB_GOT_BUF;
3061 	ctx = &sp->u.scmd.ct6_ctx;
3062 	ctx->fcp_cmnd = sp->u.scmd.buf_dsc.buf;
3063 	ctx->fcp_cmnd_dma = sp->u.scmd.buf_dsc.buf_dma;
3064 
3065 	if (cmd->cmd_len > 16) {
3066 		additional_cdb_len = cmd->cmd_len - 16;
3067 		if ((cmd->cmd_len % 4) != 0) {
3068 			/*
3069 			 * SCSI command bigger than 16 bytes must be
3070 			 * multiple of 4
3071 			 */
3072 			ql_log(ql_log_warn, vha, 0x3012,
3073 			    "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
3074 			    cmd->cmd_len, cmd);
3075 			goto queuing_error_fcp_cmnd;
3076 		}
3077 		ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3078 	} else {
3079 		additional_cdb_len = 0;
3080 		ctx->fcp_cmnd_len = 12 + 16 + 4;
3081 	}
3082 
3083 	cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3084 	cmd_pkt->handle = make_handle(req->id, handle);
3085 
3086 	/*
3087 	 * Zero out remaining portion of packet.
3088 	 * tagged queuing modifier -- default is TSK_SIMPLE (0).
3089 	 */
3090 	clr_ptr = (uint32_t *)cmd_pkt + 2;
3091 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3092 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3093 
3094 	/* No data transfer */
3095 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
3096 		cmd_pkt->byte_count = cpu_to_le32(0);
3097 		goto no_dsds;
3098 	}
3099 
3100 	/* Set transfer direction */
3101 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
3102 		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
3103 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
3104 		vha->qla_stats.output_requests++;
3105 		sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
3106 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
3107 		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
3108 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
3109 		vha->qla_stats.input_requests++;
3110 		sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
3111 	}
3112 
3113 	cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
3114 	cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));
3115 
3116 	/* One DSD is available in the Command Type 6 IOCB */
3117 	avail_dsds = 1;
3118 	cur_dsd = &cmd_pkt->fcp_dsd;
3119 
3120 	/* Load data segments */
3121 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
3122 		dma_addr_t      sle_dma;
3123 		cont_a64_entry_t *cont_pkt;
3124 
3125 		/* Allocate additional continuation packets? */
3126 		if (avail_dsds == 0) {
3127 			/*
3128 			 * Five DSDs are available in the Continuation
3129 			 * Type 1 IOCB.
3130 			 */
3131 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
3132 			cur_dsd = cont_pkt->dsd;
3133 			avail_dsds = 5;
3134 		}
3135 
3136 		sle_dma = sg_dma_address(sg);
3137 		put_unaligned_le64(sle_dma, &cur_dsd->address);
3138 		cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
3139 		cur_dsd++;
3140 		avail_dsds--;
3141 	}
3142 
3143 no_dsds:
3144 	/* Set NPORT-ID and LUN number*/
3145 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3146 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3147 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3148 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3149 	cmd_pkt->vp_index = sp->vha->vp_idx;
3150 
3151 	cmd_pkt->entry_type = COMMAND_TYPE_6;
3152 
3153 	/* Set total data segment count. */
3154 	cmd_pkt->entry_count = (uint8_t)req_cnt;
3155 
3156 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3157 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3158 
3159 	/* build FCP_CMND IU */
3160 	int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3161 	ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3162 
3163 	if (cmd->sc_data_direction == DMA_TO_DEVICE)
3164 		ctx->fcp_cmnd->additional_cdb_len |= 1;
3165 	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3166 		ctx->fcp_cmnd->additional_cdb_len |= 2;
3167 
3168 	/* Populate the FCP_PRIO. */
3169 	if (ha->flags.fcp_prio_enabled)
3170 		ctx->fcp_cmnd->task_attribute |=
3171 		    sp->fcport->fcp_prio << 3;
3172 
3173 	memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3174 
3175 	fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3176 	    additional_cdb_len);
3177 	*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3178 
3179 	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3180 	put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);
3181 
3182 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3183 	/* Set total data segment count. */
3184 	cmd_pkt->entry_count = (uint8_t)req_cnt;
3185 	cmd_pkt->entry_status = 0;
3186 
3187 	/* Build command packet. */
3188 	req->current_outstanding_cmd = handle;
3189 	req->outstanding_cmds[handle] = sp;
3190 	sp->handle = handle;
3191 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3192 	req->cnt -= req_cnt;
3193 
3194 	/* Adjust ring index. */
3195 	wmb();
3196 	req->ring_index++;
3197 	if (req->ring_index == req->length) {
3198 		req->ring_index = 0;
3199 		req->ring_ptr = req->ring;
3200 	} else {
3201 		req->ring_ptr++;
3202 	}
3203 
3204 	sp->qpair->cmd_cnt++;
3205 	/* Set chip new ring index. */
3206 	wrt_reg_dword(req->req_q_in, req->ring_index);
3207 
3208 	spin_unlock_irqrestore(lock, flags);
3209 
3210 	return QLA_SUCCESS;
3211 
3212 queuing_error_fcp_cmnd:
3213 queuing_error:
3214 	if (tot_dsds)
3215 		scsi_dma_unmap(cmd);
3216 
3217 	qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
3218 	qla_put_fw_resources(sp->qpair, &sp->iores);
3219 	spin_unlock_irqrestore(lock, flags);
3220 
3221 	return QLA_FUNCTION_FAILED;
3222 }
3223 
3224 /**********************************************
3225  * edif update/delete sa_index list functions *
3226  **********************************************/
3227 
3228 /* clear the edif_indx_list for this port */
qla_edif_list_del(fc_port_t * fcport)3229 void qla_edif_list_del(fc_port_t *fcport)
3230 {
3231 	struct edif_list_entry *indx_lst;
3232 	struct edif_list_entry *tindx_lst;
3233 	struct list_head *indx_list = &fcport->edif.edif_indx_list;
3234 	unsigned long flags = 0;
3235 
3236 	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
3237 	list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
3238 		list_del(&indx_lst->next);
3239 		kfree(indx_lst);
3240 	}
3241 	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3242 }
3243 
3244 /******************
3245  * SADB functions *
3246  ******************/
3247 
3248 /* allocate/retrieve an sa_index for a given spi */
qla_edif_sadb_get_sa_index(fc_port_t * fcport,struct qla_sa_update_frame * sa_frame)3249 static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
3250 		struct qla_sa_update_frame *sa_frame)
3251 {
3252 	struct edif_sa_index_entry *entry;
3253 	struct list_head *sa_list;
3254 	uint16_t sa_index;
3255 	int dir = sa_frame->flags & SAU_FLG_TX;
3256 	int slot = 0;
3257 	int free_slot = -1;
3258 	scsi_qla_host_t *vha = fcport->vha;
3259 	struct qla_hw_data *ha = vha->hw;
3260 	unsigned long flags = 0;
3261 	uint16_t nport_handle = fcport->loop_id;
3262 
3263 	ql_dbg(ql_dbg_edif, vha, 0x3063,
3264 	    "%s: entry  fc_port: %p, nport_handle: 0x%x\n",
3265 	    __func__, fcport, nport_handle);
3266 
3267 	if (dir)
3268 		sa_list = &ha->sadb_tx_index_list;
3269 	else
3270 		sa_list = &ha->sadb_rx_index_list;
3271 
3272 	entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
3273 	if (!entry) {
3274 		if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
3275 			ql_dbg(ql_dbg_edif, vha, 0x3063,
3276 			    "%s: rx delete request with no entry\n", __func__);
3277 			return RX_DELETE_NO_EDIF_SA_INDEX;
3278 		}
3279 
3280 		/* if there is no entry for this nport, add one */
3281 		entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
3282 		if (!entry)
3283 			return INVALID_EDIF_SA_INDEX;
3284 
3285 		sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
3286 		if (sa_index == INVALID_EDIF_SA_INDEX) {
3287 			kfree(entry);
3288 			return INVALID_EDIF_SA_INDEX;
3289 		}
3290 
3291 		INIT_LIST_HEAD(&entry->next);
3292 		entry->handle = nport_handle;
3293 		entry->fcport = fcport;
3294 		entry->sa_pair[0].spi = sa_frame->spi;
3295 		entry->sa_pair[0].sa_index = sa_index;
3296 		entry->sa_pair[1].spi = 0;
3297 		entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
3298 		spin_lock_irqsave(&ha->sadb_lock, flags);
3299 		list_add_tail(&entry->next, sa_list);
3300 		spin_unlock_irqrestore(&ha->sadb_lock, flags);
3301 		ql_dbg(ql_dbg_edif, vha, 0x3063,
3302 		    "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
3303 		    __func__, nport_handle, sa_frame->spi, sa_index);
3304 
3305 		return sa_index;
3306 	}
3307 
3308 	spin_lock_irqsave(&ha->sadb_lock, flags);
3309 
3310 	/* see if we already have an entry for this spi */
3311 	for (slot = 0; slot < 2; slot++) {
3312 		if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
3313 			free_slot = slot;
3314 		} else {
3315 			if (entry->sa_pair[slot].spi == sa_frame->spi) {
3316 				spin_unlock_irqrestore(&ha->sadb_lock, flags);
3317 				ql_dbg(ql_dbg_edif, vha, 0x3063,
3318 				    "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
3319 				    __func__, slot, entry->handle, sa_frame->spi,
3320 				    entry->sa_pair[slot].sa_index);
3321 				return entry->sa_pair[slot].sa_index;
3322 			}
3323 		}
3324 	}
3325 	spin_unlock_irqrestore(&ha->sadb_lock, flags);
3326 
3327 	/* both slots are used */
3328 	if (free_slot == -1) {
3329 		ql_dbg(ql_dbg_edif, vha, 0x3063,
3330 		    "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
3331 		    __func__, entry->handle, sa_frame->spi);
3332 		ql_dbg(ql_dbg_edif, vha, 0x3063,
3333 		    "%s: Slot 0  spi: 0x%x  sa_index: %d,  Slot 1  spi: 0x%x  sa_index: %d\n",
3334 		    __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
3335 		    entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);
3336 
3337 		return INVALID_EDIF_SA_INDEX;
3338 	}
3339 
3340 	/* there is at least one free slot, use it */
3341 	sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
3342 	if (sa_index == INVALID_EDIF_SA_INDEX) {
3343 		ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
3344 		    "%s: empty freepool!!\n", __func__);
3345 		return INVALID_EDIF_SA_INDEX;
3346 	}
3347 
3348 	spin_lock_irqsave(&ha->sadb_lock, flags);
3349 	entry->sa_pair[free_slot].spi = sa_frame->spi;
3350 	entry->sa_pair[free_slot].sa_index = sa_index;
3351 	spin_unlock_irqrestore(&ha->sadb_lock, flags);
3352 	ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
3353 	    "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
3354 	    __func__, free_slot, entry->handle, sa_frame->spi, sa_index);
3355 
3356 	return sa_index;
3357 }
3358 
3359 /* release any sadb entries -- only done at teardown */
qla_edif_sadb_release(struct qla_hw_data * ha)3360 void qla_edif_sadb_release(struct qla_hw_data *ha)
3361 {
3362 	struct edif_sa_index_entry *entry, *tmp;
3363 
3364 	list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
3365 		list_del(&entry->next);
3366 		kfree(entry);
3367 	}
3368 
3369 	list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
3370 		list_del(&entry->next);
3371 		kfree(entry);
3372 	}
3373 }
3374 
3375 /**************************
3376  * sadb freepool functions
3377  **************************/
3378 
3379 /* build the rx and tx sa_index free pools -- only done at fcport init */
qla_edif_sadb_build_free_pool(struct qla_hw_data * ha)3380 int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
3381 {
3382 	ha->edif_tx_sa_id_map =
3383 	    kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
3384 
3385 	if (!ha->edif_tx_sa_id_map) {
3386 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3387 		    "Unable to allocate memory for sadb tx.\n");
3388 		return -ENOMEM;
3389 	}
3390 
3391 	ha->edif_rx_sa_id_map =
3392 	    kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
3393 	if (!ha->edif_rx_sa_id_map) {
3394 		kfree(ha->edif_tx_sa_id_map);
3395 		ha->edif_tx_sa_id_map = NULL;
3396 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3397 		    "Unable to allocate memory for sadb rx.\n");
3398 		return -ENOMEM;
3399 	}
3400 	return 0;
3401 }
3402 
3403 /* release the free pool - only done during fcport teardown */
qla_edif_sadb_release_free_pool(struct qla_hw_data * ha)3404 void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
3405 {
3406 	kfree(ha->edif_tx_sa_id_map);
3407 	ha->edif_tx_sa_id_map = NULL;
3408 	kfree(ha->edif_rx_sa_id_map);
3409 	ha->edif_rx_sa_id_map = NULL;
3410 }
3411 
__chk_edif_rx_sa_delete_pending(scsi_qla_host_t * vha,fc_port_t * fcport,uint32_t handle,uint16_t sa_index)3412 static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
3413 		fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
3414 {
3415 	struct edif_list_entry *edif_entry;
3416 	struct edif_sa_ctl *sa_ctl;
3417 	uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
3418 	unsigned long flags = 0;
3419 	uint16_t nport_handle = fcport->loop_id;
3420 	uint16_t cached_nport_handle;
3421 
3422 	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
3423 	edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
3424 	if (!edif_entry) {
3425 		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3426 		return;		/* no pending delete for this handle */
3427 	}
3428 
3429 	/*
3430 	 * check for no pending delete for this index or iocb does not
3431 	 * match rx sa_index
3432 	 */
3433 	if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
3434 	    edif_entry->update_sa_index != sa_index) {
3435 		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3436 		return;
3437 	}
3438 
3439 	/*
3440 	 * wait until we have seen at least EDIF_DELAY_COUNT transfers before
3441 	 * queueing RX delete
3442 	 */
3443 	if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
3444 		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3445 		return;
3446 	}
3447 
3448 	ql_dbg(ql_dbg_edif, vha, 0x5033,
3449 	    "%s: invalidating delete_sa_index,  update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
3450 	    __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);
3451 
3452 	delete_sa_index = edif_entry->delete_sa_index;
3453 	edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
3454 	cached_nport_handle = edif_entry->handle;
3455 	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3456 
3457 	/* sanity check on the nport handle */
3458 	if (nport_handle != cached_nport_handle) {
3459 		ql_dbg(ql_dbg_edif, vha, 0x3063,
3460 		    "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
3461 		    __func__, nport_handle, cached_nport_handle);
3462 	}
3463 
3464 	/* find the sa_ctl for the delete and schedule the delete */
3465 	sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
3466 	if (sa_ctl) {
3467 		ql_dbg(ql_dbg_edif, vha, 0x3063,
3468 		    "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
3469 		    __func__, sa_ctl, sa_index);
3470 		ql_dbg(ql_dbg_edif, vha, 0x3063,
3471 		    "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
3472 		    delete_sa_index,
3473 		    edif_entry->update_sa_index, nport_handle, handle);
3474 
3475 		sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
3476 		set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
3477 		qla_post_sa_replace_work(fcport->vha, fcport,
3478 		    nport_handle, sa_ctl);
3479 	} else {
3480 		ql_dbg(ql_dbg_edif, vha, 0x3063,
3481 		    "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
3482 		    __func__, delete_sa_index);
3483 	}
3484 }
3485 
qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t * vha,srb_t * sp,struct sts_entry_24xx * sts24)3486 void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
3487 		srb_t *sp, struct sts_entry_24xx *sts24)
3488 {
3489 	fc_port_t *fcport = sp->fcport;
3490 	/* sa_index used by this iocb */
3491 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
3492 	uint32_t handle;
3493 
3494 	handle = (uint32_t)LSW(sts24->handle);
3495 
3496 	/* find out if this status iosb is for a scsi read */
3497 	if (cmd->sc_data_direction != DMA_FROM_DEVICE)
3498 		return;
3499 
3500 	return __chk_edif_rx_sa_delete_pending(vha, fcport, handle,
3501 	   le16_to_cpu(sts24->edif_sa_index));
3502 }
3503 
qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t * vha,fc_port_t * fcport,struct ctio7_from_24xx * pkt)3504 void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
3505 		struct ctio7_from_24xx *pkt)
3506 {
3507 	__chk_edif_rx_sa_delete_pending(vha, fcport,
3508 	    pkt->handle, le16_to_cpu(pkt->edif_sa_index));
3509 }
3510 
qla_parse_auth_els_ctl(struct srb * sp)3511 static void qla_parse_auth_els_ctl(struct srb *sp)
3512 {
3513 	struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
3514 	struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job;
3515 	struct fc_bsg_request *request = bsg_job->request;
3516 	struct qla_bsg_auth_els_request *p =
3517 	    (struct qla_bsg_auth_els_request *)bsg_job->request;
3518 
3519 	a->tx_len = a->tx_byte_count = sp->remap.req.len;
3520 	a->tx_addr = sp->remap.req.dma;
3521 	a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
3522 	a->rx_addr = sp->remap.rsp.dma;
3523 
3524 	if (p->e.sub_cmd == SEND_ELS_REPLY) {
3525 		a->control_flags = p->e.extra_control_flags << 13;
3526 		a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
3527 		if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
3528 			a->els_opcode = ELS_LS_ACC;
3529 		else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
3530 			a->els_opcode = ELS_LS_RJT;
3531 	}
3532 	a->did = sp->fcport->d_id;
3533 	a->els_opcode =  request->rqst_data.h_els.command_code;
3534 	a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3535 	a->vp_idx = sp->vha->vp_idx;
3536 }
3537 
qla_edif_process_els(scsi_qla_host_t * vha,struct bsg_job * bsg_job)3538 int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
3539 {
3540 	struct fc_bsg_request *bsg_request = bsg_job->request;
3541 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3542 	fc_port_t *fcport = NULL;
3543 	struct qla_hw_data *ha = vha->hw;
3544 	srb_t *sp;
3545 	int rval =  (DID_ERROR << 16), cnt;
3546 	port_id_t d_id;
3547 	struct qla_bsg_auth_els_request *p =
3548 	    (struct qla_bsg_auth_els_request *)bsg_job->request;
3549 	struct qla_bsg_auth_els_reply *rpl =
3550 	    (struct qla_bsg_auth_els_reply *)bsg_job->reply;
3551 
3552 	rpl->version = EDIF_VERSION1;
3553 
3554 	d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
3555 	d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
3556 	d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];
3557 
3558 	/* find matching d_id in fcport list */
3559 	fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
3560 	if (!fcport) {
3561 		ql_dbg(ql_dbg_edif, vha, 0x911a,
3562 		    "%s fcport not find online portid=%06x.\n",
3563 		    __func__, d_id.b24);
3564 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
3565 		return -EIO;
3566 	}
3567 
3568 	if (qla_bsg_check(vha, bsg_job, fcport))
3569 		return 0;
3570 
3571 	if (EDIF_SESS_DELETE(fcport)) {
3572 		ql_dbg(ql_dbg_edif, vha, 0x910d,
3573 		    "%s ELS code %x, no loop id.\n", __func__,
3574 		    bsg_request->rqst_data.r_els.els_code);
3575 		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
3576 		return -ENXIO;
3577 	}
3578 
3579 	if (!vha->flags.online) {
3580 		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
3581 		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
3582 		rval = -EIO;
3583 		goto done;
3584 	}
3585 
3586 	/* pass through is supported only for ISP 4Gb or higher */
3587 	if (!IS_FWI2_CAPABLE(ha)) {
3588 		ql_dbg(ql_dbg_user, vha, 0x7001,
3589 		    "ELS passthru not supported for ISP23xx based adapters.\n");
3590 		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
3591 		rval = -EPERM;
3592 		goto done;
3593 	}
3594 
3595 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3596 	if (!sp) {
3597 		ql_dbg(ql_dbg_user, vha, 0x7004,
3598 		    "Failed get sp pid=%06x\n", fcport->d_id.b24);
3599 		rval = -ENOMEM;
3600 		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3601 		goto done;
3602 	}
3603 
3604 	sp->remap.req.len = bsg_job->request_payload.payload_len;
3605 	sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
3606 	    GFP_KERNEL, &sp->remap.req.dma);
3607 	if (!sp->remap.req.buf) {
3608 		ql_dbg(ql_dbg_user, vha, 0x7005,
3609 		    "Failed allocate request dma len=%x\n",
3610 		    bsg_job->request_payload.payload_len);
3611 		rval = -ENOMEM;
3612 		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3613 		goto done_free_sp;
3614 	}
3615 
3616 	sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
3617 	sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
3618 	    GFP_KERNEL, &sp->remap.rsp.dma);
3619 	if (!sp->remap.rsp.buf) {
3620 		ql_dbg(ql_dbg_user, vha, 0x7006,
3621 		    "Failed allocate response dma len=%x\n",
3622 		    bsg_job->reply_payload.payload_len);
3623 		rval = -ENOMEM;
3624 		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3625 		goto done_free_remap_req;
3626 	}
3627 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
3628 	    bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
3629 	    sp->remap.req.len);
3630 	sp->remap.remapped = true;
3631 
3632 	sp->type = SRB_ELS_CMD_HST_NOLOGIN;
3633 	sp->name = "SPCN_BSG_HST_NOLOGIN";
3634 	sp->u.bsg_cmd.bsg_job = bsg_job;
3635 	qla_parse_auth_els_ctl(sp);
3636 
3637 	sp->free = qla2x00_bsg_sp_free;
3638 	sp->done = qla2x00_bsg_job_done;
3639 
3640 	cnt = 0;
3641 retry:
3642 	rval = qla2x00_start_sp(sp);
3643 	switch (rval) {
3644 	case QLA_SUCCESS:
3645 		ql_dbg(ql_dbg_edif, vha, 0x700a,
3646 		       "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
3647 		       __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
3648 		       p->e.extra_rx_xchg_address, p->e.extra_control_flags,
3649 		       sp->handle, sp->remap.req.len, bsg_job);
3650 		break;
3651 	case EAGAIN:
3652 		msleep(EDIF_MSLEEP_INTERVAL);
3653 		cnt++;
3654 		if (cnt < EDIF_RETRY_COUNT)
3655 			goto retry;
3656 		fallthrough;
3657 	default:
3658 		ql_log(ql_log_warn, vha, 0x700e,
3659 		    "%s qla2x00_start_sp failed = %d\n", __func__, rval);
3660 		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3661 		rval = -EIO;
3662 		goto done_free_remap_rsp;
3663 	}
3664 	return rval;
3665 
3666 done_free_remap_rsp:
3667 	dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
3668 	    sp->remap.rsp.dma);
3669 done_free_remap_req:
3670 	dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
3671 	    sp->remap.req.dma);
3672 done_free_sp:
3673 	qla2x00_rel_sp(sp);
3674 
3675 done:
3676 	return rval;
3677 }
3678 
qla_edif_sess_down(struct scsi_qla_host * vha,struct fc_port * sess)3679 void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
3680 {
3681 	u16 cnt = 0;
3682 
3683 	if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
3684 		ql_dbg(ql_dbg_disc, vha, 0xf09c,
3685 			"%s: sess %8phN send port_offline event\n",
3686 			__func__, sess->port_name);
3687 		sess->edif.app_sess_online = 0;
3688 		sess->edif.sess_down_acked = 0;
3689 		qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
3690 		    sess->d_id.b24, 0, sess);
3691 		qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
3692 
3693 		while (!READ_ONCE(sess->edif.sess_down_acked) &&
3694 		       !test_bit(VPORT_DELETE, &vha->dpc_flags)) {
3695 			msleep(100);
3696 			cnt++;
3697 			if (cnt > 100)
3698 				break;
3699 		}
3700 		sess->edif.sess_down_acked = 0;
3701 		ql_dbg(ql_dbg_disc, vha, 0xf09c,
3702 		       "%s: sess %8phN port_offline event completed\n",
3703 		       __func__, sess->port_name);
3704 	}
3705 }
3706 
qla_edif_clear_appdata(struct scsi_qla_host * vha,struct fc_port * fcport)3707 void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport)
3708 {
3709 	if (!(fcport->flags & FCF_FCSP_DEVICE))
3710 		return;
3711 
3712 	qla_edb_clear(vha, fcport->d_id);
3713 	qla_enode_clear(vha, fcport->d_id);
3714 }
3715