1 /*
2  * Aic94xx Task Management Functions
3  *
4  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
5  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6  *
7  * This file is licensed under GPLv2.
8  *
9  * This file is part of the aic94xx driver.
10  *
11  * The aic94xx driver is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation; version 2 of the
14  * License.
15  *
16  * The aic94xx driver is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with the aic94xx driver; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
24  *
25  */
26 
27 #include <linux/spinlock.h>
28 #include <linux/gfp.h>
29 #include "aic94xx.h"
30 #include "aic94xx_sas.h"
31 #include "aic94xx_hwi.h"
32 
33 /* ---------- Internal enqueue ---------- */
34 
asd_enqueue_internal(struct asd_ascb * ascb,void (* tasklet_complete)(struct asd_ascb *,struct done_list_struct *),void (* timed_out)(unsigned long))35 static int asd_enqueue_internal(struct asd_ascb *ascb,
36 		void (*tasklet_complete)(struct asd_ascb *,
37 					 struct done_list_struct *),
38 				void (*timed_out)(unsigned long))
39 {
40 	int res;
41 
42 	ascb->tasklet_complete = tasklet_complete;
43 	ascb->uldd_timer = 1;
44 
45 	ascb->timer.data = (unsigned long) ascb;
46 	ascb->timer.function = timed_out;
47 	ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
48 
49 	add_timer(&ascb->timer);
50 
51 	res = asd_post_ascb_list(ascb->ha, ascb, 1);
52 	if (unlikely(res))
53 		del_timer(&ascb->timer);
54 	return res;
55 }
56 
57 /* ---------- CLEAR NEXUS ---------- */
58 
/* Result of an internally queued SCB, filled in by the completion
 * tasklet (or the timeout handler) and read by the waiting issuer. */
struct tasklet_completion_status {
	int	dl_opcode;	/* done-list opcode, or TMF_RESP_FUNC_FAILED
				 * when the guard timer fired */
	int	tmf_state;	/* TMF response code (TMF path only) */
	u8	tag_valid:1;	/* 1 if @tag holds a valid task tag */
	__be16	tag;		/* tag extracted from the SSP response */
};
65 
/* Declare and zero-initialize an on-stack tasklet_completion_status. */
#define DECLARE_TCS(tcs) \
	struct tasklet_completion_status tcs = { \
		.dl_opcode = 0, \
		.tmf_state = 0, \
		.tag_valid = 0, \
		.tag = 0, \
	}
73 
74 
/*
 * Done-list completion for a CLEAR NEXUS SCB: record the done-list
 * opcode for the waiting issuer, wake it and free the ascb.  If the
 * guard timer could not be deleted it has already fired, and
 * asd_clear_nexus_timedout() owns the completion — do nothing here.
 */
static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
					     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs = ascb->uldd_task;
	ASD_DPRINTK("%s: here\n", __func__);
	if (!del_timer(&ascb->timer)) {
		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
		return;
	}
	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
	tcs->dl_opcode = dl->opcode;
	complete(ascb->completion);
	asd_ascb_free(ascb);
}
89 
/*
 * Guard-timer handler for a CLEAR NEXUS SCB: report failure and wake
 * the waiting issuer.  The ascb is intentionally not freed here —
 * presumably the eventual done-list completion still references it;
 * NOTE(review): confirm who frees the ascb on this path.
 */
static void asd_clear_nexus_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *)data;
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("%s: here\n", __func__);
	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}
99 
/*
 * Common prologue for the asd_clear_nexus_*() functions.  Requires a
 * local 'asd_ha' in the enclosing scope; declares ascb/scb/res plus an
 * on-stack completion and tasklet_completion_status, allocates one
 * ascb (returning -ENOMEM from the *enclosing function* on failure)
 * and sets the SCB opcode to CLEAR_NEXUS.  The caller fills in the
 * clear_nexus-specific fields and then uses CLEAR_NEXUS_POST.
 */
#define CLEAR_NEXUS_PRE         \
	struct asd_ascb *ascb; \
	struct scb *scb; \
	int res; \
	DECLARE_COMPLETION_ONSTACK(completion); \
	DECLARE_TCS(tcs); \
		\
	ASD_DPRINTK("%s: PRE\n", __func__); \
        res = 1;                \
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
	if (!ascb)              \
		return -ENOMEM; \
                                \
	ascb->completion = &completion; \
	ascb->uldd_task = &tcs; \
	scb = ascb->scb;        \
	scb->header.opcode = CLEAR_NEXUS
117 
/*
 * Common epilogue: post the SCB with a guard timer, wait for the
 * completion and return (from the enclosing function) the done-list
 * opcode, with TC_NO_ERROR translated to TMF_RESP_FUNC_COMPLETE.  On
 * a post failure the ascb is freed and the error is returned.
 */
#define CLEAR_NEXUS_POST        \
	ASD_DPRINTK("%s: POST\n", __func__); \
	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
				   asd_clear_nexus_timedout);              \
	if (res)                \
		goto out_err;   \
	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
	wait_for_completion(&completion); \
	res = tcs.dl_opcode; \
	if (res == TC_NO_ERROR) \
		res = TMF_RESP_FUNC_COMPLETE;   \
	return res; \
out_err:                        \
	asd_ascb_free(ascb);    \
	return res
133 
/* Clear the nexus for the whole host adapter (NEXUS_ADAPTER). */
int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_ADAPTER;
	CLEAR_NEXUS_POST;
}
142 
/* Clear the nexus for one port, identified by its phy mask. */
int asd_clear_nexus_port(struct asd_sas_port *port)
{
	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_PORT;
	scb->clear_nexus.conn_mask = port->phy_mask;
	CLEAR_NEXUS_POST;
}
152 
/* Phases of the I_T nexus clearing sequence driven by
 * asd_I_T_nexus_reset(); each selects different clear_nexus flags. */
enum clear_nexus_phase {
	NEXUS_PHASE_PRE,	/* before the phy reset: EXEC_Q | SUSPEND_TX */
	NEXUS_PHASE_POST,	/* after the reset: SEND_Q | NOTINQ */
	NEXUS_PHASE_RESUME,	/* resume transmission: RESUME_TX */
};
158 
/*
 * Clear the I_T nexus to @dev.  @phase selects the flags: PRE clears
 * the execution queue and suspends transmission, POST clears the send
 * queue (including commands not yet in a queue), RESUME resumes
 * transmission.
 */
static int asd_clear_nexus_I_T(struct domain_device *dev,
			       enum clear_nexus_phase phase)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T;
	switch (phase) {
	case NEXUS_PHASE_PRE:
		scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
		break;
	case NEXUS_PHASE_POST:
		scb->clear_nexus.flags = SEND_Q | NOTINQ;
		break;
	case NEXUS_PHASE_RESUME:
		/* last case — no break needed */
		scb->clear_nexus.flags = RESUME_TX;
	}
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
180 
/*
 * Reset the I_T nexus to @dev: suspend transmission, reset the local
 * phy (link reset for SATA/STP, hard reset for SSP), flush the
 * outstanding commands on a successful reset, then try up to three
 * times to resume the suspended sequencer.  Returns the phy reset
 * result once the nexus resumes, or TMF_RESP_FUNC_FAILED if the
 * sequencer refuses to resume.
 */
int asd_I_T_nexus_reset(struct domain_device *dev)
{
	int res, tmp_res, i;
	struct sas_phy *phy = sas_find_local_phy(dev);
	/* Standard mandates link reset for ATA  (type 0) and
	 * hard reset for SSP (type 1) */
	int reset_type = (dev->dev_type == SATA_DEV ||
			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
	/* send a hard reset */
	ASD_DPRINTK("sending %s reset to %s\n",
		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
	res = sas_phy_reset(phy, reset_type);
	if (res == TMF_RESP_FUNC_COMPLETE) {
		/* wait for the maximum settle time */
		msleep(500);
		/* clear all outstanding commands (keep nexus suspended) */
		asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
	}
	/* Retry the resume a few times: the sequencer may need time. */
	for (i = 0 ; i < 3; i++) {
		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
		if (tmp_res == TC_RESUME)
			return res;
		msleep(500);
	}

	/* This is a bit of a problem:  the sequencer is still suspended
	 * and is refusing to resume.  Hope it will resume on a bigger hammer
	 * or the disk is lost */
	dev_printk(KERN_ERR, &phy->dev,
		   "Failed to resume nexus after reset 0x%x\n", tmp_res);

	return TMF_RESP_FUNC_FAILED;
}
216 
/* Clear the I_T_L nexus (all tasks to @lun on @dev) by flushing both
 * the send and execution queues. */
static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T_L;
	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
229 
/* Clear a single tagged task (I_T_L_Q nexus) identified by the tag
 * recorded in the task's ascb. */
static int asd_clear_nexus_tag(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TAG;
	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
	scb->clear_nexus.ssp_task.tag = tascb->tag;
	/* SSP targets need the connection handle as well */
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							  task->dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
244 
/* Clear a task by its transaction context index; used when no valid
 * tag is known for the task (see asd_clear_nexus()). */
static int asd_clear_nexus_index(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							  task->dev->lldd_dev);
	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
	CLEAR_NEXUS_POST;
}
258 
259 /* ---------- TMFs ---------- */
260 
/* Guard-timer handler for a TMF SCB: mark the TMF as failed and wake
 * the waiting issuer.  Races with asd_tmf_tasklet_complete() are
 * resolved via del_timer() there. */
static void asd_tmf_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *) data;
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("tmf timed out\n");
	tcs->tmf_state = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}
270 
/*
 * Parse the status block of a TC_SSP_RESP done-list entry for a TMF
 * SCB: look up the empty SCB (escb) and the extended data buffer (EDB)
 * holding the SSP RESPONSE frame, record the responded-to tag in the
 * ascb, invalidate the EDB and return the TMF response code from the
 * response IU.  Returns TMF_RESP_FUNC_FAILED if the escb cannot be
 * found.
 */
static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
				    struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	unsigned long flags;
	/* Layout of dl->status_block for this opcode. */
	struct tc_resp_sb_struct {
		__le16 index_escb;
		u8     len_lsb;
		u8     flags;
	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

	/* NOTE(review): edb_id is -1 when (flags & 0x70) == 0 —
	 * presumably the sequencer always supplies a valid EDB number
	 * for TC_SSP_RESP; verify against the firmware interface. */
	int  edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
	struct asd_ascb *escb;
	struct asd_dma_tok *edb;
	struct ssp_frame_hdr *fh;
	struct ssp_response_iu   *ru;
	int res = TMF_RESP_FUNC_FAILED;

	ASD_DPRINTK("tmf resp tasklet\n");

	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
	escb = asd_tc_index_find(&asd_ha->seq,
				 (int)le16_to_cpu(resp_sb->index_escb));
	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

	if (!escb) {
		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
		return res;
	}

	/* The SSP frame starts 16 bytes into the EDB; the tag of the
	 * task the TMF responded to sits at offset 4. */
	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
	ascb->tag = *(__be16 *)(edb->vaddr+4);
	fh = edb->vaddr + 16;
	ru = edb->vaddr + 16 + sizeof(*fh);
	res = ru->status;
	if (ru->datapres == 1)	  /* Response data present */
		res = ru->resp_data[3];
#if 0
	ascb->tag = fh->tag;
#endif
	ascb->tag_valid = 1;

	asd_invalidate_edb(escb, edb_id);
	return res;
}
316 
/*
 * Done-list completion for a TMF SCB: record the done-list opcode and
 * (for an SSP response) the TMF state and tag, wake the waiting issuer
 * and free the ascb.  If the guard timer has already fired,
 * asd_tmf_timedout() owns the completion — do nothing here.
 */
static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
				     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs;

	if (!del_timer(&ascb->timer))
		return;

	tcs = ascb->uldd_task;
	ASD_DPRINTK("tmf tasklet complete\n");

	tcs->dl_opcode = dl->opcode;

	if (dl->opcode == TC_SSP_RESP) {
		tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
		tcs->tag_valid = ascb->tag_valid;
		tcs->tag = ascb->tag;
	}

	complete(ascb->completion);
	asd_ascb_free(ascb);
}
339 
/*
 * Clear the nexus for a task that is being aborted but is not done:
 * clear by tag when a valid tag is known, else by transaction context
 * index, then wait (bounded) for the task itself to complete.
 * Returns TMF_RESP_FUNC_COMPLETE if the task reached the DONE state,
 * TMF_RESP_FUNC_FAILED otherwise.
 */
static int asd_clear_nexus(struct sas_task *task)
{
	int res = TMF_RESP_FUNC_FAILED;
	int leftover;
	struct asd_ascb *tascb = task->lldd_task;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;

	tascb->completion = &completion;

	ASD_DPRINTK("task not done, clearing nexus\n");
	if (tascb->tag_valid)
		res = asd_clear_nexus_tag(task);
	else
		res = asd_clear_nexus_index(task);
	leftover = wait_for_completion_timeout(&completion,
					       AIC94XX_SCB_TIMEOUT);
	tascb->completion = NULL;
	ASD_DPRINTK("came back from clear nexus\n");
	spin_lock_irqsave(&task->task_state_lock, flags);
	/* leftover == 0 means the wait timed out */
	if (leftover < 1)
		res = TMF_RESP_FUNC_FAILED;
	if (task->task_state_flags & SAS_TASK_STATE_DONE)
		res = TMF_RESP_FUNC_COMPLETE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return res;
}
368 
369 /**
370  * asd_abort_task -- ABORT TASK TMF
371  * @task: the task to be aborted
372  *
373  * Before calling ABORT TASK the task state flags should be ORed with
374  * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
375  * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
376  *
377  * Implements the ABORT TASK TMF, I_T_L_Q nexus.
378  * Returns: SAS TMF responses (see sas_task.h),
379  *          -ENOMEM,
380  *          -SAS_QUEUE_FULL.
381  *
382  * When ABORT TASK returns, the caller of ABORT TASK checks first the
383  * task->task_state_flags, and then the return value of ABORT TASK.
384  *
385  * If the task has task state bit SAS_TASK_STATE_DONE set, then the
386  * task was completed successfully prior to it being aborted.  The
387  * caller of ABORT TASK has responsibility to call task->task_done()
388  * xor free the task, depending on their framework.  The return code
389  * is TMF_RESP_FUNC_FAILED in this case.
390  *
391  * Else the SAS_TASK_STATE_DONE bit is not set,
392  * 	If the return code is TMF_RESP_FUNC_COMPLETE, then
393  * 		the task was aborted successfully.  The caller of
394  * 		ABORT TASK has responsibility to call task->task_done()
395  *              to finish the task, xor free the task depending on their
396  *		framework.
397  *	else
398  * 		the ABORT TASK returned some kind of error. The task
399  *              was _not_ cancelled.  Nothing can be assumed.
400  *		The caller of ABORT TASK may wish to retry.
401  */
asd_abort_task(struct sas_task * task)402 int asd_abort_task(struct sas_task *task)
403 {
404 	struct asd_ascb *tascb = task->lldd_task;
405 	struct asd_ha_struct *asd_ha = tascb->ha;
406 	int res = 1;
407 	unsigned long flags;
408 	struct asd_ascb *ascb = NULL;
409 	struct scb *scb;
410 	int leftover;
411 	DECLARE_TCS(tcs);
412 	DECLARE_COMPLETION_ONSTACK(completion);
413 	DECLARE_COMPLETION_ONSTACK(tascb_completion);
414 
415 	tascb->completion = &tascb_completion;
416 
417 	spin_lock_irqsave(&task->task_state_lock, flags);
418 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
419 		spin_unlock_irqrestore(&task->task_state_lock, flags);
420 		res = TMF_RESP_FUNC_COMPLETE;
421 		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
422 		goto out_done;
423 	}
424 	spin_unlock_irqrestore(&task->task_state_lock, flags);
425 
426 	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
427 	if (!ascb)
428 		return -ENOMEM;
429 
430 	ascb->uldd_task = &tcs;
431 	ascb->completion = &completion;
432 	scb = ascb->scb;
433 	scb->header.opcode = SCB_ABORT_TASK;
434 
435 	switch (task->task_proto) {
436 	case SAS_PROTOCOL_SATA:
437 	case SAS_PROTOCOL_STP:
438 		scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
439 		break;
440 	case SAS_PROTOCOL_SSP:
441 		scb->abort_task.proto_conn_rate  = (1 << 4); /* SSP */
442 		scb->abort_task.proto_conn_rate |= task->dev->linkrate;
443 		break;
444 	case SAS_PROTOCOL_SMP:
445 		break;
446 	default:
447 		break;
448 	}
449 
450 	if (task->task_proto == SAS_PROTOCOL_SSP) {
451 		scb->abort_task.ssp_frame.frame_type = SSP_TASK;
452 		memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
453 		       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
454 		memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
455 		       task->dev->port->ha->hashed_sas_addr,
456 		       HASHED_SAS_ADDR_SIZE);
457 		scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
458 
459 		memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
460 		scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
461 		scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
462 	}
463 
464 	scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
465 	scb->abort_task.conn_handle = cpu_to_le16(
466 		(u16)(unsigned long)task->dev->lldd_dev);
467 	scb->abort_task.retry_count = 1;
468 	scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
469 	scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
470 
471 	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
472 				   asd_tmf_timedout);
473 	if (res)
474 		goto out_free;
475 	wait_for_completion(&completion);
476 	ASD_DPRINTK("tmf came back\n");
477 
478 	tascb->tag = tcs.tag;
479 	tascb->tag_valid = tcs.tag_valid;
480 
481 	spin_lock_irqsave(&task->task_state_lock, flags);
482 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
483 		spin_unlock_irqrestore(&task->task_state_lock, flags);
484 		res = TMF_RESP_FUNC_COMPLETE;
485 		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
486 		goto out_done;
487 	}
488 	spin_unlock_irqrestore(&task->task_state_lock, flags);
489 
490 	if (tcs.dl_opcode == TC_SSP_RESP) {
491 		/* The task to be aborted has been sent to the device.
492 		 * We got a Response IU for the ABORT TASK TMF. */
493 		if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
494 			res = asd_clear_nexus(task);
495 		else
496 			res = tcs.tmf_state;
497 	} else if (tcs.dl_opcode == TC_NO_ERROR &&
498 		   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
499 		/* timeout */
500 		res = TMF_RESP_FUNC_FAILED;
501 	} else {
502 		/* In the following we assume that the managing layer
503 		 * will _never_ make a mistake, when issuing ABORT
504 		 * TASK.
505 		 */
506 		switch (tcs.dl_opcode) {
507 		default:
508 			res = asd_clear_nexus(task);
509 			/* fallthrough */
510 		case TC_NO_ERROR:
511 			break;
512 			/* The task hasn't been sent to the device xor
513 			 * we never got a (sane) Response IU for the
514 			 * ABORT TASK TMF.
515 			 */
516 		case TF_NAK_RECV:
517 			res = TMF_RESP_INVALID_FRAME;
518 			break;
519 		case TF_TMF_TASK_DONE:	/* done but not reported yet */
520 			res = TMF_RESP_FUNC_FAILED;
521 			leftover =
522 				wait_for_completion_timeout(&tascb_completion,
523 							  AIC94XX_SCB_TIMEOUT);
524 			spin_lock_irqsave(&task->task_state_lock, flags);
525 			if (leftover < 1)
526 				res = TMF_RESP_FUNC_FAILED;
527 			if (task->task_state_flags & SAS_TASK_STATE_DONE)
528 				res = TMF_RESP_FUNC_COMPLETE;
529 			spin_unlock_irqrestore(&task->task_state_lock, flags);
530 			break;
531 		case TF_TMF_NO_TAG:
532 		case TF_TMF_TAG_FREE: /* the tag is in the free list */
533 		case TF_TMF_NO_CONN_HANDLE: /* no such device */
534 			res = TMF_RESP_FUNC_COMPLETE;
535 			break;
536 		case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
537 			res = TMF_RESP_FUNC_ESUPP;
538 			break;
539 		}
540 	}
541  out_done:
542 	tascb->completion = NULL;
543 	if (res == TMF_RESP_FUNC_COMPLETE) {
544 		task->lldd_task = NULL;
545 		mb();
546 		asd_ascb_free(tascb);
547 	}
548 	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
549 	return res;
550 
551  out_free:
552 	asd_ascb_free(ascb);
553 	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
554 	return res;
555 }
556 
557 /**
558  * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
559  * @dev: pointer to struct domain_device of interest
560  * @lun: pointer to u8[8] which is the LUN
561  * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
562  * @index: the transaction context of the task to be queried if QT TMF
563  *
564  * This function is used to send ABORT TASK SET, CLEAR ACA,
565  * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
566  *
567  * No SCBs should be queued to the I_T_L nexus when this SCB is
568  * pending.
569  *
570  * Returns: TMF response code (see sas_task.h or the SAS spec)
571  */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
				int tmf, int index)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	struct asd_ascb *ascb;
	int res = 1;
	struct scb *scb;
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_TCS(tcs);

	/* These TMFs are only defined for SSP targets. */
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->completion = &completion;
	ascb->uldd_task = &tcs;
	scb = ascb->scb;

	/* QUERY TASK uses its own SCB opcode; all other TMFs share
	 * INITIATE_SSP_TMF. */
	if (tmf == TMF_QUERY_TASK)
		scb->header.opcode = QUERY_SSP_TASK;
	else
		scb->header.opcode = INITIATE_SSP_TMF;

	scb->ssp_tmf.proto_conn_rate  = (1 << 4); /* SSP */
	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
	/* SSP frame header */
	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
	/* SSP Task IU */
	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
	scb->ssp_tmf.ssp_task.tmf = tmf;

	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
	scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long)
					      dev->lldd_dev);
	scb->ssp_tmf.retry_count = 1;
	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
	if (tmf == TMF_QUERY_TASK)
		scb->ssp_tmf.index = cpu_to_le16(index);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_err;
	wait_for_completion(&completion);

	/* Map the done-list opcode to a TMF response code. */
	switch (tcs.dl_opcode) {
	case TC_NO_ERROR:
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_NAK_RECV:
		res = TMF_RESP_INVALID_FRAME;
		break;
	case TF_TMF_TASK_DONE:
		res = TMF_RESP_FUNC_FAILED;
		break;
	case TF_TMF_NO_TAG:
	case TF_TMF_TAG_FREE: /* the tag is in the free list */
	case TF_TMF_NO_CONN_HANDLE: /* no such device */
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
		res = TMF_RESP_FUNC_ESUPP;
		break;
	default:
		/* Allow TMF response codes to propagate upwards */
		res = tcs.dl_opcode;
		break;
	}
	return res;
out_err:
	asd_ascb_free(ascb);
	return res;
}
653 
/* ABORT TASK SET TMF (I_T_L nexus); on success also clear the I_T_L
 * nexus of any leftover tasks. */
int asd_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int rc;

	rc = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);
	if (rc == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return rc;
}
662 
/* CLEAR ACA TMF (I_T_L nexus); on success also clear the I_T_L nexus
 * of any leftover tasks. */
int asd_clear_aca(struct domain_device *dev, u8 *lun)
{
	int rc;

	rc = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
	if (rc == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return rc;
}
671 
/* CLEAR TASK SET TMF (I_T_L nexus); on success also clear the I_T_L
 * nexus of any leftover tasks. */
int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int rc;

	rc = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
	if (rc == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return rc;
}
680 
/* LU RESET TMF (I_T_L nexus); on success also clear the I_T_L nexus
 * of any leftover tasks. */
int asd_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc;

	rc = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);
	if (rc == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return rc;
}
689 
690 /**
691  * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
 * @task: pointer to sas_task struct of interest
693  *
694  * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
695  * or TMF_RESP_FUNC_SUCC if the task is in the task set.
696  *
697  * Normally the management layer sets the task to aborted state,
698  * and then calls query task and then abort task.
699  */
asd_query_task(struct sas_task * task)700 int asd_query_task(struct sas_task *task)
701 {
702 	struct asd_ascb *ascb = task->lldd_task;
703 	int index;
704 
705 	if (ascb) {
706 		index = ascb->tc_index;
707 		return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
708 					    TMF_QUERY_TASK, index);
709 	}
710 	return TMF_RESP_FUNC_COMPLETE;
711 }
712