/*
 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions, and the following disclaimer,
 * without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 * substantially similar to the "NO WARRANTY" disclaimer below
 * ("Disclaimer") and any redistribution must be conditioned upon
 * including a substantially similar Disclaimer requirement for further
 * binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 * of any contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_tracepoints.h"

/**
 * pm8001_find_tag - find the tag associated with a given sas task
 * @task: the task sent to the LLDD
 * @tag: the found tag associated with the task
 */
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct pm8001_ccb_info *ccb;
		ccb = task->lldd_task;
		*tag = ccb->ccb_tag;
		return 1;
	}
	return 0;
}

/**
 * pm8001_tag_free - release a tag that is no longer needed
 * @pm8001_ha: our hba struct
 * @tag: the tag to release
 */
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
	void *bitmap = pm8001_ha->tags;
	clear_bit(tag, bitmap);
}

/**
 * pm8001_tag_alloc - allocate a free tag for a task
 * @pm8001_ha: our hba struct
 * @tag_out: the allocated tag
 */
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
	unsigned int tag;
	void *bitmap = pm8001_ha->tags;
	unsigned long flags;

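	/* Atomically claim the first free bit in the tag bitmap. */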
	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
	if (tag >= pm8001_ha->tags_num) {
		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	set_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
	*tag_out = tag;
	return 0;
}

void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	for (i = 0; i < pm8001_ha->tags_num; ++i)
		pm8001_tag_free(pm8001_ha, i);
}

/**
 * pm8001_mem_alloc - allocate DMA-coherent memory for pm8001.
 * @pdev: pci device.
 * @virt_addr: the allocated (aligned) virtual address
 * @pphys_addr: DMA address of the allocation
 * @pphys_addr_hi: upper 32 bits of the aligned DMA address
 * @pphys_addr_lo: lower 32 bits of the aligned DMA address
 * @mem_size: memory size.
 * @align: requested byte alignment
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
		     dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
		     u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	if (align)
		align_offset = (dma_addr_t)align - 1;
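	/*
	 * Over-allocate by @align bytes so the bus address can be rounded
	 * up to the requested alignment below; the virtual address is then
	 * shifted by the same offset so both views stay in sync.
	 */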
	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
					    &mem_dma_handle, GFP_KERNEL);
	if (!mem_virt_alloc)
		return -ENOMEM;
	*pphys_addr = mem_dma_handle;
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}

/**
 * pm8001_find_ha_by_dev - find our hba struct from the domain device
 * passed in by the sas layer.
 * @dev: the domain device from the sas layer.
 */
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
{
	struct sas_ha_struct *sha = dev->port->ha;
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	return pm8001_ha;
}

/**
 * pm8001_phy_control - phy control handler registered in
 * sas_domain_function_template for libsas to call. Note: this only
 * controls HBA phys; to control an expander phy, use an SMP command
 * instead.
 * @sas_phy: which phy in HBA phys.
 * @func: the operation.
 * @funcdata: operation-specific data (e.g. link rates), NULL otherwise.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
		       void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];
	pm8001_ha->phy[phy_id].enable_completion = &completion;
	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
			    PHY_STATE_LINK_UP_SPCV) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
			    PHY_STATE_LINK_UP_SPC) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
					+ 0x1034 + (0x4000 * (phy_id & 3));

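			/*
			 * Per-phy link event counters: each phy within the
			 * quad has its own 0x4000-sized register block.
			 */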
			phy->invalid_dword_count = readl(qp);
			phy->running_disparity_error_count = readl(&qp[1]);
			phy->loss_of_dword_sync_count = readl(&qp[3]);
			phy->phy_reset_problem_count = readl(&qp[4]);
		}
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
		rc = -EOPNOTSUPP;
	}
	msleep(300);
	return rc;
}

/**
 * pm8001_scan_start - enable all HBA phys by sending a phy_start
 * command for each one to the HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	DECLARE_COMPLETION_ONSTACK(completion);
	pm8001_ha = sha->lldd_ha;
	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
		pm8001_ha->phy[i].enable_completion = &completion;
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
		wait_for_completion(&completion);
		msleep(300);
	}
}

int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	sas_drain_work(ha);
	return 1;
}

/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for an smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the smp task
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
				struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}

u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc && ata_is_ncq(qc->tf.protocol)) {
		*tag = qc->tag;
		return 1;
	}

	return 0;
}

/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for a sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the sata task
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
				struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
 * for an internal abort task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the internal abort task
 */
static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
					   struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the TM request
 * @tmf: the task management IU
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}

/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for an ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the ssp task
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
				struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}

/* Find the local port id that's attached to this device */
static int sas_find_local_port_id(struct domain_device *dev)
{
	struct domain_device *pdev = dev->parent;

	/* Directly attached device */
	if (!pdev)
		return dev->port->id;
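	/*
	 * Expander attached: walk up to the topmost expander; its port id
	 * identifies the HBA-local port the device hangs off.
	 */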
	while (pdev) {
		struct domain_device *pdev_p = pdev->parent;
		if (!pdev_p)
			return pdev->port->id;
		pdev = pdev->parent;
	}
	return 0;
}

#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))


static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
				  struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	enum sas_protocol task_proto = task->task_proto;
	struct sas_tmf_task *tmf = task->tmf;
	int is_tmf = !!tmf;

	switch (task_proto) {
	case SAS_PROTOCOL_SMP:
		return pm8001_task_prep_smp(pm8001_ha, ccb);
	case SAS_PROTOCOL_SSP:
		if (is_tmf)
			return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
		return pm8001_task_prep_ssp(pm8001_ha, ccb);
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		return pm8001_task_prep_ata(pm8001_ha, ccb);
	case SAS_PROTOCOL_INTERNAL_ABORT:
		return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
	default:
		dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
			task_proto);
	}

	return -EINVAL;
}

/**
 * pm8001_queue_command - entry point registered for the upper layer; all I/O
 * commands sent to the HBA come through this interface.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags
 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	struct task_status_struct *ts = &task->task_status;
	enum sas_protocol task_proto = task->task_proto;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	bool internal_abort = sas_is_internal_abort(task);
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_port *port = NULL;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 n_elem = 0;
	int rc = 0;

	if (!internal_abort && !dev->port) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return 0;
	}

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	if (pm8001_ha->controller_fatal_error) {
		ts->resp = SAS_TASK_UNDELIVERED;
		task->task_done(task);
		return 0;
	}

	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_dev = dev->lldd_dev;
	port = &pm8001_ha->port[sas_find_local_port_id(dev)];

	if (!internal_abort &&
	    (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		if (sas_protocol_ata(task_proto)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		} else {
			task->task_done(task);
		}
		rc = -ENODEV;
		goto err_out;
	}

	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
	if (!ccb) {
		rc = -SAS_QUEUE_FULL;
		goto err_out;
	}

	if (!sas_protocol_ata(task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto err_out_ccb;
			}
		}
	} else {
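		/*
		 * For ATA commands the scatterlist is already DMA-mapped by
		 * the libata/libsas path, so only record the element count.
		 */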
		n_elem = task->num_scatter;
	}

	task->lldd_task = ccb;
	ccb->n_elem = n_elem;

	atomic_inc(&pm8001_dev->running_req);

	rc = pm8001_deliver_command(pm8001_ha, ccb);
	if (rc) {
		atomic_dec(&pm8001_dev->running_req);
		if (!sas_protocol_ata(task_proto) && n_elem)
			dma_unmap_sg(pm8001_ha->dev, task->scatter,
				     task->num_scatter, task->data_dir);
err_out_ccb:
		pm8001_ccb_free(pm8001_ha, ccb);

err_out:
		pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);

	return rc;
}

/**
 * pm8001_ccb_task_free - free the sg for an ssp or smp command, then free the ccb.
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the task to free
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
			  struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct ata_queued_cmd *qc;
	struct pm8001_device *pm8001_dev;

	if (!task)
		return;

	if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
		dma_unmap_sg(pm8001_ha->dev, task->scatter,
			     task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (sas_protocol_ata(task->task_proto)) {
		/* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
		qc = task->uldd_task;
		pm8001_dev = ccb->device;
		trace_pm80xx_request_complete(pm8001_ha->id,
			pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
			ccb->ccb_tag, 0 /* ctlr_opcode not known */,
			qc ? qc->tf.command : 0, // ata opcode
			pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
	}

	task->lldd_task = NULL;
	pm8001_ccb_free(pm8001_ha, ccb);
}

/**
 * pm8001_alloc_dev - find an unused pm8001_device
 * @pm8001_ha: our hba card information
 */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
			pm8001_ha->devices[dev].id = dev;
			return &pm8001_ha->devices[dev];
		}
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "max support %d devices, ignore ..\n",
			   PM8001_MAX_DEVICES);
	}
	return NULL;
}
/**
 * pm8001_find_dev - find a matching pm8001_device
 * @pm8001_ha: our hba card information
 * @device_id: device ID to match against
 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
				      u32 device_id)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].device_id == device_id)
			return &pm8001_ha->devices[dev];
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
	}
	return NULL;
}

void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
	u32 id = pm8001_dev->id;
	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
	pm8001_dev->id = id;
	pm8001_dev->dev_type = SAS_PHY_UNUSED;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	pm8001_dev->sas_device = NULL;
}

/**
 * pm8001_dev_found_notify - libsas notifies that a device has been found.
 * @dev: the device structure which the sas layer uses.
 *
 * When libsas finds a sas domain device, it tells the LLDD that the
 * device was found; the LLDD then registers the device with the HBA
 * firmware using the "OPC_INB_REG_DEV" command. The HBA assigns a
 * device ID (based on the device's sas address) and returns it to the
 * LLDD. From then on we talk to the HBA FW using that device ID rather
 * than the sas address. This step is required for our HBA but is
 * optional for other HBA drivers.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
		     phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr)
				== SAS_ADDR(dev->sas_addr)) {
				pm8001_device->attached_phy = phy_id;
				break;
			}
		}
		if (phy_id == parent_dev->ex_dev.num_phys) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Error: no attached dev:%016llx at ex:%016llx.\n",
				   SAS_ADDR(dev->sas_addr),
				   SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly sata */
		}
	} /*register this device to HBA*/
	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}

int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}

void pm8001_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

#define PM8001_TASK_TIMEOUT 20

/**
 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
 * @dev: the device structure which the sas layer uses.
 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
			   pm8001_dev->device_id, pm8001_dev->dev_type);
		if (atomic_read(&pm8001_dev->running_req)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			sas_execute_internal_abort_dev(dev, 0, NULL);
			while (atomic_read(&pm8001_dev->running_req))
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
		pm8001_free_dev(pm8001_dev);
	} else {
		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}

/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		if (ccb->ccb_tag == PM8001_INVALID_TAG)
			continue;

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
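		/*
		 * No specific device given: sanity-check that the ccb's
		 * device pointer really lies within this HBA's devices[]
		 * array before acting on it.
		 */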
		if (!device_to_close) {
			uintptr_t d = (uintptr_t)pm8001_dev
					- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			    || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&task->task_state_lock,
					       flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
					       flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
			mb();/* in order to force CPU ordering */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

/**
 * pm8001_I_T_nexus_reset() - reset the initiator/target connection
 * @dev: the device structure for the device to reset.
 *
 * Standard mandates link reset for ATA (type 0) and hard reset for
 * SSP (type 1), only for RECOVERY
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH,
				   "phy reset failed for device %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		rc = sas_execute_internal_abort_dev(dev, 0, NULL);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);
	return rc;
}

/*
 * This function handles the IT_NEXUS_XXX event or completion
 * status code for SSP/SATA/SMP I/O requests.
 */
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -1;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");

	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		DECLARE_COMPLETION_ONSTACK(completion_setstate);
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		/* send internal ssp/sata/smp abort command to FW */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/*send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
		pm8001_dev->setds_completion = &completion_setstate;

		wait_for_completion(&completion_setstate);
	} else {
		/* send internal ssp/sata/smp abort command to FW */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/*send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);

	return rc;
}
/* mandatory SAM-3: the task management function that resets the specified LUN */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);
	if (dev_is_sata(dev)) {
		struct sas_phy *phy = sas_get_local_phy(dev);
		sas_execute_internal_abort_dev(dev, 0, NULL);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	} else {
		rc = sas_lu_reset(dev, lun);
	}
	/* If this failed, the caller falls through to an I_T nexus reset */
	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
	return rc;
}

/* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);

		rc = sas_query_task(task, tag);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is still in Lun\n");
			break;
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is not in Lun or failed, reset the phy\n");
			break;
		}
	}
	pr_err("pm80xx: rc= %d\n", rc);
	return rc;
}

/* mandatory SAM-3: abort the specified task; the task/ccb info still needs to be freed */
int pm8001_abort_task(struct sas_task *task)
{
	unsigned long flags;
	u32 tag;
	struct domain_device *dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 phy_id, port_id;
	struct sas_task_slow slow_task;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return TMF_RESP_FUNC_FAILED;

	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy_id = pm8001_dev->attached_phy;

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		// If the controller is seeing fatal errors
		// abort task will not get a response from the controller
		return TMF_RESP_FUNC_FAILED;
	}

	ret = pm8001_find_tag(task, &tag);
	if (ret == 0) {
		pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
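	/*
	 * The SATA abort path below waits on slow_task->completion, so give
	 * the task an on-stack slow_task if it does not already have one.
	 */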
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		rc = sas_abort_task(task, tag);
		sas_execute_internal_abort_single(dev, tag, 0, NULL);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
			port_id = phy->port->port_id;

			/* 1. Set Device state as Recovery */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_IN_RECOVERY);
			wait_for_completion(&completion);

			/* 2. Send Phy Control Hard Reset */
			reinit_completion(&completion);
			phy->port_reset_status = PORT_RESET_TMO;
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
				goto out;
			}

			/* In the case of the reset timeout/fail we still
			 * abort the command at the firmware. The assumption
			 * here is that the drive is off doing something so
			 * that it's not processing requests, and we want to
			 * avoid getting a completion for this and either
			 * leaking the task in libsas or losing the race and
			 * getting a double free.
			 */
			pm8001_dbg(pm8001_ha, MSG,
				   "Waiting for local phy ctl\n");
			ret = wait_for_completion_timeout(&completion,
					PM8001_TASK_TIMEOUT * HZ);
			if (!ret || !phy->reset_success) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
			} else {
				/* 3. Wait for Port Reset complete or
				 * Port reset TMO
				 */
				pm8001_dbg(pm8001_ha, MSG,
					   "Waiting for Port reset\n");
				ret = wait_for_completion_timeout(
					&completion_reset,
					PM8001_TASK_TIMEOUT * HZ);
				if (!ret)
					phy->reset_completion = NULL;
				WARN_ON(phy->port_reset_status ==
					PORT_RESET_TMO);
				if (phy->port_reset_status == PORT_RESET_TMO) {
					pm8001_dev_gone_notify(dev);
					PM8001_CHIP_DISP->hw_event_ack_req(
						pm8001_ha, 0,
						0x07, /*HW_EVENT_PHY_DOWN ack*/
						port_id, phy_id, 0, 0);
					goto out;
				}
			}

			/*
			 * 4. SATA Abort ALL
			 * we wait for the task to be aborted so that the task
			 * is removed from the ccb. on success the caller is
			 * going to free the task.
			 */
			ret = sas_execute_internal_abort_dev(dev, 0, NULL);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set Device State as Operational */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_OPERATIONAL);
			wait_for_completion(&completion);
		} else {
			ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
		}
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);

	}
out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_info(pm8001_ha, "rc= %d\n", rc);
	return rc;
}

int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
		   pm8001_dev->device_id);
	return sas_clear_task_set(dev, lun);
}

void pm8001_port_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
	struct pm8001_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct pm8001_port *port = phy->port;

	if (!sas_port) {
		pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
		return;
	}
	sas_port->lldd_port = port;
}

void pm8001_setds_completion(struct domain_device *dev)
{
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	if (pm8001_ha->chip_id != chip_8001) {
		pm8001_dev->setds_completion = &completion_setstate;
		PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	}
}

void pm8001_tmf_aborted(struct sas_task *task)
{
	struct pm8001_ccb_info *ccb = task->lldd_task;

	if (ccb)
		ccb->task = NULL;
}